1 # Copyright (c) 2023 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Interface util library."""
16 from time import sleep
17 from enum import IntEnum
19 from ipaddress import ip_address
20 from robot.api import logger
21 from robot.libraries.BuiltIn import BuiltIn
23 from resources.libraries.python.Constants import Constants
24 from resources.libraries.python.DUTSetup import DUTSetup
25 from resources.libraries.python.IPAddress import IPAddress
26 from resources.libraries.python.L2Util import L2Util
27 from resources.libraries.python.PapiExecutor import PapiSocketExecutor
28 from resources.libraries.python.parsers.JsonParser import JsonParser
29 from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
30 from resources.libraries.python.topology import NodeType, Topology
31 from resources.libraries.python.VPPUtil import VPPUtil
class InterfaceStatusFlags(IntEnum):
    """Interface status flags, mirrors VPP API enum if_status_flags."""
    IF_STATUS_API_FLAG_ADMIN_UP = 1
    IF_STATUS_API_FLAG_LINK_UP = 2
class MtuProto(IntEnum):
    """MTU protocol, mirrors VPP API enum mtu_proto.

    Members 0-2 reconstructed from the VPP interface_types API
    (only MPLS was visible in the damaged source) — TODO confirm.
    """
    MTU_PROTO_API_L3 = 0
    MTU_PROTO_API_IP4 = 1
    MTU_PROTO_API_IP6 = 2
    MTU_PROTO_API_MPLS = 3
class LinkDuplex(IntEnum):
    """Link duplex, mirrors VPP API enum link_duplex."""
    LINK_DUPLEX_API_UNKNOWN = 0
    LINK_DUPLEX_API_HALF = 1
    LINK_DUPLEX_API_FULL = 2
class SubInterfaceFlags(IntEnum):
    """Sub-interface flags, mirrors VPP API enum sub_if_flags.

    Values are bit flags and may be OR-ed together.
    """
    SUB_IF_API_FLAG_NO_TAGS = 1
    SUB_IF_API_FLAG_ONE_TAG = 2
    SUB_IF_API_FLAG_TWO_TAGS = 4
    SUB_IF_API_FLAG_DOT1AD = 8
    SUB_IF_API_FLAG_EXACT_MATCH = 16
    SUB_IF_API_FLAG_DEFAULT = 32
    SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY = 64
    SUB_IF_API_FLAG_INNER_VLAN_ID_ANY = 128
    SUB_IF_API_FLAG_DOT1AH = 256
class RxMode(IntEnum):
    """RX mode, mirrors VPP API enum rx_mode."""
    RX_MODE_API_UNKNOWN = 0
    RX_MODE_API_POLLING = 1
    RX_MODE_API_INTERRUPT = 2
    RX_MODE_API_ADAPTIVE = 3
    RX_MODE_API_DEFAULT = 4
class IfType(IntEnum):
    """Interface type, mirrors VPP API enum if_type.

    Members 1-3 reconstructed from the VPP interface_types API
    (only HARDWARE was visible in the damaged source) — TODO confirm.
    """
    # A hardware interface.
    IF_API_TYPE_HARDWARE = 0
    # A sub-interface.
    IF_API_TYPE_SUB = 1
    IF_API_TYPE_P2P = 2
    IF_API_TYPE_PIPE = 3
class LinkBondLoadBalanceAlgo(IntEnum):
    """Link bonding load balance algorithm, mirrors VPP API enum."""
    BOND_API_LB_ALGO_L2 = 0
    BOND_API_LB_ALGO_L34 = 1
    BOND_API_LB_ALGO_L23 = 2
    BOND_API_LB_ALGO_RR = 3
    BOND_API_LB_ALGO_BC = 4
    BOND_API_LB_ALGO_AB = 5
class LinkBondMode(IntEnum):
    """Link bonding mode, mirrors VPP API enum bond_mode."""
    BOND_API_MODE_ROUND_ROBIN = 1
    BOND_API_MODE_ACTIVE_BACKUP = 2
    BOND_API_MODE_XOR = 3
    BOND_API_MODE_BROADCAST = 4
    BOND_API_MODE_LACP = 5
class RdmaMode(IntEnum):
    """RDMA interface mode, mirrors VPP rdma plugin API enum."""
    RDMA_API_MODE_AUTO = 0
    RDMA_API_MODE_IBV = 1
class AfXdpMode(IntEnum):
    """AF_XDP interface mode, mirrors VPP af_xdp plugin API enum."""
    AF_XDP_API_MODE_AUTO = 0
    AF_XDP_API_MODE_COPY = 1
    AF_XDP_API_MODE_ZERO_COPY = 2
122 """General utilities for managing interfaces"""
def pci_to_int(pci_str):
    """Convert PCI address from string format (0000:18:0a.0) to
    integer representation (169345024).

    Layout: domain | bus << 16 | device << 24 | function << 29.

    :param pci_str: PCI address in string representation.
    :type pci_str: str
    :returns: Integer representation of PCI address.
    :rtype: int
    """
    # Split "dddd:bb:dd.f" into [domain, bus, device, function].
    pci = list(pci_str.split(u":")[0:2])
    pci.extend(pci_str.split(u":")[2].split(u"."))

    return (int(pci[0], 16) | int(pci[1], 16) << 16 |
            int(pci[2], 16) << 24 | int(pci[3], 16) << 29)
def pci_to_eth(node, pci_str):
    """Convert PCI address on DUT to Linux ethernet name.

    Reads the single entry under /sys/bus/pci/devices/<pci>/net/.

    :param node: DUT node.
    :param pci_str: PCI address.
    :type node: dict
    :type pci_str: str
    :returns: Ethernet name.
    :rtype: str
    :raises RuntimeError: If the PCI address has no network device.
    """
    cmd = f"basename /sys/bus/pci/devices/{pci_str}/net/*"
    try:
        stdout, _ = exec_cmd_no_error(node, cmd)
    except RuntimeError:
        raise RuntimeError(f"Cannot convert {pci_str} to ethernet name!")

    return stdout.strip()
def get_interface_index(node, interface):
    """Get interface sw_if_index from topology file.

    :param node: Node where the interface is.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: str or int
    :returns: SW interface index.
    :rtype: int
    :raises TypeError: If interface is neither int-convertible nor a name.
    """
    try:
        sw_if_index = int(interface)
    except ValueError:
        # Not numeric: resolve by interface key, then by interface name.
        sw_if_index = Topology.get_interface_sw_index(node, interface)
        if sw_if_index is None:
            sw_if_index = \
                Topology.get_interface_sw_index_by_name(node, interface)
    except TypeError as err:
        raise TypeError(f"Wrong interface format {interface}") from err

    return sw_if_index
def set_interface_state(node, interface, state, if_type=u"key"):
    """Set interface state on a node.

    Function can be used for DUTs as well as for TGs.

    :param node: Node where the interface is.
    :param interface: Interface key or sw_if_index or name.
    :param state: One of 'up' or 'down'.
    :param if_type: Interface type, one of 'key' or 'name'.
    :type node: dict
    :type interface: str or int
    :type state: str
    :type if_type: str
    :raises ValueError: If the interface type is unknown.
    :raises ValueError: If the state of interface is unexpected.
    :raises ValueError: If sw_if_index was not assigned by VPP.
    :raises RuntimeError: If the node has an unknown node type.
    """
    if if_type == u"key":
        if isinstance(interface, str):
            sw_if_index = Topology.get_interface_sw_index(node, interface)
            iface_name = Topology.get_interface_name(node, interface)
        else:
            sw_if_index = interface
    elif if_type == u"name":
        iface_key = Topology.get_interface_by_name(node, interface)
        if iface_key is not None:
            sw_if_index = Topology.get_interface_sw_index(node, iface_key)
        iface_name = interface
    else:
        raise ValueError(f"Unknown if_type: {if_type}")

    if node[u"type"] == NodeType.DUT:
        if sw_if_index is None:
            raise ValueError(
                f"Interface index for {interface} not assigned by VPP."
            )
        if state == u"up":
            flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
        elif state == u"down":
            flags = 0
        else:
            raise ValueError(f"Unexpected interface state: {state}")
        cmd = u"sw_interface_set_flags"
        err_msg = f"Failed to set interface state on host {node[u'host']}"
        args = dict(
            sw_if_index=int(sw_if_index),
            flags=flags
        )
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
    elif node[u"type"] == NodeType.TG or node[u"type"] == NodeType.VM:
        cmd = f"ip link set {iface_name} {state}"
        exec_cmd_no_error(node, cmd, sudo=True)
    else:
        raise RuntimeError(
            f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
        )
def set_interface_state_pci(
        node, pf_pcis, namespace=None, state=u"up"):
    """Set operational state for interface specified by PCI address.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param namespace: Exec command in namespace. (Optional, Default: none)
    :param state: Up/Down. (Optional, default: up)
    :type node: dict
    :type pf_pcis: list
    :type namespace: str
    :type state: str
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        InterfaceUtil.set_linux_interface_state(
            node, pf_eth, namespace=namespace, state=state
        )
def set_interface_mtu(node, pf_pcis, mtu=9200):
    """Set Ethernet MTU for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param mtu: MTU to set. Default: 9200.
    :type node: dict
    :type pf_pcis: list
    :type mtu: int
    :raises RuntimeError: If failed to set MTU on interface.
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ip link set {pf_eth} mtu {mtu}"
        exec_cmd_no_error(node, cmd, sudo=True)
def set_interface_channels(
        node, pf_pcis, num_queues=1, channel=u"combined"):
    """Set interface channels for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param num_queues: Number of channels. (Optional, Default: 1)
    :param channel: Channel type. (Optional, Default: combined)
    :type node: dict
    :type pf_pcis: list
    :type num_queues: int
    :type channel: str
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ethtool --set-channels {pf_eth} {channel} {num_queues}"
        exec_cmd_no_error(node, cmd, sudo=True)
def set_interface_xdp_off(node, pf_pcis):
    """Detaches any currently attached XDP/BPF program from the specified
    interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :type node: dict
    :type pf_pcis: list
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ip link set dev {pf_eth} xdp off"
        exec_cmd_no_error(node, cmd, sudo=True)
def set_interface_flow_control(node, pf_pcis, rxf=u"off", txf=u"off"):
    """Set Ethernet flow control for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param rxf: RX flow. (Optional, Default: off).
    :param txf: TX flow. (Optional, Default: off).
    :type node: dict
    :type pf_pcis: list
    :type rxf: str
    :type txf: str
    :raises RuntimeError: If failed to set flow control on interface.
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ethtool -A {pf_eth} rx {rxf} tx {txf}"
        ret_code, _, _ = exec_cmd(node, cmd, sudo=True)
        # ethtool returns 78 when the requested setting is already active.
        if int(ret_code) not in (0, 78):
            # Bug fix: original string lacked the f-prefix, so the literal
            # text "{pf_eth}" was raised instead of the interface name.
            raise RuntimeError(f"Failed to set flow control on {pf_eth}!")
def set_pci_parameter(node, pf_pcis, key, value):
    """Set PCI parameter for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param key: Key to set.
    :param value: Value to set.
    :type node: dict
    :type pf_pcis: list
    :type key: str
    :type value: str
    """
    for pf_pci in pf_pcis:
        cmd = f"setpci -s {pf_pci} {key}={value}"
        exec_cmd_no_error(node, cmd, sudo=True)
def vpp_set_interface_mtu(node, interface, mtu):
    """Apply new MTU value to a VPP hardware interface.

    The interface should be down when this is called.

    :param node: VPP node.
    :param interface: Interface to set MTU on.
    :param mtu: Ethernet MTU size in Bytes.
    :type node: dict
    :type interface: str or int
    :type mtu: int
    """
    if isinstance(interface, str):
        sw_if_index = Topology.get_interface_sw_index(node, interface)
    else:
        sw_if_index = interface

    cmd = u"hw_interface_set_mtu"
    err_msg = f"Failed to set interface MTU on host {node[u'host']}"
    args = dict(sw_if_index=sw_if_index, mtu=int(mtu))
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def vpp_node_interfaces_ready_wait(node, retries=15):
    """Wait until all interfaces with admin-up are in link-up state.

    :param node: Node to wait on.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type node: dict
    :type retries: int
    :raises RuntimeError: If any interface is not in link-up state after
        defined number of retries.
    """
    for _ in range(0, retries):
        not_ready = list()
        out = InterfaceUtil.vpp_get_interface_data(node)
        for interface in out:
            # flags == 1 means admin-up without link-up.
            if interface.get(u"flags") == 1:
                not_ready.append(interface.get(u"interface_name"))
        if not_ready:
            logger.debug(
                f"Interfaces still not in link-up state:\n{not_ready}"
            )
            sleep(1)
        else:
            break
    else:
        err = f"Timeout, interfaces not up:\n{not_ready}" \
            if u"not_ready" in locals() else u"No check executed!"
        raise RuntimeError(err)
def all_vpp_interfaces_ready_wait(nodes, retries=15):
    """Wait until all interfaces with admin-up are in link-up state for all
    nodes in the topology.

    :param nodes: Nodes in the topology.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type nodes: dict
    :type retries: int
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)
def vpp_get_interface_data(node, interface=None):
    """Get all interface data from a VPP node. If a name or
    sw_interface_index is provided, return only data for the matching
    interface(s).

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: List of dictionaries containing data for each interface, or a
        single dictionary for the specified interface.
    :rtype: list or dict
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    def process_if_dump(if_dump):
        """Process interface dump.

        Converts API enum/binary fields to plain strings and ints.

        :param if_dump: Interface dump.
        :type if_dump: dict
        :returns: Processed interface dump.
        :rtype: dict
        """
        if_dump[u"l2_address"] = str(if_dump[u"l2_address"])
        if_dump[u"b_dmac"] = str(if_dump[u"b_dmac"])
        if_dump[u"b_smac"] = str(if_dump[u"b_smac"])
        if_dump[u"flags"] = if_dump[u"flags"].value
        if_dump[u"type"] = if_dump[u"type"].value
        if_dump[u"link_duplex"] = if_dump[u"link_duplex"].value
        if_dump[u"sub_if_flags"] = if_dump[u"sub_if_flags"].value \
            if hasattr(if_dump[u"sub_if_flags"], u"value") \
            else int(if_dump[u"sub_if_flags"])

        return if_dump

    if interface is not None:
        if isinstance(interface, str):
            param = u"interface_name"
        elif isinstance(interface, int):
            param = u"sw_if_index"
        else:
            raise TypeError(f"Wrong interface format {interface}")
    else:
        param = u""

    cmd = u"sw_interface_dump"
    args = dict(
        name_filter_valid=False,
        name_filter=u""
    )
    err_msg = f"Failed to get interface dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)
    logger.debug(f"Received data:\n{details!r}")

    data = list() if interface is None else dict()
    for dump in details:
        if interface is None:
            data.append(process_if_dump(dump))
        # Names from VPP may be NUL-padded; strip before comparing.
        elif str(dump.get(param)).rstrip(u"\x00") == str(interface):
            data = process_if_dump(dump)
            break

    logger.debug(f"Interface data:\n{data}")
    return data
def vpp_get_interface_name(node, sw_if_index):
    """Get interface name for the given SW interface index from actual
    interface dump.

    For a sub-interface, resolves via its parent (sup) interface.

    :param node: VPP node to get interface data from.
    :param sw_if_index: SW interface index of the specific interface.
    :type node: dict
    :type sw_if_index: int
    :returns: Name of the given interface.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
    if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data[u"sup_sw_if_index"]
        )

    return if_data.get(u"interface_name")
def vpp_get_interface_sw_index(node, interface_name):
    """Get SW interface index for the given interface name from actual
    interface dump.

    Docstring fixed: the original copy-pasted "Name of the given
    interface" although the function returns the sw_if_index.

    :param node: VPP node to get interface data from.
    :param interface_name: Interface name.
    :type node: dict
    :type interface_name: str
    :returns: SW interface index of the given interface.
    :rtype: int
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)

    return if_data.get(u"sw_if_index")
def vpp_get_interface_mac(node, interface):
    """Get MAC address for the given interface from actual interface dump.

    For a sub-interface, resolves via its parent (sup) interface.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: MAC address.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
    if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data[u"sup_sw_if_index"])

    return if_data.get(u"l2_address")
def vpp_set_interface_mac(node, interface, mac):
    """Set MAC address for the given interface.

    :param node: VPP node to set interface MAC.
    :param interface: Numeric index or name string of a specific interface.
    :param mac: Required MAC address.
    :type node: dict
    :type interface: int or str
    :type mac: str
    """
    cmd = u"sw_interface_set_mac_address"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        mac_address=L2Util.mac_to_bin(mac)
    )
    # Bug fix: added missing space before "on host" in the error message
    # (the two f-string fragments concatenated without a separator).
    err_msg = f"Failed to set MAC address of interface {interface} " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def tg_set_interface_driver(node, pci_addr, driver):
    """Set interface driver on the TG node.

    :param node: Node to set interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :param driver: Driver name.
    :type node: dict
    :type pci_addr: str
    :type driver: str
    :raises RuntimeError: If unbinding from the current driver fails.
    :raises RuntimeError: If binding to the new driver fails.
    """
    old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
    if old_driver == driver:
        return

    ssh = SSH()
    ssh.connect(node)

    # Unbind from current driver
    if old_driver is not None:
        cmd = f"sh -c \"echo {pci_addr} > " \
            f"/sys/bus/pci/drivers/{old_driver}/unbind\""
        ret_code, _, _ = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")

    # Bind to the new driver
    cmd = f"sh -c \"echo {pci_addr} > /sys/bus/pci/drivers/{driver}/bind\""
    ret_code, _, _ = ssh.exec_command_sudo(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
def tg_get_interface_driver(node, pci_addr):
    """Get interface driver from the TG node.

    :param node: Node to get interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :type node: dict
    :type pci_addr: str
    :returns: Interface driver or None if not found.
    :rtype: str
    :raises RuntimeError: If PCI rescan or lspci command execution failed.
    """
    return DUTSetup.get_pci_dev_driver(node, pci_addr)
def tg_set_interfaces_default_driver(node):
    """Set interfaces default driver specified in topology yaml file.

    :param node: Node to setup interfaces driver on (must be TG node).
    :type node: dict
    """
    for interface in node[u"interfaces"].values():
        InterfaceUtil.tg_set_interface_driver(
            node, interface[u"pci_address"], interface[u"driver"]
        )
def update_vpp_interface_data_on_node(node):
    """Update vpp generated interface data for a given node in DICT__nodes.

    Updates interface names, software if index numbers and any other details
    generated specifically by vpp that are unknown before testcase run.
    It does this by dumping interface list from all devices using python
    api, and pairing known information from topology (mac address) to state
    from VPP.

    :param node: Node selected from DICT__nodes.
    :type node: dict
    """
    interface_list = InterfaceUtil.vpp_get_interface_data(node)
    # Key VPP's view of interfaces by MAC for pairing with topology.
    interface_dict = dict()
    for ifc in interface_list:
        interface_dict[ifc[u"l2_address"]] = ifc

    for if_name, if_data in node[u"interfaces"].items():
        ifc_dict = interface_dict.get(if_data[u"mac_address"])
        if ifc_dict is not None:
            if_data[u"name"] = ifc_dict[u"interface_name"]
            if_data[u"vpp_sw_index"] = ifc_dict[u"sw_if_index"]
            if_data[u"mtu"] = ifc_dict[u"mtu"][0]
            logger.debug(
                f"Interface {if_name} found by MAC "
                f"{if_data[u'mac_address']}"
            )
        else:
            logger.debug(
                f"Interface {if_name} not found by MAC "
                f"{if_data[u'mac_address']}"
            )
            if_data[u"vpp_sw_index"] = None
def update_nic_interface_names(node):
    """Update interface names based on nic type and PCI address.

    This method updates interface names in the same format as VPP does,
    i.e. "<Speed>GigabitEthernet<bus>/<device>/<function>" in lowercase
    hex without leading zeros.

    :param node: Node dictionary.
    :type node: dict
    """
    for ifc in node[u"interfaces"].values():
        if_pci = ifc[u"pci_address"].replace(u".", u":").split(u":")
        loc = f"{int(if_pci[1], 16):x}/{int(if_pci[2], 16):x}/" \
            f"{int(if_pci[3], 16):x}"
        if ifc[u"model"] == u"Intel-XL710":
            ifc[u"name"] = f"FortyGigabitEthernet{loc}"
        elif ifc[u"model"] == u"Intel-X710":
            ifc[u"name"] = f"TenGigabitEthernet{loc}"
        elif ifc[u"model"] == u"Intel-X520-DA2":
            ifc[u"name"] = f"TenGigabitEthernet{loc}"
        elif ifc[u"model"] == u"Cisco-VIC-1385":
            ifc[u"name"] = f"FortyGigabitEthernet{loc}"
        elif ifc[u"model"] == u"Cisco-VIC-1227":
            ifc[u"name"] = f"TenGigabitEthernet{loc}"
        else:
            ifc[u"name"] = f"UnknownEthernet{loc}"
def update_nic_interface_names_on_all_duts(nodes):
    """Update interface names based on nic type and PCI address on all DUTs.

    This method updates interface names in the same format as VPP does.

    :param nodes: Topology nodes.
    :type nodes: dict
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.update_nic_interface_names(node)
def update_tg_interface_data_on_node(node):
    """Update interface name for TG/linux node in DICT__nodes.

    Example of the generated shell output that gets parsed as JSON::

        # for dev in `ls /sys/class/net/`;
        > do echo "\"`cat /sys/class/net/$dev/address`\": \"$dev\""; done
        "52:54:00:9f:82:63": "eth0"
        "52:54:00:77:ae:a9": "eth1"
        "52:54:00:e1:8a:0f": "eth2"
        "00:00:00:00:00:00": "lo"

    :param node: Node selected from DICT__nodes.
    :type node: dict
    :raises RuntimeError: If getting of interface name and MAC fails.
    """
    # First setup interface driver specified in yaml file
    InterfaceUtil.tg_set_interfaces_default_driver(node)

    # Get interface names
    ssh = SSH()
    ssh.connect(node)

    cmd = u'for dev in `ls /sys/class/net/`; do echo "\\"`cat ' \
        u'/sys/class/net/$dev/address`\\": \\"$dev\\""; done;'

    ret_code, stdout, _ = ssh.exec_command(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(u"Get interface name and MAC failed")
    # Turn the emitted lines into one JSON object: {"mac": "dev", ...}.
    tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"

    interfaces = JsonParser().parse_data(tmp)
    for interface in node[u"interfaces"].values():
        name = interfaces.get(interface[u"mac_address"])
        if name is None:
            continue
        interface[u"name"] = name
def iface_update_numa_node(node):
    """For all interfaces from topology file update numa node based on
    information from the node.

    Reads /sys/bus/pci/devices/<pci>/numa_node for each interface; a
    negative value (no NUMA info) is normalized to 0. Each read is
    attempted up to 3 times.

    :param node: Node from topology.
    :type node: dict
    :raises ValueError: If numa node ia less than 0.
    :raises RuntimeError: If update of numa node failed.
    """
    ssh = SSH()
    for if_key in Topology.get_node_interfaces(node):
        if_pci = Topology.get_interface_pci_addr(node, if_key)
        ssh.connect(node)
        cmd = f"cat /sys/bus/pci/devices/{if_pci}/numa_node"
        for _ in range(3):
            ret, out, _ = ssh.exec_command(cmd)
            if ret == 0:
                try:
                    numa_node = 0 if int(out) < 0 else int(out)
                except ValueError:
                    logger.trace(
                        f"Reading numa location failed for: {if_pci}"
                    )
                else:
                    Topology.set_interface_numa_node(
                        node, if_key, numa_node
                    )
                    break
        else:
            raise RuntimeError(f"Update numa node failed for: {if_pci}")
def update_all_interface_data_on_all_nodes(
        nodes, skip_tg=False, skip_vpp=False):
    """Update interface names on all nodes in DICT__nodes.

    This method updates the topology dictionary by querying interface lists
    of all nodes mentioned in the topology dictionary.

    :param nodes: Nodes in the topology.
    :param skip_tg: Skip TG node.
    :param skip_vpp: Skip VPP node.
    :type nodes: dict
    :type skip_tg: bool
    :type skip_vpp: bool
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT and not skip_vpp:
            InterfaceUtil.update_vpp_interface_data_on_node(node)
        elif node[u"type"] == NodeType.TG and not skip_tg:
            InterfaceUtil.update_tg_interface_data_on_node(node)
        InterfaceUtil.iface_update_numa_node(node)
def create_vlan_subinterface(node, interface, vlan):
    """Create VLAN sub-interface on node.

    :param node: Node to add VLAN subinterface on.
    :param interface: Interface name or index on which create VLAN
        subinterface.
    :param vlan: VLAN ID of the subinterface to be created.
    :type node: dict
    :type interface: str or int
    :type vlan: int
    :returns: Name and index of created subinterface.
    :rtype: tuple
    :raises RuntimeError: if it is unable to create VLAN subinterface on the
        node or interface cannot be converted.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"create_vlan_subif"
    args = dict(
        sw_if_index=sw_if_index,
        vlan_id=int(vlan)
    )
    err_msg = f"Failed to create VLAN sub-interface on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"vlan_subif")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{vlan}", sw_if_index
def create_vxlan_interface(node, vni, source_ip, destination_ip):
    """Create VXLAN interface and return sw if index of created interface.

    :param node: Node where to create VXLAN interface.
    :param vni: VXLAN Network Identifier.
    :param source_ip: Source IP of a VXLAN Tunnel End Point.
    :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
    :type node: dict
    :type vni: int
    :type source_ip: str
    :type destination_ip: str
    :returns: SW IF INDEX of created interface.
    :rtype: int
    :raises RuntimeError: if it is unable to create VxLAN interface on the
        node.
    """
    cmd = u"vxlan_add_del_tunnel_v3"
    args = dict(
        is_add=True,
        instance=Constants.BITWISE_NON_ZERO,
        src_address=IPAddress.create_ip_address_object(
            ip_address(source_ip)
        ),
        dst_address=IPAddress.create_ip_address_object(
            ip_address(destination_ip)
        ),
        mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
        encap_vrf_id=0,
        decap_next_index=Constants.BITWISE_NON_ZERO,
        vni=int(vni)
    )
    err_msg = f"Failed to create VXLAN tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"vxlan_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return sw_if_index
def set_vxlan_bypass(node, interface=None):
    """Add the 'ip4-vxlan-bypass' graph node for a given interface.

    By adding the IPv4 vxlan-bypass graph node to an interface, the node
    checks for and validate input vxlan packet and bypass ip4-lookup,
    ip4-local, ip4-udp-lookup nodes to speedup vxlan packet forwarding.
    This node will cause extra overhead to for non-vxlan packets which is
    kept at a minimum.

    :param node: Node where to set VXLAN bypass.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :raises RuntimeError: if it failed to set VXLAN bypass on interface.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"sw_interface_set_vxlan_bypass"
    args = dict(
        is_ipv6=False,
        sw_if_index=sw_if_index,
        enable=True
    )
    err_msg = f"Failed to set VXLAN bypass on interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def vxlan_dump(node, interface=None):
    """Get VxLAN data for the given interface.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
        If None, information about all VxLAN interfaces is returned.
    :type node: dict
    :type interface: int or str
    :returns: Dictionary containing data for the given VxLAN interface or if
        interface=None, the list of dictionaries with all VxLAN interfaces.
    :rtype: dict or list
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    def process_vxlan_dump(vxlan_dump):
        """Process vxlan dump.

        :param vxlan_dump: Vxlan interface dump.
        :type vxlan_dump: dict
        :returns: Processed vxlan interface dump.
        :rtype: dict
        """
        vxlan_dump[u"src_address"] = str(vxlan_dump[u"src_address"])
        vxlan_dump[u"dst_address"] = str(vxlan_dump[u"dst_address"])
        return vxlan_dump

    if interface is not None:
        sw_if_index = InterfaceUtil.get_interface_index(node, interface)
    else:
        # Dump all VXLAN tunnels.
        sw_if_index = int(Constants.BITWISE_NON_ZERO)

    cmd = u"vxlan_tunnel_dump"
    args = dict(
        sw_if_index=sw_if_index
    )
    err_msg = f"Failed to get VXLAN dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)

    data = list() if interface is None else dict()
    for dump in details:
        if interface is None:
            data.append(process_vxlan_dump(dump))
        elif dump[u"sw_if_index"] == sw_if_index:
            data = process_vxlan_dump(dump)
            break

    logger.debug(f"VXLAN data:\n{data}")
    return data
def create_subinterface(
        node, interface, sub_id, outer_vlan_id=None, inner_vlan_id=None,
        type_subif=None):
    """Create sub-interface on node. It is possible to set required
    sub-interface type and VLAN tag(s).

    :param node: Node to add sub-interface.
    :param interface: Interface name on which create sub-interface.
    :param sub_id: ID of the sub-interface to be created.
    :param outer_vlan_id: Optional outer VLAN ID.
    :param inner_vlan_id: Optional inner VLAN ID.
    :param type_subif: Optional type of sub-interface. Values supported by
        VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
        [default_sub]
    :type node: dict
    :type interface: str or int
    :type sub_id: int
    :type outer_vlan_id: int
    :type inner_vlan_id: int
    :type type_subif: str
    :returns: Name and index of created sub-interface.
    :rtype: tuple
    :raises RuntimeError: If it is not possible to create sub-interface.
    """
    subif_types = type_subif.split()

    # Accumulate flag bits for every requested sub-interface type token.
    flags = 0
    if u"no_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_NO_TAGS
    if u"one_tag" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_ONE_TAG
    if u"two_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_TWO_TAGS
    if u"dot1ad" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DOT1AD
    if u"exact_match" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_EXACT_MATCH
    if u"default_sub" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DEFAULT
    if type_subif == u"default_sub":
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_INNER_VLAN_ID_ANY\
            | SubInterfaceFlags.SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY

    cmd = u"create_subif"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        sub_id=int(sub_id),
        sub_if_flags=flags.value if hasattr(flags, u"value")
        else int(flags),
        outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
        inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0
    )
    err_msg = f"Failed to create sub-interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"subinterface")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{sub_id}", sw_if_index
def create_gre_tunnel_interface(node, source_ip, destination_ip):
    """Create GRE tunnel interface on node.

    :param node: VPP node to add tunnel interface.
    :param source_ip: Source of the GRE tunnel.
    :param destination_ip: Destination of the GRE tunnel.
    :type node: dict
    :type source_ip: str
    :type destination_ip: str
    :returns: Name and index of created GRE tunnel interface.
    :rtype: tuple
    :raises RuntimeError: If unable to create GRE tunnel interface.
    """
    cmd = u"gre_tunnel_add_del"
    # NOTE(review): tunnel sub-fields reconstructed from the VPP gre.api
    # definition; only instance/dst were visible — confirm against API.
    tunnel = dict(
        type=0,
        instance=Constants.BITWISE_NON_ZERO,
        src=str(source_ip),
        dst=str(destination_ip),
        outer_fib_id=0,
        session_id=0
    )
    args = dict(
        is_add=1,
        tunnel=tunnel
    )
    err_msg = f"Failed to create GRE tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"gre_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return ifc_name, sw_if_index
def create_gtpu_tunnel_interface(node, teid, source_ip, destination_ip):
    """Create GTPU interface and return sw if index of created interface.

    :param node: Node where to create GTPU interface.
    :param teid: GTPU Tunnel Endpoint Identifier.
    :param source_ip: Source IP of a GTPU Tunnel End Point.
    :param destination_ip: Destination IP of a GTPU Tunnel End Point.
    :type node: dict
    :type teid: int
    :type source_ip: str
    :type destination_ip: str
    :returns: SW IF INDEX of created interface.
    :rtype: int
    :raises RuntimeError: if it is unable to create GTPU interface on the
        node.
    """
    cmd = u"gtpu_add_del_tunnel"
    # NOTE(review): encap/decap/teid fields reconstructed from the VPP
    # gtpu.api definition; only addresses/mcast were visible — confirm.
    args = dict(
        is_add=True,
        src_address=IPAddress.create_ip_address_object(
            ip_address(source_ip)
        ),
        dst_address=IPAddress.create_ip_address_object(
            ip_address(destination_ip)
        ),
        mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
        encap_vrf_id=0,
        decap_next_index=2,
        teid=teid
    )
    err_msg = f"Failed to create GTPU tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"gtpu_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return sw_if_index
def vpp_enable_gtpu_offload_rx(node, interface, gtpu_if_index):
    """Enable GTPU offload RX onto interface.

    :param node: Node to run command on.
    :param interface: Name of the specific interface.
    :param gtpu_if_index: Index of GTPU tunnel interface.
    :type node: dict
    :type interface: str
    :type gtpu_if_index: int
    """
    sw_if_index = Topology.get_interface_sw_index(node, interface)

    cmd = u"gtpu_offload_rx"
    args = dict(
        hw_if_index=sw_if_index,
        sw_if_index=gtpu_if_index,
        enable=True
    )
    err_msg = f"Failed to enable GTPU offload RX on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
    def vpp_create_loopback(node, mac=None):
        """Create loopback interface on VPP node.

        :param node: Node to create loopback interface on.
        :param mac: Optional MAC address for loopback interface.
        :type node: dict
        :type mac: str
        :returns: SW interface index.
        :rtype: int
        :raises RuntimeError: If it is not possible to create loopback on the
            node.
        """
        cmd = u"create_loopback_instance"
        # NOTE(review): the ``args = dict(`` wrapper and its closing paren
        # look truncated around the keyword fragment below -- verify against
        # upstream before use.
        mac_address=L2Util.mac_to_bin(mac) if mac else 0,
        err_msg = f"Failed to create loopback interface on host {node[u'host']}"
        with PapiSocketExecutor(node) as papi_exec:
            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

        # Register the loopback in the in-memory topology.
        if_key = Topology.add_new_port(node, u"loopback")
        Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
        Topology.update_interface_name(node, if_key, ifc_name)
        # Read back the effective MAC (VPP assigns one when none was given).
        mac = InterfaceUtil.vpp_get_interface_mac(node, ifc_name)
        Topology.update_interface_mac_address(node, if_key, mac)
        # NOTE(review): the documented ``return sw_if_index`` appears to be
        # truncated away -- verify against upstream.
    def vpp_create_bond_interface(
            node, mode, load_balance=None, mac=None, gso=False):
        """Create bond interface on VPP node.

        :param node: DUT node from topology.
        :param mode: Link bonding mode.
        :param load_balance: Load balance (optional, valid for xor and lacp
            modes, otherwise ignored). Default: None.
        :param mac: MAC address to assign to the bond interface (optional).
        :param gso: Enable GSO support (optional). Default: False.
        :type node: dict
        :type mode: str
        :type load_balance: str
        :type mac: str
        :type gso: bool
        :returns: Interface key (name) in topology.
        :rtype: str
        :raises RuntimeError: If it is not possible to create bond interface on
            the node.
        """
        cmd = u"bond_create2"
        # NOTE(review): the ``args = dict(`` wrapper, the ``mode=getattr(``
        # line and the ``gso=gso`` entry look truncated around the fragments
        # below -- verify against upstream before use.
        id=int(Constants.BITWISE_NON_ZERO),
        use_custom_mac=bool(mac is not None),
        mac_address=L2Util.mac_to_bin(mac) if mac else None,
        # Translates the textual mode (e.g. "lacp") into the API enum name.
        f"BOND_API_MODE_{mode.replace(u'-', u'_').upper()}"
        # Load-balance algorithm enum value; 0 when not applicable.
        lb=0 if load_balance is None else getattr(
            LinkBondLoadBalanceAlgo,
            f"BOND_API_LB_ALGO_{load_balance.upper()}"
        err_msg = f"Failed to create bond interface on host {node[u'host']}"
        with PapiSocketExecutor(node) as papi_exec:
            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

        InterfaceUtil.add_eth_interface(
            node, sw_if_index=sw_if_index, ifc_pfx=u"eth_bond"
        if_key = Topology.get_interface_by_sw_index(node, sw_if_index)
        # NOTE(review): the documented ``return if_key`` appears truncated.
    def add_eth_interface(
            node, ifc_name=None, sw_if_index=None, ifc_pfx=None,
        # NOTE(review): signature appears truncated; a trailing
        # ``host_if_key=None):`` parameter (used below) is missing -- verify
        # against upstream.
        """Add ethernet interface to current topology.

        :param node: DUT node from topology.
        :param ifc_name: Name of the interface.
        :param sw_if_index: SW interface index.
        :param ifc_pfx: Interface key prefix.
        :param host_if_key: Host interface key from topology file.
        :type node: dict
        :type ifc_name: str
        :type sw_if_index: int
        :type ifc_pfx: str
        :type host_if_key: str
        """
        if_key = Topology.add_new_port(node, ifc_pfx)

        # Resolve whichever of (name, sw_if_index) was not supplied.
        if ifc_name and sw_if_index is None:
            sw_if_index = InterfaceUtil.vpp_get_interface_sw_index(
        Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
        if sw_if_index and ifc_name is None:
            ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
        Topology.update_interface_name(node, if_key, ifc_name)
        ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_index)
        Topology.update_interface_mac_address(node, if_key, ifc_mac)
        # Copy NUMA and PCI details from the underlying host interface.
        if host_if_key is not None:
            Topology.set_interface_numa_node(
                node, if_key, Topology.get_interface_numa_node(
            Topology.update_interface_pci_address(
                node, if_key, Topology.get_interface_pci_addr(node, host_if_key)
    def vpp_create_avf_interface(
            node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0):
        """Create AVF interface on VPP node.

        :param node: DUT node from topology.
        :param if_key: Interface key from topology file of interface
            to be bound to i40evf driver.
        :param num_rx_queues: Number of RX queues.
        :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
        :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
        :type node: dict
        :type if_key: str
        :type num_rx_queues: int
        :type rxq_size: int
        :type txq_size: int
        :returns: AVF interface key (name) in topology.
        :rtype: str
        :raises RuntimeError: If it is not possible to create AVF interface on
            the node.
        """
        PapiSocketExecutor.run_cli_cmd(
            node, u"set logging class avf level debug"

        vf_pci_addr = Topology.get_interface_pci_addr(node, if_key)
        # NOTE(review): the ``cmd = u"avf_create"`` assignment and the
        # ``args = dict(`` wrapper appear truncated around the fragments
        # below -- verify against upstream before use.
        pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
        rxq_num=int(num_rx_queues) if num_rx_queues else 0,
        err_msg = f"Failed to create AVF interface on host {node[u'host']}"

        # FIXME: Remove once the fw/driver is upgraded.
        # NOTE(review): a retry loop with a ``try:`` matching the
        # ``except AssertionError:`` below appears truncated here.
        with PapiSocketExecutor(node) as papi_exec:
            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(
        except AssertionError:
            logger.error(err_msg)
            raise AssertionError(err_msg)

        InterfaceUtil.add_eth_interface(
            node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",

        return Topology.get_interface_by_sw_index(node, sw_if_index)
    def vpp_create_af_xdp_interface(
            node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
        # NOTE(review): signature appears truncated; a trailing
        # ``mode=u"auto"):`` parameter (used below) is missing -- verify
        # against upstream.
        """Create AF_XDP interface on VPP node.

        :param node: DUT node from topology.
        :param if_key: Physical interface key from topology file of interface
            to be bound to compatible driver.
        :param num_rx_queues: Number of RX queues. (Optional, Default: none)
        :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
        :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
        :param mode: AF_XDP interface mode. (Optional, Default: auto).
        :type node: dict
        :type if_key: str
        :type num_rx_queues: int
        :type rxq_size: int
        :type txq_size: int
        :type mode: str
        :returns: Interface key (name) in topology file.
        :rtype: str
        :raises RuntimeError: If it is not possible to create AF_XDP interface
            on the node.
        """
        PapiSocketExecutor.run_cli_cmd(
            node, u"set logging class af_xdp level debug"

        cmd = u"af_xdp_create_v2"
        pci_addr = Topology.get_interface_pci_addr(node, if_key)
        # NOTE(review): the ``args = dict(`` wrapper and queue-size entries
        # appear truncated around the fragments below.
        name=InterfaceUtil.pci_to_eth(node, pci_addr),
        host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
        rxq_num=int(num_rx_queues) if num_rx_queues else 0,
        mode=getattr(AfXdpMode, f"AF_XDP_API_MODE_{mode.upper()}").value
        err_msg = f"Failed to create AF_XDP interface on host {node[u'host']}"
        with PapiSocketExecutor(node) as papi_exec:
            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

        # AF_XDP inherits the host interface MAC; propagate topology's MAC.
        InterfaceUtil.vpp_set_interface_mac(
            node, sw_if_index, Topology.get_interface_mac(node, if_key)
        InterfaceUtil.add_eth_interface(
            node, sw_if_index=sw_if_index, ifc_pfx=u"eth_af_xdp",

        return Topology.get_interface_by_sw_index(node, sw_if_index)
    def vpp_create_rdma_interface(
            node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
        # NOTE(review): signature appears truncated; a trailing
        # ``mode=u"auto"):`` parameter (used below) is missing -- verify
        # against upstream.
        """Create RDMA interface on VPP node.

        :param node: DUT node from topology.
        :param if_key: Physical interface key from topology file of interface
            to be bound to rdma-core driver.
        :param num_rx_queues: Number of RX queues.
        :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
        :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
        :param mode: RDMA interface mode - auto/ibv/dv.
        :type node: dict
        :type if_key: str
        :type num_rx_queues: int
        :type rxq_size: int
        :type txq_size: int
        :type mode: str
        :returns: Interface key (name) in topology file.
        :rtype: str
        :raises RuntimeError: If it is not possible to create RDMA interface on
            the node.
        """
        PapiSocketExecutor.run_cli_cmd(
            node, u"set logging class rdma level debug"

        cmd = u"rdma_create_v3"
        pci_addr = Topology.get_interface_pci_addr(node, if_key)
        # NOTE(review): the ``args = dict(`` wrapper and queue-size entries
        # appear truncated around the fragments below.
        name=InterfaceUtil.pci_to_eth(node, pci_addr),
        host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
        rxq_num=int(num_rx_queues) if num_rx_queues else 0,
        mode=getattr(RdmaMode, f"RDMA_API_MODE_{mode.upper()}").value,
        # Note: Set True for non-jumbo packets.
        # TODO: Apply desired RSS flags.
        err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
        with PapiSocketExecutor(node) as papi_exec:
            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

        # RDMA interface inherits no MAC; propagate topology's MAC.
        InterfaceUtil.vpp_set_interface_mac(
            node, sw_if_index, Topology.get_interface_mac(node, if_key)
        InterfaceUtil.add_eth_interface(
            node, sw_if_index=sw_if_index, ifc_pfx=u"eth_rdma",

        return Topology.get_interface_by_sw_index(node, sw_if_index)
1406 def vpp_add_bond_member(node, interface, bond_if):
1407 """Add member interface to bond interface on VPP node.
1409 :param node: DUT node from topology.
1410 :param interface: Physical interface key from topology file.
1411 :param bond_if: Load balance
1413 :type interface: str
1415 :raises RuntimeError: If it is not possible to add member to bond
1416 interface on the node.
1418 cmd = u"bond_add_member"
1420 sw_if_index=Topology.get_interface_sw_index(node, interface),
1421 bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
1423 is_long_timeout=False
1425 err_msg = f"Failed to add member {interface} to bond interface " \
1426 f"{bond_if} on host {node[u'host']}"
1427 with PapiSocketExecutor(node) as papi_exec:
1428 papi_exec.add(cmd, **args).get_reply(err_msg)
    def vpp_show_bond_data_on_node(node, verbose=False):
        """Show (detailed) bond information on VPP node.

        :param node: DUT node from topology.
        :param verbose: If detailed information is required or not.
        :type node: dict
        :type verbose: bool
        """
        cmd = u"sw_bond_interface_dump"
        err_msg = f"Failed to get bond interface dump on host {node[u'host']}"

        data = f"Bond data on node {node[u'host']}:\n"
        with PapiSocketExecutor(node) as papi_exec:
            details = papi_exec.add(cmd).get_details(err_msg)

        for bond in details:
            data += f"{bond[u'interface_name']}\n"
            # Strip the API enum prefixes for human-readable output.
            data += u" mode: {m}\n".format(
                m=bond[u"mode"].name.replace(u"BOND_API_MODE_", u"").lower()
            data += u" load balance: {lb}\n".format(
                lb=bond[u"lb"].name.replace(u"BOND_API_LB_ALGO_", u"").lower()
            data += f" number of active members: {bond[u'active_members']}\n"
            # NOTE(review): an ``if verbose:`` guard appears truncated here;
            # the member enumeration below likely belongs to the verbose
            # branch -- verify against upstream.
            member_data = InterfaceUtil.vpp_bond_member_dump(
                node, Topology.get_interface_by_sw_index(
                    node, bond[u"sw_if_index"]
            for member in member_data:
                if not member[u"is_passive"]:
                    data += f" {member[u'interface_name']}\n"
            data += f" number of members: {bond[u'members']}\n"
            for member in member_data:
                data += f" {member[u'interface_name']}\n"
            data += f" interface id: {bond[u'id']}\n"
            data += f" sw_if_index: {bond[u'sw_if_index']}\n"
        # NOTE(review): a final ``logger.info(data)`` appears truncated.
1473 def vpp_bond_member_dump(node, interface):
1474 """Get bond interface slave(s) data on VPP node.
1476 :param node: DUT node from topology.
1477 :param interface: Physical interface key from topology file.
1479 :type interface: str
1480 :returns: Bond slave interface data.
1483 cmd = u"sw_member_interface_dump"
1485 sw_if_index=Topology.get_interface_sw_index(node, interface)
1487 err_msg = f"Failed to get slave dump on host {node[u'host']}"
1489 with PapiSocketExecutor(node) as papi_exec:
1490 details = papi_exec.add(cmd, **args).get_details(err_msg)
1492 logger.debug(f"Member data:\n{details}")
1496 def vpp_show_bond_data_on_all_nodes(nodes, verbose=False):
1497 """Show (detailed) bond information on all VPP nodes in DICT__nodes.
1499 :param nodes: Nodes in the topology.
1500 :param verbose: If detailed information is required or not.
1504 for node_data in nodes.values():
1505 if node_data[u"type"] == NodeType.DUT:
1506 InterfaceUtil.vpp_show_bond_data_on_node(node_data, verbose)
1509 def vpp_enable_input_acl_interface(
1510 node, interface, ip_version, table_index):
1511 """Enable input acl on interface.
1513 :param node: VPP node to setup interface for input acl.
1514 :param interface: Interface to setup input acl.
1515 :param ip_version: Version of IP protocol.
1516 :param table_index: Classify table index.
1518 :type interface: str or int
1519 :type ip_version: str
1520 :type table_index: int
1522 cmd = u"input_acl_set_interface"
1524 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1525 ip4_table_index=table_index if ip_version == u"ip4"
1526 else Constants.BITWISE_NON_ZERO,
1527 ip6_table_index=table_index if ip_version == u"ip6"
1528 else Constants.BITWISE_NON_ZERO,
1529 l2_table_index=table_index if ip_version == u"l2"
1530 else Constants.BITWISE_NON_ZERO,
1532 err_msg = f"Failed to enable input acl on interface {interface}"
1533 with PapiSocketExecutor(node) as papi_exec:
1534 papi_exec.add(cmd, **args).get_reply(err_msg)
1537 def get_interface_classify_table(node, interface):
1538 """Get name of classify table for the given interface.
1540 TODO: Move to Classify.py.
1542 :param node: VPP node to get data from.
1543 :param interface: Name or sw_if_index of a specific interface.
1545 :type interface: str or int
1546 :returns: Classify table name.
1549 if isinstance(interface, str):
1550 sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
1552 sw_if_index = interface
1554 cmd = u"classify_table_by_interface"
1556 sw_if_index=sw_if_index
1558 err_msg = f"Failed to get classify table name by interface {interface}"
1559 with PapiSocketExecutor(node) as papi_exec:
1560 reply = papi_exec.add(cmd, **args).get_reply(err_msg)
1565 def get_sw_if_index(node, interface_name):
1566 """Get sw_if_index for the given interface from actual interface dump.
1568 FIXME: Delete and redirect callers to vpp_get_interface_sw_index.
1570 :param node: VPP node to get interface data from.
1571 :param interface_name: Name of the specific interface.
1573 :type interface_name: str
1574 :returns: sw_if_index of the given interface.
1577 interface_data = InterfaceUtil.vpp_get_interface_data(
1578 node, interface=interface_name
1580 return interface_data.get(u"sw_if_index")
1583 def vxlan_gpe_dump(node, interface_name=None):
1584 """Get VxLAN GPE data for the given interface.
1586 :param node: VPP node to get interface data from.
1587 :param interface_name: Name of the specific interface. If None,
1588 information about all VxLAN GPE interfaces is returned.
1590 :type interface_name: str
1591 :returns: Dictionary containing data for the given VxLAN GPE interface
1592 or if interface=None, the list of dictionaries with all VxLAN GPE
1594 :rtype: dict or list
1596 def process_vxlan_gpe_dump(vxlan_dump):
1597 """Process vxlan_gpe dump.
1599 :param vxlan_dump: Vxlan_gpe nterface dump.
1600 :type vxlan_dump: dict
1601 :returns: Processed vxlan_gpe interface dump.
1604 if vxlan_dump[u"is_ipv6"]:
1605 vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"])
1606 vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"])
1608 vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"][0:4])
1609 vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"][0:4])
1612 if interface_name is not None:
1613 sw_if_index = InterfaceUtil.get_interface_index(
1614 node, interface_name
1617 sw_if_index = int(Constants.BITWISE_NON_ZERO)
1619 cmd = u"vxlan_gpe_tunnel_dump"
1621 sw_if_index=sw_if_index
1623 err_msg = f"Failed to get VXLAN-GPE dump on host {node[u'host']}"
1624 with PapiSocketExecutor(node) as papi_exec:
1625 details = papi_exec.add(cmd, **args).get_details(err_msg)
1627 data = list() if interface_name is None else dict()
1628 for dump in details:
1629 if interface_name is None:
1630 data.append(process_vxlan_gpe_dump(dump))
1631 elif dump[u"sw_if_index"] == sw_if_index:
1632 data = process_vxlan_gpe_dump(dump)
1635 logger.debug(f"VXLAN-GPE data:\n{data}")
1639 def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
1640 """Assign VPP interface to specific VRF/FIB table.
1642 :param node: VPP node where the FIB and interface are located.
1643 :param interface: Interface to be assigned to FIB.
1644 :param table_id: VRF table ID.
1645 :param ipv6: Assign to IPv6 table. Default False.
1647 :type interface: str or int
1651 cmd = u"sw_interface_set_table"
1653 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1655 vrf_id=int(table_id)
1657 err_msg = f"Failed to assign interface {interface} to FIB table"
1658 with PapiSocketExecutor(node) as papi_exec:
1659 papi_exec.add(cmd, **args).get_reply(err_msg)
1662 def set_linux_interface_mac(
1663 node, interface, mac, namespace=None, vf_id=None):
1664 """Set MAC address for interface in linux.
1666 :param node: Node where to execute command.
1667 :param interface: Interface in namespace.
1668 :param mac: MAC to be assigned to interface.
1669 :param namespace: Execute command in namespace. Optional
1670 :param vf_id: Virtual Function id. Optional
1672 :type interface: str
1674 :type namespace: str
1677 mac_str = f"vf {vf_id} mac {mac}" if vf_id is not None \
1678 else f"address {mac}"
1679 ns_str = f"ip netns exec {namespace}" if namespace else u""
1681 cmd = f"{ns_str} ip link set {interface} {mac_str}"
1682 exec_cmd_no_error(node, cmd, sudo=True)
1685 def set_linux_interface_promisc(
1686 node, interface, namespace=None, vf_id=None, state=u"on"):
1687 """Set promisc state for interface in linux.
1689 :param node: Node where to execute command.
1690 :param interface: Interface in namespace.
1691 :param namespace: Exec command in namespace. (Optional, Default: None)
1692 :param vf_id: Virtual Function id. (Optional, Default: None)
1693 :param state: State of feature. (Optional, Default: on)
1695 :type interface: str
1696 :type namespace: str
1700 promisc_str = f"vf {vf_id} promisc {state}" if vf_id is not None \
1701 else f"promisc {state}"
1702 ns_str = f"ip netns exec {namespace}" if namespace else u""
1704 cmd = f"{ns_str} ip link set dev {interface} {promisc_str}"
1705 exec_cmd_no_error(node, cmd, sudo=True)
1708 def set_linux_interface_trust_on(
1709 node, interface, namespace=None, vf_id=None):
1710 """Set trust on (promisc) for interface in linux.
1712 :param node: Node where to execute command.
1713 :param interface: Interface in namespace.
1714 :param namespace: Execute command in namespace. Optional
1715 :param vf_id: Virtual Function id. Optional
1717 :type interface: str
1718 :type namespace: str
1721 trust_str = f"vf {vf_id} trust on" if vf_id is not None else u"trust on"
1722 ns_str = f"ip netns exec {namespace}" if namespace else u""
1724 cmd = f"{ns_str} ip link set dev {interface} {trust_str}"
1725 exec_cmd_no_error(node, cmd, sudo=True)
1728 def set_linux_interface_spoof_off(
1729 node, interface, namespace=None, vf_id=None):
1730 """Set spoof off for interface in linux.
1732 :param node: Node where to execute command.
1733 :param interface: Interface in namespace.
1734 :param namespace: Execute command in namespace. Optional
1735 :param vf_id: Virtual Function id. Optional
1737 :type interface: str
1738 :type namespace: str
1741 spoof_str = f"vf {vf_id} spoof off" if vf_id is not None \
1743 ns_str = f"ip netns exec {namespace}" if namespace else u""
1745 cmd = f"{ns_str} ip link set dev {interface} {spoof_str}"
1746 exec_cmd_no_error(node, cmd, sudo=True)
1749 def set_linux_interface_state(
1750 node, interface, namespace=None, state=u"up"):
1751 """Set operational state for interface in linux.
1753 :param node: Node where to execute command.
1754 :param interface: Interface in namespace.
1755 :param namespace: Execute command in namespace. Optional
1756 :param state: Up/Down.
1758 :type interface: str
1759 :type namespace: str
1762 ns_str = f"ip netns exec {namespace}" if namespace else u""
1764 cmd = f"{ns_str} ip link set dev {interface} {state}"
1765 exec_cmd_no_error(node, cmd, sudo=True)
    def init_interface(node, ifc_key, driver, numvfs=0, osi_layer=u"L2"):
        """Init PCI device. Check driver compatibility and bind to proper
        drivers. Optionally create NIC VFs.

        :param node: DUT node.
        :param ifc_key: Interface key from topology file.
        :param driver: Base driver to use.
        :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
        :param osi_layer: OSI Layer type to initialize TG with.
            Default value "L2" sets linux interface spoof off.
        :type node: dict
        :type ifc_key: str
        :type driver: str
        :type numvfs: int
        :type osi_layer: str
        :returns: Virtual Function topology interface keys.
        :rtype: list
        :raises RuntimeError: If a reason preventing initialization is found.
        """
        kernel_driver = Topology.get_interface_driver(node, ifc_key)
        if driver == u"avf":
            if kernel_driver not in (
                    u"ice", u"iavf", u"i40e", u"i40evf"):
                # NOTE(review): a ``raise RuntimeError(`` wrapper appears
                # truncated before the message fragment below.
                f"AVF needs ice or i40e compatible driver, not "
                f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
            vf_keys = InterfaceUtil.init_generic_interface(
                node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
        elif driver == u"af_xdp":
            if kernel_driver not in (
                    u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core",
                # NOTE(review): the driver tuple's tail and the
                # ``raise RuntimeError(`` wrapper appear truncated here.
                f"AF_XDP needs ice/i40e/rdma/ixgbe compatible driver, not "
                f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
            vf_keys = InterfaceUtil.init_generic_interface(
                node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
        elif driver == u"rdma-core":
            vf_keys = InterfaceUtil.init_generic_interface(
                node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
        # NOTE(review): the final unsupported-driver ``else: raise`` branch
        # and the documented ``return vf_keys`` appear truncated.
    def init_generic_interface(node, ifc_key, numvfs=0, osi_layer=u"L2"):
        """Init PCI device. Bind to proper drivers. Optionally create NIC VFs.

        :param node: DUT node.
        :param ifc_key: Interface key from topology file.
        :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
        :param osi_layer: OSI Layer type to initialize TG with.
            Default value "L2" sets linux interface spoof off.
        :type node: dict
        :type ifc_key: str
        :type numvfs: int
        :type osi_layer: str
        :returns: Virtual Function topology interface keys.
        :rtype: list
        :raises RuntimeError: If a reason preventing initialization is found.
        """
        # Read PCI address and driver.
        pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
        pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
        uio_driver = Topology.get_uio_driver(node)
        kernel_driver = Topology.get_interface_driver(node, ifc_key)
        current_driver = DUTSetup.get_pci_dev_driver(
            node, pf_pci_addr.replace(u":", r"\:"))
        # Shell-expanded netdev name of the physical function.
        pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"

        VPPUtil.stop_vpp_service(node)
        if current_driver != kernel_driver:
            # PCI device must be re-bound to kernel driver before creating VFs.
            DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
            # Stop VPP to prevent deadlock.
            # Unbind from current driver if bound.
            DUTSetup.pci_driver_unbind(node, pf_pci_addr)
            # Bind to kernel driver.
            DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)

        # Initialize PCI VFs.
        DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs=numvfs)

        if osi_layer == u"L2":
            InterfaceUtil.set_linux_interface_promisc(node, pf_dev)

        # NOTE(review): a ``vf_ifc_keys = []`` initialization appears
        # truncated before this loop (it is appended to below).
        # Set MAC address and bind each virtual function to uio driver.
        for vf_id in range(numvfs):
            # Derive a per-VF MAC from five PF MAC bytes plus the VF id.
            vf_mac_addr = u":".join(
                [pf_mac_addr[0], pf_mac_addr[2], pf_mac_addr[3], pf_mac_addr[4],
                 pf_mac_addr[5], f"{vf_id:02x}"
            InterfaceUtil.set_linux_interface_trust_on(
                node, pf_dev, vf_id=vf_id
            if osi_layer == u"L2":
                InterfaceUtil.set_linux_interface_spoof_off(
                    node, pf_dev, vf_id=vf_id
            InterfaceUtil.set_linux_interface_mac(
                node, pf_dev, vf_mac_addr, vf_id=vf_id
            InterfaceUtil.set_linux_interface_state(
                node, pf_dev, state=u"up"

            vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
            current_driver = DUTSetup.get_pci_dev_driver(
                node, vf_pci_addr.replace(":", r"\:")
            # NOTE(review): an ``if current_driver:`` guard appears truncated
            # before the unbind call below.
            DUTSetup.pci_vf_driver_unbind(
                node, pf_pci_addr, vf_id
            DUTSetup.pci_vf_driver_bind(
                node, pf_pci_addr, vf_id, uio_driver

            # Add newly created ports into topology file
            vf_ifc_name = f"{ifc_key}_vif"
            vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
            Topology.update_interface_name(
                node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
            Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
            Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
            # VFs share the NUMA node of their parent interface.
            Topology.set_interface_numa_node(
                node, vf_ifc_key, Topology.get_interface_numa_node(
            vf_ifc_keys.append(vf_ifc_key)
        # NOTE(review): the documented ``return vf_ifc_keys`` appears
        # truncated.
1913 def vpp_sw_interface_rx_placement_dump(node):
1914 """Dump VPP interface RX placement on node.
1916 :param node: Node to run command on.
1918 :returns: Thread mapping information as a list of dictionaries.
1921 cmd = u"sw_interface_rx_placement_dump"
1922 err_msg = f"Failed to run '{cmd}' PAPI command on host {node[u'host']}!"
1923 with PapiSocketExecutor(node) as papi_exec:
1924 for ifc in node[u"interfaces"].values():
1925 if ifc[u"vpp_sw_index"] is not None:
1926 papi_exec.add(cmd, sw_if_index=ifc[u"vpp_sw_index"])
1927 details = papi_exec.get_details(err_msg)
1928 return sorted(details, key=lambda k: k[u"sw_if_index"])
1931 def vpp_sw_interface_rx_placement_dump_on_all_duts(nodes):
1932 """Dump VPP interface RX placement on all given nodes.
1934 :param nodes: Nodes to run command on.
1936 :returns: Thread mapping information as a list of dictionaries.
1939 for node in nodes.values():
1940 if node[u"type"] == NodeType.DUT:
1941 InterfaceUtil.vpp_sw_interface_rx_placement_dump(node)
1944 def vpp_sw_interface_set_rx_placement(
1945 node, sw_if_index, queue_id, worker_id):
1946 """Set interface RX placement to worker on node.
1948 :param node: Node to run command on.
1949 :param sw_if_index: VPP SW interface index.
1950 :param queue_id: VPP interface queue ID.
1951 :param worker_id: VPP worker ID (indexing from 0).
1953 :type sw_if_index: int
1955 :type worker_id: int
1956 :raises RuntimeError: If failed to run command on host or if no API
1959 cmd = u"sw_interface_set_rx_placement"
1960 err_msg = f"Failed to set interface RX placement to worker " \
1961 f"on host {node[u'host']}!"
1963 sw_if_index=sw_if_index,
1965 worker_id=worker_id,
1968 with PapiSocketExecutor(node) as papi_exec:
1969 papi_exec.add(cmd, **args).get_reply(err_msg)
1972 def vpp_round_robin_rx_placement(
1973 node, prefix, workers=None):
1974 """Set Round Robin interface RX placement on all worker threads
1977 If specified, workers limits the number of physical cores used
1978 for data plane I/O work. Other cores are presumed to do something else,
1979 e.g. asynchronous crypto processing.
1980 None means all workers are used for data plane work.
1982 :param node: Topology nodes.
1983 :param prefix: Interface name prefix.
1984 :param workers: Comma separated worker index numbers intended for
1990 thread_data = VPPUtil.vpp_show_threads(node)
1991 worker_cnt = len(thread_data) - 1
1996 for item in thread_data:
1997 if str(item.cpu_id) in workers.split(u","):
1998 worker_ids.append(item.id)
2000 for item in thread_data:
2001 if u"vpp_main" not in item.name:
2002 worker_ids.append(item.id)
2005 for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
2006 for interface in node[u"interfaces"].values():
2007 if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
2008 and prefix in interface[u"name"]:
2009 InterfaceUtil.vpp_sw_interface_set_rx_placement(
2010 node, placement[u"sw_if_index"], placement[u"queue_id"],
2011 worker_ids[worker_idx % len(worker_ids)] - 1
2016 def vpp_round_robin_rx_placement_on_all_duts(
2017 nodes, prefix, use_dp_cores=False):
2018 """Set Round Robin interface RX placement on worker threads
2021 If specified, workers limits the number of physical cores used
2022 for data plane I/O work. Other cores are presumed to do something else,
2023 e.g. asynchronous crypto processing.
2024 None means all cores are used for data plane work.
2026 :param nodes: Topology nodes.
2027 :param prefix: Interface name prefix.
2028 :param use_dp_cores: Limit to dataplane cores.
2031 :type use_dp_cores: bool
2033 for node_name, node in nodes.items():
2034 if node["type"] == NodeType.DUT:
2037 workers = BuiltIn().get_variable_value(
2038 f"${{{node_name}_cpu_dp}}"
2040 InterfaceUtil.vpp_round_robin_rx_placement(
2041 node, prefix, workers