1 # Copyright (c) 2023 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Interface util library."""
16 from time import sleep
17 from enum import IntEnum
19 from ipaddress import ip_address
20 from robot.api import logger
22 from resources.libraries.python.Constants import Constants
23 from resources.libraries.python.DUTSetup import DUTSetup
24 from resources.libraries.python.IPAddress import IPAddress
25 from resources.libraries.python.L2Util import L2Util
26 from resources.libraries.python.PapiExecutor import PapiSocketExecutor
27 from resources.libraries.python.parsers.JsonParser import JsonParser
28 from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
29 from resources.libraries.python.topology import NodeType, Topology
30 from resources.libraries.python.VPPUtil import VPPUtil
class InterfaceStatusFlags(IntEnum):
    """Interface status flags.

    Bit flags reported/accepted by the VPP interface API; combine with ``|``.
    """
    # Interface is administratively enabled.
    IF_STATUS_API_FLAG_ADMIN_UP = 1 << 0
    # Interface link (carrier) is up.
    IF_STATUS_API_FLAG_LINK_UP = 1 << 1
class MtuProto(IntEnum):
    """MTU protocol (VPP API enumeration).

    NOTE(review): the visible source appears truncated — members other
    than MPLS (e.g. L3/IP4/IP6) are likely defined in the full file;
    confirm against the original before relying on this listing.
    """
    MTU_PROTO_API_MPLS = 3
class LinkDuplex(IntEnum):
    """Link duplex mode (VPP API enumeration)."""
    LINK_DUPLEX_API_UNKNOWN = 0
    LINK_DUPLEX_API_HALF = 1
    LINK_DUPLEX_API_FULL = 2
class SubInterfaceFlags(IntEnum):
    """Sub-interface flags.

    Bitmask values used by the VPP ``create_subif`` API; values may be
    OR-ed together to describe the sub-interface type and matching.
    """
    SUB_IF_API_FLAG_NO_TAGS = 1 << 0
    SUB_IF_API_FLAG_ONE_TAG = 1 << 1
    SUB_IF_API_FLAG_TWO_TAGS = 1 << 2
    SUB_IF_API_FLAG_DOT1AD = 1 << 3
    SUB_IF_API_FLAG_EXACT_MATCH = 1 << 4
    SUB_IF_API_FLAG_DEFAULT = 1 << 5
    SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY = 1 << 6
    SUB_IF_API_FLAG_INNER_VLAN_ID_ANY = 1 << 7
    SUB_IF_API_FLAG_DOT1AH = 1 << 8
class RxMode(IntEnum):
    """RX mode (VPP API enumeration)."""
    RX_MODE_API_UNKNOWN = 0
    RX_MODE_API_POLLING = 1
    RX_MODE_API_INTERRUPT = 2
    RX_MODE_API_ADAPTIVE = 3
    RX_MODE_API_DEFAULT = 4
class IfType(IntEnum):
    """Interface type (VPP API enumeration).

    NOTE(review): the visible source appears truncated — members other
    than HARDWARE (e.g. sub-interface / P2P types) are likely defined in
    the full file; confirm against the original.
    """
    IF_API_TYPE_HARDWARE = 0
class LinkBondLoadBalanceAlgo(IntEnum):
    """Link bonding load balance algorithm.

    Values accepted by the VPP bond API; abbreviations per member names
    (L2/L23/L34 = header-hash variants, RR = round-robin, BC = broadcast,
    AB = active-backup).
    """
    BOND_API_LB_ALGO_L2 = 0
    BOND_API_LB_ALGO_L34 = 1
    BOND_API_LB_ALGO_L23 = 2
    BOND_API_LB_ALGO_RR = 3
    BOND_API_LB_ALGO_BC = 4
    BOND_API_LB_ALGO_AB = 5
class LinkBondMode(IntEnum):
    """Link bonding mode.

    Values accepted by the VPP bond API (1-based).
    """
    BOND_API_MODE_ROUND_ROBIN = 1
    BOND_API_MODE_ACTIVE_BACKUP = 2
    BOND_API_MODE_XOR = 3
    BOND_API_MODE_BROADCAST = 4
    BOND_API_MODE_LACP = 5
class RdmaMode(IntEnum):
    """RDMA interface mode.

    Values accepted by the VPP rdma driver API.
    """
    RDMA_API_MODE_AUTO = 0
    RDMA_API_MODE_IBV = 1
class AfXdpMode(IntEnum):
    """AF_XDP interface mode.

    Values accepted by the VPP af_xdp driver API.
    """
    AF_XDP_API_MODE_AUTO = 0
    AF_XDP_API_MODE_COPY = 1
    AF_XDP_API_MODE_ZERO_COPY = 2
121 """General utilities for managing interfaces"""
def pci_to_int(pci_str):
    """Convert PCI address from string format (0000:18:0a.0) to
    integer representation (169345024).

    :param pci_str: PCI address in string representation.
    :type pci_str: str
    :returns: Integer representation of PCI address.
    :rtype: int
    """
    # "dddd:bb:dd.f" -> [domain, bus, device, function], all hex fields.
    pci = list(pci_str.split(u":")[0:2])
    pci.extend(pci_str.split(u":")[2].split(u"."))

    # Packing: domain in bits 0-15, bus 16-23, device 24-28, function 29-31.
    return (int(pci[0], 16) | int(pci[1], 16) << 16 |
            int(pci[2], 16) << 24 | int(pci[3], 16) << 29)
def pci_to_eth(node, pci_str):
    """Convert PCI address on DUT to Linux ethernet name.

    :param node: DUT node.
    :param pci_str: PCI address.
    :type node: dict
    :type pci_str: str
    :returns: Ethernet name.
    :rtype: str
    :raises RuntimeError: If the PCI address cannot be resolved to an
        ethernet device name on the node.
    """
    # The kernel exposes the netdev name as the single entry under
    # /sys/bus/pci/devices/<pci>/net/.
    cmd = f"basename /sys/bus/pci/devices/{pci_str}/net/*"
    try:
        stdout, _ = exec_cmd_no_error(node, cmd)
    except RuntimeError:
        raise RuntimeError(f"Cannot convert {pci_str} to ethernet name!")

    return stdout.strip()
def get_interface_index(node, interface):
    """Get interface sw_if_index from topology file.

    :param node: Node where the interface is.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: str or int
    :returns: SW interface index.
    :rtype: int
    :raises TypeError: If interface is neither an index nor a name string.
    """
    try:
        sw_if_index = int(interface)
    except ValueError:
        # Not numeric: resolve via topology, first as an interface key,
        # then as an interface name.
        sw_if_index = Topology.get_interface_sw_index(node, interface)
        if sw_if_index is None:
            sw_if_index = \
                Topology.get_interface_sw_index_by_name(node, interface)
    except TypeError as err:
        raise TypeError(f"Wrong interface format {interface}") from err

    return sw_if_index
def set_interface_state(node, interface, state, if_type=u"key"):
    """Set interface state on a node.

    Function can be used for DUTs as well as for TGs.

    :param node: Node where the interface is.
    :param interface: Interface key or sw_if_index or name.
    :param state: One of 'up' or 'down'.
    :param if_type: Interface type - 'key' or 'name'.
    :type node: dict
    :type interface: str or int
    :type state: str
    :type if_type: str
    :raises ValueError: If the interface type is unknown.
    :raises ValueError: If the state of interface is unexpected.
    :raises ValueError: If the node has an unknown node type.
    """
    if if_type == u"key":
        if isinstance(interface, str):
            sw_if_index = Topology.get_interface_sw_index(node, interface)
            iface_name = Topology.get_interface_name(node, interface)
        else:
            # Interface is already a sw_if_index.
            sw_if_index = interface
    elif if_type == u"name":
        iface_key = Topology.get_interface_by_name(node, interface)
        if iface_key is not None:
            sw_if_index = Topology.get_interface_sw_index(node, iface_key)
        iface_name = interface
    else:
        raise ValueError(f"Unknown if_type: {if_type}")

    if node[u"type"] == NodeType.DUT:
        if sw_if_index is None:
            raise ValueError(
                f"Interface index for {interface} not assigned by VPP."
            )
        if state == u"up":
            flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
        elif state == u"down":
            flags = 0
        else:
            raise ValueError(f"Unexpected interface state: {state}")
        cmd = u"sw_interface_set_flags"
        err_msg = f"Failed to set interface state on host {node[u'host']}"
        args = dict(
            sw_if_index=int(sw_if_index),
            flags=flags
        )
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
    elif node[u"type"] == NodeType.TG or node[u"type"] == NodeType.VM:
        cmd = f"ip link set {iface_name} {state}"
        exec_cmd_no_error(node, cmd, sudo=True)
    else:
        raise ValueError(
            f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
        )
def set_interface_state_pci(
        node, pf_pcis, namespace=None, state=u"up"):
    """Set operational state for interface specified by PCI address.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param namespace: Exec command in namespace. (Optional, Default: none)
    :param state: Up/Down. (Optional, default: up)
    :type node: dict
    :type pf_pcis: list
    :type namespace: str
    :type state: str
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        InterfaceUtil.set_linux_interface_state(
            node, pf_eth, namespace=namespace, state=state
        )
def set_interface_mtu(node, pf_pcis, mtu=9200):
    """Set Ethernet MTU for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param mtu: MTU to set. Default: 9200.
    :type node: dict
    :type pf_pcis: list
    :type mtu: int
    :raises RuntimeError: If failed to set MTU on interface.
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ip link set {pf_eth} mtu {mtu}"
        exec_cmd_no_error(node, cmd, sudo=True)
def set_interface_channels(
        node, pf_pcis, num_queues=1, channel=u"combined"):
    """Set interface channels for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param num_queues: Number of channels. (Optional, Default: 1)
    :param channel: Channel type. (Optional, Default: combined)
    :type node: dict
    :type pf_pcis: list
    :type num_queues: int
    :type channel: str
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ethtool --set-channels {pf_eth} {channel} {num_queues}"
        exec_cmd_no_error(node, cmd, sudo=True)
def set_interface_xdp_off(node, pf_pcis):
    """Detaches any currently attached XDP/BPF program from the specified
    interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :type node: dict
    :type pf_pcis: list
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ip link set dev {pf_eth} xdp off"
        exec_cmd_no_error(node, cmd, sudo=True)
def set_interface_flow_control(node, pf_pcis, rxf=u"off", txf=u"off"):
    """Set Ethernet flow control for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param rxf: RX flow. (Optional, Default: off).
    :param txf: TX flow. (Optional, Default: off).
    :type node: dict
    :type pf_pcis: list
    :type rxf: str
    :type txf: str
    :raises RuntimeError: If failed to set flow control on interface.
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ethtool -A {pf_eth} rx {rxf} tx {txf}"
        ret_code, _, _ = exec_cmd(node, cmd, sudo=True)
        # ethtool exits 78 when the requested setting is already active;
        # treat that as success.
        if int(ret_code) not in (0, 78):
            # Bug fix: the original message lacked the f-prefix, so
            # "{pf_eth}" was emitted literally instead of the device name.
            raise RuntimeError(f"Failed to set flow control on {pf_eth}!")
def set_pci_parameter(node, pf_pcis, key, value):
    """Set PCI parameter for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param key: Key to set.
    :param value: Value to set.
    :type node: dict
    :type pf_pcis: list
    :type key: str
    :type value: str
    """
    for pf_pci in pf_pcis:
        cmd = f"setpci -s {pf_pci} {key}={value}"
        exec_cmd_no_error(node, cmd, sudo=True)
def vpp_set_interface_mtu(node, interface, mtu=9200):
    """Set Ethernet MTU on interface.

    :param node: VPP node.
    :param interface: Interface to setup MTU.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type interface: str or int
    :type mtu: int
    """
    if isinstance(interface, str):
        sw_if_index = Topology.get_interface_sw_index(node, interface)
    else:
        sw_if_index = interface

    cmd = u"hw_interface_set_mtu"
    err_msg = f"Failed to set interface MTU on host {node[u'host']}"
    args = dict(
        sw_if_index=sw_if_index,
        mtu=int(mtu)
    )
    try:
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
    except AssertionError as err:
        # Best effort: some drivers reject MTU changes; log and continue.
        logger.debug(f"Setting MTU failed.\n{err}")
def vpp_node_interfaces_ready_wait(node, retries=15):
    """Wait until all interfaces with admin-up are in link-up state.

    :param node: Node to wait on.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type node: dict
    :type retries: int
    :raises RuntimeError: If any interface is not in link-up state after
        defined number of retries.
    """
    for _ in range(0, retries):
        not_ready = list()
        out = InterfaceUtil.vpp_get_interface_data(node)
        for interface in out:
            # flags == 1 means admin-up without the link-up bit set.
            if interface.get(u"flags") == 1:
                not_ready.append(interface.get(u"interface_name"))
        if not_ready:
            logger.debug(
                f"Interfaces still not in link-up state:\n{not_ready}"
            )
            sleep(1)
        else:
            break
    else:
        err = f"Timeout, interfaces not up:\n{not_ready}" \
            if u"not_ready" in locals() else u"No check executed!"
        raise RuntimeError(err)
def all_vpp_interfaces_ready_wait(nodes, retries=15):
    """Wait until all interfaces with admin-up are in link-up state for all
    nodes in the topology.

    :param nodes: Nodes in the topology.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type nodes: dict
    :type retries: int
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)
def vpp_get_interface_data(node, interface=None):
    """Get all interface data from a VPP node. If a name or
    sw_interface_index is provided, return only data for the matching
    interface(s).

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: List of dictionaries containing data for each interface, or a
        single dictionary for the specified interface.
    :rtype: list or dict
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    def process_if_dump(if_dump):
        """Process interface dump.

        Converts API enum/binary fields to plain Python values.

        :param if_dump: Interface dump.
        :type if_dump: dict
        :returns: Processed interface dump.
        :rtype: dict
        """
        if_dump[u"l2_address"] = str(if_dump[u"l2_address"])
        if_dump[u"b_dmac"] = str(if_dump[u"b_dmac"])
        if_dump[u"b_smac"] = str(if_dump[u"b_smac"])
        if_dump[u"flags"] = if_dump[u"flags"].value
        if_dump[u"type"] = if_dump[u"type"].value
        if_dump[u"link_duplex"] = if_dump[u"link_duplex"].value
        if_dump[u"sub_if_flags"] = if_dump[u"sub_if_flags"].value \
            if hasattr(if_dump[u"sub_if_flags"], u"value") \
            else int(if_dump[u"sub_if_flags"])
        return if_dump

    if interface is not None:
        if isinstance(interface, str):
            param = u"interface_name"
        elif isinstance(interface, int):
            param = u"sw_if_index"
        else:
            raise TypeError(f"Wrong interface format {interface}")
    else:
        param = u""

    cmd = u"sw_interface_dump"
    args = dict(
        name_filter_valid=False,
        name_filter=u""
    )
    err_msg = f"Failed to get interface dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)
    logger.debug(f"Received data:\n{details!r}")

    data = list() if interface is None else dict()
    for dump in details:
        if interface is None:
            data.append(process_if_dump(dump))
        elif str(dump.get(param)).rstrip(u"\x00") == str(interface):
            data = process_if_dump(dump)
            break

    logger.debug(f"Interface data:\n{data}")
    return data
def vpp_get_interface_name(node, sw_if_index):
    """Get interface name for the given SW interface index from actual
    interface dump.

    :param node: VPP node to get interface data from.
    :param sw_if_index: SW interface index of the specific interface.
    :type node: dict
    :type sw_if_index: int
    :returns: Name of the given interface.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
    if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
        # Sub-interface: report the name of its super (parent) interface.
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data[u"sup_sw_if_index"]
        )

    return if_data.get(u"interface_name")
def vpp_get_interface_sw_index(node, interface_name):
    """Get SW interface index for the given interface name from actual
    interface dump.

    Note: the original docstring said "Get interface name" / returned
    "Name", which was a copy-paste error - this method returns the index.

    :param node: VPP node to get interface data from.
    :param interface_name: Interface name.
    :type node: dict
    :type interface_name: str
    :returns: SW interface index of the given interface.
    :rtype: int
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)

    return if_data.get(u"sw_if_index")
def vpp_get_interface_mac(node, interface):
    """Get MAC address for the given interface from actual interface dump.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: MAC address.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
    if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
        # Sub-interface: the MAC lives on the super (parent) interface.
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data[u"sup_sw_if_index"])

    return if_data.get(u"l2_address")
def vpp_set_interface_mac(node, interface, mac):
    """Set MAC address for the given interface.

    :param node: VPP node to set interface MAC.
    :param interface: Numeric index or name string of a specific interface.
    :param mac: Required MAC address.
    :type node: dict
    :type interface: int or str
    :type mac: str
    """
    cmd = u"sw_interface_set_mac_address"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        mac_address=L2Util.mac_to_bin(mac)
    )
    # Fixed missing space between the two f-string fragments.
    err_msg = f"Failed to set MAC address of interface {interface} " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def tg_set_interface_driver(node, pci_addr, driver):
    """Set interface driver on the TG node.

    :param node: Node to set interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :param driver: Driver name.
    :type node: dict
    :type pci_addr: str
    :type driver: str
    :raises RuntimeError: If unbinding from the current driver fails.
    :raises RuntimeError: If binding to the new driver fails.
    """
    old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
    if old_driver == driver:
        # Already bound to the requested driver; nothing to do.
        return

    ssh = SSH()
    ssh.connect(node)

    # Unbind from current driver
    if old_driver is not None:
        cmd = f"sh -c \"echo {pci_addr} > " \
            f"/sys/bus/pci/drivers/{old_driver}/unbind\""
        ret_code, _, _ = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")

    # Bind to the new driver
    cmd = f"sh -c \"echo {pci_addr} > /sys/bus/pci/drivers/{driver}/bind\""
    ret_code, _, _ = ssh.exec_command_sudo(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
def tg_get_interface_driver(node, pci_addr):
    """Get interface driver from the TG node.

    :param node: Node to get interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :type node: dict
    :type pci_addr: str
    :returns: Interface driver or None if not found.
    :rtype: str
    :raises RuntimeError: If PCI rescan or lspci command execution failed.
    """
    return DUTSetup.get_pci_dev_driver(node, pci_addr)
def tg_set_interfaces_default_driver(node):
    """Set interfaces default driver specified in topology yaml file.

    :param node: Node to setup interfaces driver on (must be TG node).
    :type node: dict
    """
    for interface in node[u"interfaces"].values():
        InterfaceUtil.tg_set_interface_driver(
            node, interface[u"pci_address"], interface[u"driver"]
        )
def update_vpp_interface_data_on_node(node):
    """Update vpp generated interface data for a given node in DICT__nodes.

    Updates interface names, software if index numbers and any other details
    generated specifically by vpp that are unknown before testcase run.
    It does this by dumping interface list from all devices using python
    api, and pairing known information from topology (mac address) to state
    from VPP.

    :param node: Node selected from DICT__nodes.
    :type node: dict
    """
    interface_list = InterfaceUtil.vpp_get_interface_data(node)
    # Index the dump by MAC so topology interfaces can be paired to it.
    interface_dict = dict()
    for ifc in interface_list:
        interface_dict[ifc[u"l2_address"]] = ifc

    for if_name, if_data in node[u"interfaces"].items():
        ifc_dict = interface_dict.get(if_data[u"mac_address"])
        if ifc_dict is not None:
            if_data[u"name"] = ifc_dict[u"interface_name"]
            if_data[u"vpp_sw_index"] = ifc_dict[u"sw_if_index"]
            if_data[u"mtu"] = ifc_dict[u"mtu"][0]
            logger.debug(
                f"Interface {if_name} found by MAC "
                f"{if_data[u'mac_address']}"
            )
        else:
            logger.debug(
                f"Interface {if_name} not found by MAC "
                f"{if_data[u'mac_address']}"
            )
            if_data[u"vpp_sw_index"] = None
def update_nic_interface_names(node):
    """Update interface names based on nic type and PCI address.

    This method updates interface names in the same format as VPP does.

    :param node: Node dictionary.
    :type node: dict
    """
    for ifc in node[u"interfaces"].values():
        # "0000:18:0a.0" -> ["0000", "18", "0a", "0"]
        if_pci = ifc[u"pci_address"].replace(u".", u":").split(u":")
        # VPP names use lowercase hex bus/device/function: "18/a/0".
        loc = f"{int(if_pci[1], 16):x}/{int(if_pci[2], 16):x}/" \
            f"{int(if_pci[3], 16):x}"
        if ifc[u"model"] == u"Intel-XL710":
            ifc[u"name"] = f"FortyGigabitEthernet{loc}"
        elif ifc[u"model"] == u"Intel-X710":
            ifc[u"name"] = f"TenGigabitEthernet{loc}"
        elif ifc[u"model"] == u"Intel-X520-DA2":
            ifc[u"name"] = f"TenGigabitEthernet{loc}"
        elif ifc[u"model"] == u"Cisco-VIC-1385":
            ifc[u"name"] = f"FortyGigabitEthernet{loc}"
        elif ifc[u"model"] == u"Cisco-VIC-1227":
            ifc[u"name"] = f"TenGigabitEthernet{loc}"
        else:
            ifc[u"name"] = f"UnknownEthernet{loc}"
def update_nic_interface_names_on_all_duts(nodes):
    """Update interface names based on nic type and PCI address on all DUTs.

    This method updates interface names in the same format as VPP does.

    :param nodes: Topology nodes.
    :type nodes: dict
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.update_nic_interface_names(node)
def update_tg_interface_data_on_node(node):
    """Update interface name for TG/linux node in DICT__nodes.

    .. note::
        # for dev in `ls /sys/class/net/`;
        > do echo "\\"`cat /sys/class/net/$dev/address`\\": \\"$dev\\""; done
        "52:54:00:9f:82:63": "eth0"
        "52:54:00:77:ae:a9": "eth1"
        "52:54:00:e1:8a:0f": "eth2"
        "00:00:00:00:00:00": "lo"

    :param node: Node selected from DICT__nodes.
    :type node: dict
    :raises RuntimeError: If getting of interface name and MAC fails.
    """
    # First setup interface driver specified in yaml file
    InterfaceUtil.tg_set_interfaces_default_driver(node)

    # Get interface names
    ssh = SSH()
    ssh.connect(node)

    cmd = u'for dev in `ls /sys/class/net/`; do echo "\\"`cat ' \
        u'/sys/class/net/$dev/address`\\": \\"$dev\\""; done;'

    ret_code, stdout, _ = ssh.exec_command(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(u"Get interface name and MAC failed")
    # Build a JSON object string from the per-line "mac": "dev" pairs.
    tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"

    interfaces = JsonParser().parse_data(tmp)
    for interface in node[u"interfaces"].values():
        name = interfaces.get(interface[u"mac_address"])
        if name is None:
            continue
        interface[u"name"] = name
def iface_update_numa_node(node):
    """For all interfaces from topology file update numa node based on
    information from the node.

    :param node: Node from topology.
    :type node: dict
    :raises ValueError: If numa node ia less than 0.
    :raises RuntimeError: If update of numa node failed.
    """
    ssh = SSH()
    for if_key in Topology.get_node_interfaces(node):
        if_pci = Topology.get_interface_pci_addr(node, if_key)
        ssh.connect(node)
        cmd = f"cat /sys/bus/pci/devices/{if_pci}/numa_node"
        # NOTE(review): source was truncated here; retry count of 3
        # reconstructed - confirm against the original file.
        for _ in range(3):
            ret, out, _ = ssh.exec_command(cmd)
            if ret == 0:
                try:
                    # sysfs reports -1 when numa is unknown; map it to 0.
                    numa_node = 0 if int(out) < 0 else int(out)
                except ValueError:
                    logger.trace(
                        f"Reading numa location failed for: {if_pci}"
                    )
                else:
                    Topology.set_interface_numa_node(
                        node, if_key, numa_node
                    )
                    break
        else:
            raise RuntimeError(f"Update numa node failed for: {if_pci}")
def update_all_interface_data_on_all_nodes(
        nodes, skip_tg=False, skip_vpp=False):
    """Update interface names on all nodes in DICT__nodes.

    This method updates the topology dictionary by querying interface lists
    of all nodes mentioned in the topology dictionary.

    :param nodes: Nodes in the topology.
    :param skip_tg: Skip TG node.
    :param skip_vpp: Skip VPP node.
    :type nodes: dict
    :type skip_tg: bool
    :type skip_vpp: bool
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT and not skip_vpp:
            InterfaceUtil.update_vpp_interface_data_on_node(node)
        elif node[u"type"] == NodeType.TG and not skip_tg:
            InterfaceUtil.update_tg_interface_data_on_node(node)
        InterfaceUtil.iface_update_numa_node(node)
def create_vlan_subinterface(node, interface, vlan):
    """Create VLAN sub-interface on node.

    :param node: Node to add VLAN subinterface on.
    :param interface: Interface name or index on which create VLAN
        subinterface.
    :param vlan: VLAN ID of the subinterface to be created.
    :type node: dict
    :type interface: str or int
    :type vlan: int
    :returns: Name and index of created subinterface.
    :rtype: tuple
    :raises RuntimeError: if it is unable to create VLAN subinterface on the
        node or interface cannot be converted.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"create_vlan_subif"
    args = dict(
        sw_if_index=sw_if_index,
        vlan_id=int(vlan)
    )
    err_msg = f"Failed to create VLAN sub-interface on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new sub-interface in the topology.
    if_key = Topology.add_new_port(node, u"vlan_subif")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{vlan}", sw_if_index
def create_vxlan_interface(node, vni, source_ip, destination_ip):
    """Create VXLAN interface and return sw if index of created interface.

    :param node: Node where to create VXLAN interface.
    :param vni: VXLAN Network Identifier.
    :param source_ip: Source IP of a VXLAN Tunnel End Point.
    :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
    :type node: dict
    :type vni: int
    :type source_ip: str
    :type destination_ip: str
    :returns: SW IF INDEX of created interface.
    :rtype: int
    :raises RuntimeError: if it is unable to create VxLAN interface on the
        node.
    """
    cmd = u"vxlan_add_del_tunnel_v3"
    args = dict(
        is_add=True,
        instance=Constants.BITWISE_NON_ZERO,
        src_address=IPAddress.create_ip_address_object(
            ip_address(source_ip)
        ),
        dst_address=IPAddress.create_ip_address_object(
            ip_address(destination_ip)
        ),
        mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
        encap_vrf_id=0,
        decap_next_index=Constants.BITWISE_NON_ZERO,
        vni=int(vni)
    )
    err_msg = f"Failed to create VXLAN tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new tunnel interface in the topology.
    if_key = Topology.add_new_port(node, u"vxlan_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return sw_if_index
def set_vxlan_bypass(node, interface=None):
    """Add the 'ip4-vxlan-bypass' graph node for a given interface.

    By adding the IPv4 vxlan-bypass graph node to an interface, the node
    checks for and validate input vxlan packet and bypass ip4-lookup,
    ip4-local, ip4-udp-lookup nodes to speedup vxlan packet forwarding.
    This node will cause extra overhead to for non-vxlan packets which is
    kept at a minimum.

    :param node: Node where to set VXLAN bypass.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :raises RuntimeError: if it failed to set VXLAN bypass on interface.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"sw_interface_set_vxlan_bypass"
    args = dict(
        is_ipv6=False,
        sw_if_index=sw_if_index,
        enable=True
    )
    err_msg = f"Failed to set VXLAN bypass on interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def vxlan_dump(node, interface=None):
    """Get VxLAN data for the given interface.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
        If None, information about all VxLAN interfaces is returned.
    :type node: dict
    :type interface: int or str
    :returns: Dictionary containing data for the given VxLAN interface or if
        interface=None, the list of dictionaries with all VxLAN interfaces.
    :rtype: dict or list
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    def process_vxlan_dump(vxlan_dump):
        """Process vxlan dump.

        Converts API address objects to plain strings.

        :param vxlan_dump: Vxlan interface dump.
        :type vxlan_dump: dict
        :returns: Processed vxlan interface dump.
        :rtype: dict
        """
        vxlan_dump[u"src_address"] = str(vxlan_dump[u"src_address"])
        vxlan_dump[u"dst_address"] = str(vxlan_dump[u"dst_address"])
        return vxlan_dump

    if interface is not None:
        sw_if_index = InterfaceUtil.get_interface_index(node, interface)
    else:
        # Dump all tunnels.
        sw_if_index = int(Constants.BITWISE_NON_ZERO)

    cmd = u"vxlan_tunnel_dump"
    args = dict(
        sw_if_index=sw_if_index
    )
    err_msg = f"Failed to get VXLAN dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)

    data = list() if interface is None else dict()
    for dump in details:
        if interface is None:
            data.append(process_vxlan_dump(dump))
        elif dump[u"sw_if_index"] == sw_if_index:
            data = process_vxlan_dump(dump)
            break

    logger.debug(f"VXLAN data:\n{data}")
    return data
def create_subinterface(
        node, interface, sub_id, outer_vlan_id=None, inner_vlan_id=None,
        type_subif=None):
    """Create sub-interface on node. It is possible to set required
    sub-interface type and VLAN tag(s).

    :param node: Node to add sub-interface.
    :param interface: Interface name on which create sub-interface.
    :param sub_id: ID of the sub-interface to be created.
    :param outer_vlan_id: Optional outer VLAN ID.
    :param inner_vlan_id: Optional inner VLAN ID.
    :param type_subif: Optional type of sub-interface. Values supported by
        VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
        [default_sub]
    :type node: dict
    :type interface: str or int
    :type sub_id: int
    :type outer_vlan_id: int
    :type inner_vlan_id: int
    :type type_subif: str
    :returns: Name and index of created sub-interface.
    :rtype: tuple
    :raises RuntimeError: If it is not possible to create sub-interface.
    """
    subif_types = type_subif.split()

    # Translate textual type tokens into API bit flags.
    flags = 0
    if u"no_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_NO_TAGS
    if u"one_tag" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_ONE_TAG
    if u"two_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_TWO_TAGS
    if u"dot1ad" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DOT1AD
    if u"exact_match" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_EXACT_MATCH
    if u"default_sub" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DEFAULT
    if type_subif == u"default_sub":
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_INNER_VLAN_ID_ANY\
            | SubInterfaceFlags.SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY

    cmd = u"create_subif"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        sub_id=int(sub_id),
        sub_if_flags=flags.value if hasattr(flags, u"value")
        else int(flags),
        outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
        inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0
    )
    err_msg = f"Failed to create sub-interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new sub-interface in the topology.
    if_key = Topology.add_new_port(node, u"subinterface")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{sub_id}", sw_if_index
def create_gre_tunnel_interface(node, source_ip, destination_ip):
    """Create GRE tunnel interface on node.

    :param node: VPP node to add tunnel interface.
    :param source_ip: Source of the GRE tunnel.
    :param destination_ip: Destination of the GRE tunnel.
    :type node: dict
    :type source_ip: str
    :type destination_ip: str
    :returns: Name and index of created GRE tunnel interface.
    :rtype: tuple
    :raises RuntimeError: If unable to create GRE tunnel interface.
    """
    cmd = u"gre_tunnel_add_del"
    # NOTE(review): source was truncated here; tunnel field defaults
    # (type/mode/fib ids) reconstructed - confirm against the original.
    tunnel = dict(
        type=0,
        instance=Constants.BITWISE_NON_ZERO,
        src=str(source_ip),
        dst=str(destination_ip),
        outer_fib_id=0,
        session_id=0
    )
    args = dict(
        is_add=1,
        tunnel=tunnel
    )
    err_msg = f"Failed to create GRE tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new tunnel interface in the topology.
    if_key = Topology.add_new_port(node, u"gre_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return ifc_name, sw_if_index
def create_gtpu_tunnel_interface(node, teid, source_ip, destination_ip):
    """Create GTPU interface and return sw if index of created interface.

    :param node: Node where to create GTPU interface.
    :param teid: GTPU Tunnel Endpoint Identifier.
    :param source_ip: Source IP of a GTPU Tunnel End Point.
    :param destination_ip: Destination IP of a GTPU Tunnel End Point.
    :type node: dict
    :type teid: int
    :type source_ip: str
    :type destination_ip: str
    :returns: SW IF INDEX of created interface.
    :rtype: int
    :raises RuntimeError: if it is unable to create GTPU interface on the
        node.
    """
    cmd = u"gtpu_add_del_tunnel"
    # NOTE(review): source was truncated here; encap/decap defaults
    # reconstructed - confirm against the original file.
    args = dict(
        is_add=True,
        src_address=IPAddress.create_ip_address_object(
            ip_address(source_ip)
        ),
        dst_address=IPAddress.create_ip_address_object(
            ip_address(destination_ip)
        ),
        mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
        encap_vrf_id=0,
        decap_next_index=2,
        teid=teid
    )
    err_msg = f"Failed to create GTPU tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new tunnel interface in the topology.
    if_key = Topology.add_new_port(node, u"gtpu_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return sw_if_index
def vpp_enable_gtpu_offload_rx(node, interface, gtpu_if_index):
    """Enable GTPU offload RX onto interface.

    :param node: Node to run command on.
    :param interface: Name of the specific interface.
    :param gtpu_if_index: Index of GTPU tunnel interface.
    :type node: dict
    :type interface: str
    :type gtpu_if_index: int
    """
    sw_if_index = Topology.get_interface_sw_index(node, interface)

    cmd = u"gtpu_offload_rx"
    args = dict(
        # hw_if_index is the physical NIC; sw_if_index is the tunnel.
        hw_if_index=sw_if_index,
        sw_if_index=gtpu_if_index,
        enable=True
    )
    err_msg = f"Failed to enable GTPU offload RX on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def vpp_create_loopback(node, mac=None):
    """Create loopback interface on VPP node.

    :param node: Node to create loopback interface on.
    :param mac: Optional MAC address for loopback interface.
    :type node: dict
    :type mac: str
    :returns: SW interface index.
    :rtype: int
    :raises RuntimeError: If it is not possible to create loopback on the
        node.
    """
    cmd = "create_loopback_instance"
    api_args = {
        "mac_address": L2Util.mac_to_bin(mac) if mac else 0,
        "is_specified": False,
        "user_instance": 0,
    }
    err_msg = f"Failed to create loopback interface on host {node['host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **api_args).get_sw_if_index(err_msg)

    # Record the loopback in the topology, including the MAC VPP chose.
    if_key = Topology.add_new_port(node, "loopback")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
    if_mac = InterfaceUtil.vpp_get_interface_mac(node, ifc_name)
    Topology.update_interface_mac_address(node, if_key, if_mac)

    return sw_if_index
def vpp_create_bond_interface(
        node, mode, load_balance=None, mac=None, gso=False):
    """Create bond interface on VPP node.

    :param node: DUT node from topology.
    :param mode: Link bonding mode.
    :param load_balance: Load balance (optional, valid for xor and lacp
        modes, otherwise ignored). Default: None.
    :param mac: MAC address to assign to the bond interface (optional).
        Default: None.
    :param gso: Enable GSO support (optional). Default: False.
    :type node: dict
    :type mode: str
    :type load_balance: str
    :type mac: str
    :type gso: bool
    :returns: Interface key (name) in topology.
    :rtype: str
    :raises RuntimeError: If it is not possible to create bond interface on
        the node.
    """
    cmd = "bond_create2"
    api_args = {
        "id": int(Constants.BITWISE_NON_ZERO),
        "use_custom_mac": bool(mac is not None),
        "mac_address": L2Util.mac_to_bin(mac) if mac else None,
        # Map e.g. "lacp" / "round-robin" to the corresponding API enum.
        "mode": getattr(
            LinkBondMode,
            f"BOND_API_MODE_{mode.replace('-', '_').upper()}"
        ).value,
        "lb": 0 if load_balance is None else getattr(
            LinkBondLoadBalanceAlgo,
            f"BOND_API_LB_ALGO_{load_balance.upper()}"
        ).value,
        "numa_only": False,
        "enable_gso": gso,
    }
    err_msg = f"Failed to create bond interface on host {node['host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **api_args).get_sw_if_index(err_msg)

    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx="eth_bond"
    )
    if_key = Topology.get_interface_by_sw_index(node, sw_if_index)

    return if_key
def add_eth_interface(
        node, ifc_name=None, sw_if_index=None, ifc_pfx=None,
        host_if_key=None):
    """Add ethernet interface to current topology.

    :param node: DUT node from topology.
    :param ifc_name: Name of the interface.
    :param sw_if_index: SW interface index.
    :param ifc_pfx: Interface key prefix.
    :param host_if_key: Host interface key from topology file.
    :type node: dict
    :type ifc_name: str
    :type sw_if_index: int
    :type ifc_pfx: str
    :type host_if_key: str
    """
    if_key = Topology.add_new_port(node, ifc_pfx)

    # Use explicit None checks: sw_if_index 0 is a valid VPP interface
    # index (local0) and must not be treated as "missing" by truthiness.
    if ifc_name is not None and sw_if_index is None:
        sw_if_index = InterfaceUtil.vpp_get_interface_sw_index(
            node, ifc_name
        )
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    if sw_if_index is not None and ifc_name is None:
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
    ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_index)
    Topology.update_interface_mac_address(node, if_key, ifc_mac)
    if host_if_key is not None:
        # Inherit placement attributes from the underlying host interface.
        Topology.set_interface_numa_node(
            node, if_key, Topology.get_interface_numa_node(
                node, host_if_key
            )
        )
        Topology.update_interface_pci_address(
            node, if_key, Topology.get_interface_pci_addr(node, host_if_key)
        )
def vpp_create_avf_interface(
        node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0):
    """Create AVF interface on VPP node.

    :param node: DUT node from topology.
    :param if_key: Interface key from topology file of interface
        to be bound to i40evf driver.
    :param num_rx_queues: Number of RX queues.
    :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
    :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
    :type node: dict
    :type if_key: str
    :type num_rx_queues: int
    :type rxq_size: int
    :type txq_size: int
    :returns: AVF interface key (name) in topology.
    :rtype: str
    :raises RuntimeError: If it is not possible to create AVF interface on
        the node.
    """
    PapiSocketExecutor.run_cli_cmd(
        node, "set logging class avf level debug"
    )

    cmd = "avf_create"
    vf_pci_addr = Topology.get_interface_pci_addr(node, if_key)
    api_args = {
        "pci_addr": InterfaceUtil.pci_to_int(vf_pci_addr),
        "enable_elog": 0,
        "rxq_num": int(num_rx_queues) if num_rx_queues else 0,
        "rxq_size": rxq_size,
        "txq_size": txq_size,
    }
    err_msg = f"Failed to create AVF interface on host {node['host']}"

    # FIXME: Remove once the fw/driver is upgraded.
    # Retry a few times, the device occasionally fails to come up.
    for _ in range(10):
        with PapiSocketExecutor(node) as papi_exec:
            try:
                sw_if_index = papi_exec.add(cmd, **api_args).get_sw_if_index(
                    err_msg
                )
                break
            except AssertionError:
                logger.error(err_msg)
    else:
        raise AssertionError(err_msg)

    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx="eth_avf",
        host_if_key=if_key
    )

    return Topology.get_interface_by_sw_index(node, sw_if_index)
def vpp_create_af_xdp_interface(
        node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
        mode="auto"):
    """Create AF_XDP interface on VPP node.

    :param node: DUT node from topology.
    :param if_key: Physical interface key from topology file of interface
        to be bound to compatible driver.
    :param num_rx_queues: Number of RX queues. (Optional, Default: none)
    :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
    :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
    :param mode: AF_XDP interface mode. (Optional, Default: auto).
    :type node: dict
    :type if_key: str
    :type num_rx_queues: int
    :type rxq_size: int
    :type txq_size: int
    :type mode: str
    :returns: Interface key (name) in topology file.
    :rtype: str
    :raises RuntimeError: If it is not possible to create AF_XDP interface
        on the node.
    """
    PapiSocketExecutor.run_cli_cmd(
        node, "set logging class af_xdp level debug"
    )

    cmd = "af_xdp_create_v2"
    pci_addr = Topology.get_interface_pci_addr(node, if_key)
    api_args = {
        "name": InterfaceUtil.pci_to_eth(node, pci_addr),
        "host_if": InterfaceUtil.pci_to_eth(node, pci_addr),
        "rxq_num": int(num_rx_queues) if num_rx_queues else 0,
        "rxq_size": rxq_size,
        "txq_size": txq_size,
        "mode": getattr(AfXdpMode, f"AF_XDP_API_MODE_{mode.upper()}").value,
    }
    err_msg = f"Failed to create AF_XDP interface on host {node['host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **api_args).get_sw_if_index(err_msg)

    # Carry over the host interface MAC, then register in the topology.
    InterfaceUtil.vpp_set_interface_mac(
        node, sw_if_index, Topology.get_interface_mac(node, if_key)
    )
    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx="eth_af_xdp",
        host_if_key=if_key
    )

    return Topology.get_interface_by_sw_index(node, sw_if_index)
def vpp_create_rdma_interface(
        node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
        mode="auto"):
    """Create RDMA interface on VPP node.

    :param node: DUT node from topology.
    :param if_key: Physical interface key from topology file of interface
        to be bound to rdma-core driver.
    :param num_rx_queues: Number of RX queues.
    :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
    :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
    :param mode: RDMA interface mode - auto/ibv/dv.
    :type node: dict
    :type if_key: str
    :type num_rx_queues: int
    :type rxq_size: int
    :type txq_size: int
    :type mode: str
    :returns: Interface key (name) in topology file.
    :rtype: str
    :raises RuntimeError: If it is not possible to create RDMA interface on
        the node.
    """
    PapiSocketExecutor.run_cli_cmd(
        node, "set logging class rdma level debug"
    )

    cmd = "rdma_create_v3"
    pci_addr = Topology.get_interface_pci_addr(node, if_key)
    api_args = {
        "name": InterfaceUtil.pci_to_eth(node, pci_addr),
        "host_if": InterfaceUtil.pci_to_eth(node, pci_addr),
        "rxq_num": int(num_rx_queues) if num_rx_queues else 0,
        "rxq_size": rxq_size,
        "txq_size": txq_size,
        "mode": getattr(RdmaMode, f"RDMA_API_MODE_{mode.upper()}").value,
        # Note: Set True for non-jumbo packets.
        "no_multi_seg": False,
        "max_pktlen": 0,
        # TODO: Apply desired RSS flags.
    }
    err_msg = f"Failed to create RDMA interface on host {node['host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **api_args).get_sw_if_index(err_msg)

    # Carry over the host interface MAC, then register in the topology.
    InterfaceUtil.vpp_set_interface_mac(
        node, sw_if_index, Topology.get_interface_mac(node, if_key)
    )
    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx="eth_rdma",
        host_if_key=if_key
    )

    return Topology.get_interface_by_sw_index(node, sw_if_index)
def vpp_add_bond_member(node, interface, bond_if):
    """Add member interface to bond interface on VPP node.

    :param node: DUT node from topology.
    :param interface: Physical interface key from topology file.
    :param bond_if: Bond interface key from topology file.
    :type node: dict
    :type interface: str
    :type bond_if: str
    :raises RuntimeError: If it is not possible to add member to bond
        interface on the node.
    """
    cmd = "bond_add_member"
    args = dict(
        sw_if_index=Topology.get_interface_sw_index(node, interface),
        bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
        is_passive=False,
        is_long_timeout=False
    )
    err_msg = f"Failed to add member {interface} to bond interface " \
        f"{bond_if} on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def vpp_show_bond_data_on_node(node, verbose=False):
    """Show (detailed) bond information on VPP node.

    :param node: DUT node from topology.
    :param verbose: If detailed information is required or not.
    :type node: dict
    :type verbose: bool
    """
    cmd = "sw_bond_interface_dump"
    err_msg = f"Failed to get bond interface dump on host {node['host']}"

    text = f"Bond data on node {node['host']}:\n"
    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd).get_details(err_msg)

    for bond in details:
        text += f"{bond['interface_name']}\n"
        text += "  mode: {m}\n".format(
            m=bond["mode"].name.replace("BOND_API_MODE_", "").lower()
        )
        text += "  load balance: {lb}\n".format(
            lb=bond["lb"].name.replace("BOND_API_LB_ALGO_", "").lower()
        )
        text += f"  number of active members: {bond['active_members']}\n"
        if verbose:
            member_data = InterfaceUtil.vpp_bond_member_dump(
                node, Topology.get_interface_by_sw_index(
                    node, bond["sw_if_index"]
                )
            )
            for member in member_data:
                if not member["is_passive"]:
                    text += f"    {member['interface_name']}\n"
        text += f"  number of members: {bond['members']}\n"
        if verbose:
            for member in member_data:
                text += f"    {member['interface_name']}\n"
        text += f"  interface id: {bond['id']}\n"
        text += f"  sw_if_index: {bond['sw_if_index']}\n"
    logger.info(text)
def vpp_bond_member_dump(node, interface):
    """Get bond interface member(s) data on VPP node.

    :param node: DUT node from topology.
    :param interface: Physical interface key from topology file.
    :type node: dict
    :type interface: str
    :returns: Bond member interface data.
    :rtype: dict
    """
    cmd = "sw_member_interface_dump"
    api_args = {
        "sw_if_index": Topology.get_interface_sw_index(node, interface)
    }
    err_msg = f"Failed to get slave dump on host {node['host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **api_args).get_details(err_msg)

    logger.debug(f"Member data:\n{details}")
    return details
def vpp_show_bond_data_on_all_nodes(nodes, verbose=False):
    """Show (detailed) bond information on all VPP nodes in DICT__nodes.

    :param nodes: Nodes in the topology.
    :param verbose: If detailed information is required or not.
    :type nodes: dict
    :type verbose: bool
    """
    duts = (
        node for node in nodes.values() if node["type"] == NodeType.DUT
    )
    for dut in duts:
        InterfaceUtil.vpp_show_bond_data_on_node(dut, verbose)
def vpp_enable_input_acl_interface(
        node, interface, ip_version, table_index):
    """Enable input acl on interface.

    :param node: VPP node to setup interface for input acl.
    :param interface: Interface to setup input acl.
    :param ip_version: Version of IP protocol.
    :param table_index: Classify table index.
    :type node: dict
    :type interface: str or int
    :type ip_version: str
    :type table_index: int
    """
    def _table_for(version):
        """Return table_index if ip_version matches, else "no table"."""
        return table_index if ip_version == version \
            else Constants.BITWISE_NON_ZERO

    cmd = "input_acl_set_interface"
    api_args = {
        "sw_if_index": InterfaceUtil.get_interface_index(node, interface),
        "ip4_table_index": _table_for("ip4"),
        "ip6_table_index": _table_for("ip6"),
        "l2_table_index": _table_for("l2"),
        "is_add": 1,
    }
    err_msg = f"Failed to enable input acl on interface {interface}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **api_args).get_reply(err_msg)
def get_interface_classify_table(node, interface):
    """Get name of classify table for the given interface.

    TODO: Move to Classify.py.

    :param node: VPP node to get data from.
    :param interface: Name or sw_if_index of a specific interface.
    :type node: dict
    :type interface: str or int
    :returns: Classify table name.
    :rtype: str
    """
    if isinstance(interface, str):
        sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
    else:
        # Caller already passed a sw_if_index.
        sw_if_index = interface

    cmd = "classify_table_by_interface"
    api_args = {"sw_if_index": sw_if_index}
    err_msg = f"Failed to get classify table name by interface {interface}"
    with PapiSocketExecutor(node) as papi_exec:
        reply = papi_exec.add(cmd, **api_args).get_reply(err_msg)

    return reply
def get_sw_if_index(node, interface_name):
    """Get sw_if_index for the given interface from actual interface dump.

    FIXME: Delete and redirect callers to vpp_get_interface_sw_index.

    :param node: VPP node to get interface data from.
    :param interface_name: Name of the specific interface.
    :type node: dict
    :type interface_name: str
    :returns: sw_if_index of the given interface.
    :rtype: str
    """
    dump = InterfaceUtil.vpp_get_interface_data(
        node, interface=interface_name
    )
    return dump.get("sw_if_index")
def vxlan_gpe_dump(node, interface_name=None):
    """Get VxLAN GPE data for the given interface.

    :param node: VPP node to get interface data from.
    :param interface_name: Name of the specific interface. If None,
        information about all VxLAN GPE interfaces is returned.
    :type node: dict
    :type interface_name: str
    :returns: Dictionary containing data for the given VxLAN GPE interface
        or if interface=None, the list of dictionaries with all VxLAN GPE
        interfaces.
    :rtype: dict or list
    """
    def _convert_addresses(vxlan_dump):
        """Replace raw local/remote fields with ip_address objects.

        :param vxlan_dump: Vxlan_gpe interface dump.
        :type vxlan_dump: dict
        :returns: Processed vxlan_gpe interface dump.
        :rtype: dict
        """
        if vxlan_dump["is_ipv6"]:
            vxlan_dump["local"] = ip_address(vxlan_dump["local"])
            vxlan_dump["remote"] = ip_address(vxlan_dump["remote"])
        else:
            # IPv4 addresses occupy only the first four bytes of the field.
            vxlan_dump["local"] = ip_address(vxlan_dump["local"][0:4])
            vxlan_dump["remote"] = ip_address(vxlan_dump["remote"][0:4])
        return vxlan_dump

    if interface_name is None:
        # Dump all tunnels.
        sw_if_index = int(Constants.BITWISE_NON_ZERO)
    else:
        sw_if_index = InterfaceUtil.get_interface_index(
            node, interface_name
        )

    cmd = "vxlan_gpe_tunnel_dump"
    api_args = {"sw_if_index": sw_if_index}
    err_msg = f"Failed to get VXLAN-GPE dump on host {node['host']}"
    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **api_args).get_details(err_msg)

    data = list() if interface_name is None else dict()
    for dump in details:
        if interface_name is None:
            data.append(_convert_addresses(dump))
        elif dump["sw_if_index"] == sw_if_index:
            data = _convert_addresses(dump)

    logger.debug(f"VXLAN-GPE data:\n{data}")
    return data
def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
    """Assign VPP interface to specific VRF/FIB table.

    :param node: VPP node where the FIB and interface are located.
    :param interface: Interface to be assigned to FIB.
    :param table_id: VRF table ID.
    :param ipv6: Assign to IPv6 table. Default False.
    :type node: dict
    :type interface: str or int
    :type table_id: int
    :type ipv6: bool
    """
    cmd = "sw_interface_set_table"
    api_args = {
        "sw_if_index": InterfaceUtil.get_interface_index(node, interface),
        "is_ipv6": ipv6,
        "vrf_id": int(table_id),
    }
    err_msg = f"Failed to assign interface {interface} to FIB table"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **api_args).get_reply(err_msg)
def set_linux_interface_mac(
        node, interface, mac, namespace=None, vf_id=None):
    """Set MAC address for interface in linux.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param mac: MAC to be assigned to interface.
    :param namespace: Execute command in namespace. Optional
    :param vf_id: Virtual Function id. Optional
    :type node: dict
    :type interface: str
    :type mac: str
    :type namespace: str
    :type vf_id: int
    """
    if vf_id is None:
        mac_str = f"address {mac}"
    else:
        # Address a specific VF of the given PF interface.
        mac_str = f"vf {vf_id} mac {mac}"
    if namespace:
        ns_str = f"ip netns exec {namespace}"
    else:
        ns_str = ""

    exec_cmd_no_error(
        node, f"{ns_str} ip link set {interface} {mac_str}", sudo=True
    )
def set_linux_interface_promisc(
        node, interface, namespace=None, vf_id=None, state="on"):
    """Set promisc state for interface in linux.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param namespace: Exec command in namespace. (Optional, Default: None)
    :param vf_id: Virtual Function id. (Optional, Default: None)
    :param state: State of feature. (Optional, Default: on)
    :type node: dict
    :type interface: str
    :type namespace: str
    :type vf_id: int
    :type state: str
    """
    if vf_id is None:
        promisc_str = f"promisc {state}"
    else:
        # Address a specific VF of the given PF interface.
        promisc_str = f"vf {vf_id} promisc {state}"
    if namespace:
        ns_str = f"ip netns exec {namespace}"
    else:
        ns_str = ""

    exec_cmd_no_error(
        node, f"{ns_str} ip link set dev {interface} {promisc_str}",
        sudo=True
    )
def set_linux_interface_trust_on(
        node, interface, namespace=None, vf_id=None):
    """Set trust on (promisc) for interface in linux.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param namespace: Execute command in namespace. Optional
    :param vf_id: Virtual Function id. Optional
    :type node: dict
    :type interface: str
    :type namespace: str
    :type vf_id: int
    """
    if vf_id is None:
        trust_str = "trust on"
    else:
        # Address a specific VF of the given PF interface.
        trust_str = f"vf {vf_id} trust on"
    if namespace:
        ns_str = f"ip netns exec {namespace}"
    else:
        ns_str = ""

    exec_cmd_no_error(
        node, f"{ns_str} ip link set dev {interface} {trust_str}", sudo=True
    )
def set_linux_interface_spoof_off(
        node, interface, namespace=None, vf_id=None):
    """Set spoof off for interface in linux.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param namespace: Execute command in namespace. Optional
    :param vf_id: Virtual Function id. Optional
    :type node: dict
    :type interface: str
    :type namespace: str
    :type vf_id: int
    """
    if vf_id is None:
        spoof_str = "spoof off"
    else:
        # Address a specific VF of the given PF interface.
        spoof_str = f"vf {vf_id} spoof off"
    if namespace:
        ns_str = f"ip netns exec {namespace}"
    else:
        ns_str = ""

    exec_cmd_no_error(
        node, f"{ns_str} ip link set dev {interface} {spoof_str}", sudo=True
    )
def set_linux_interface_state(
        node, interface, namespace=None, state="up"):
    """Set operational state for interface in linux.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param namespace: Execute command in namespace. Optional
    :param state: Up/Down.
    :type node: dict
    :type interface: str
    :type namespace: str
    :type state: str
    """
    if namespace:
        ns_str = f"ip netns exec {namespace}"
    else:
        ns_str = ""

    exec_cmd_no_error(
        node, f"{ns_str} ip link set dev {interface} {state}", sudo=True
    )
def init_interface(node, ifc_key, driver, numvfs=0, osi_layer="L2"):
    """Init PCI device. Check driver compatibility and bind to proper
    drivers. Optionally create NIC VFs.

    :param node: DUT node.
    :param ifc_key: Interface key from topology file.
    :param driver: Base driver to use.
    :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
    :param osi_layer: OSI Layer type to initialize TG with.
        Default value "L2" sets linux interface spoof off.
    :type node: dict
    :type ifc_key: str
    :type driver: str
    :type numvfs: int
    :type osi_layer: str
    :returns: Virtual Function topology interface keys.
    :rtype: list
    :raises RuntimeError: If a reason preventing initialization is found.
    """
    kernel_driver = Topology.get_interface_driver(node, ifc_key)

    # Per-driver compatibility: allowed kernel drivers and the message
    # prefix used when the check fails. rdma-core accepts any driver.
    compat = {
        "avf": (
            ("ice", "iavf", "i40e", "i40evf"),
            "AVF needs ice or i40e compatible driver, not ",
        ),
        "af_xdp": (
            ("ice", "iavf", "i40e", "i40evf", "mlx5_core", "ixgbe"),
            "AF_XDP needs ice/i40e/rdma/ixgbe compatible driver, not ",
        ),
        "rdma-core": (None, None),
    }
    if driver not in compat:
        raise RuntimeError(f"Unsupported driver: {driver}")

    allowed, msg_prefix = compat[driver]
    if allowed is not None and kernel_driver not in allowed:
        raise RuntimeError(
            f"{msg_prefix}{kernel_driver} "
            f"at node {node['host']} ifc {ifc_key}"
        )

    return InterfaceUtil.init_generic_interface(
        node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
    )
def init_generic_interface(node, ifc_key, numvfs=0, osi_layer="L2"):
    """Init PCI device. Bind to proper drivers. Optionally create NIC VFs.

    :param node: DUT node.
    :param ifc_key: Interface key from topology file.
    :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
    :param osi_layer: OSI Layer type to initialize TG with.
        Default value "L2" sets linux interface spoof off.
    :type node: dict
    :type ifc_key: str
    :type numvfs: int
    :type osi_layer: str
    :returns: Virtual Function topology interface keys.
    :rtype: list
    :raises RuntimeError: If a reason preventing initialization is found.
    """
    # Read PCI address and driver.
    pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
    pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
    uio_driver = Topology.get_uio_driver(node)
    kernel_driver = Topology.get_interface_driver(node, ifc_key)
    current_driver = DUTSetup.get_pci_dev_driver(
        node, pf_pci_addr.replace(":", r"\:"))
    pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"

    # Stop VPP to prevent deadlock while manipulating the device.
    VPPUtil.stop_vpp_service(node)
    if current_driver != kernel_driver:
        # PCI device must be re-bound to kernel driver before creating VFs.
        DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
        # Unbind from current driver if bound.
        if current_driver:
            DUTSetup.pci_driver_unbind(node, pf_pci_addr)
        # Bind to kernel driver.
        DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)

    # Initialize PCI VFs.
    DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)

    if not numvfs:
        # No VFs requested: the PF itself is used directly.
        if osi_layer == "L2":
            InterfaceUtil.set_linux_interface_promisc(node, pf_dev)

    vf_ifc_keys = []
    # Set MAC address and bind each virtual function to uio driver.
    for vf_id in range(numvfs):
        # Derive the VF MAC from the PF MAC: the second PF MAC byte is
        # skipped and the last byte replaced with the VF id.
        vf_mac_addr = ":".join(
            [pf_mac_addr[0], pf_mac_addr[2], pf_mac_addr[3], pf_mac_addr[4],
             pf_mac_addr[5], f"{vf_id:02x}"
             ]
        )

        InterfaceUtil.set_linux_interface_trust_on(
            node, pf_dev, vf_id=vf_id
        )
        if osi_layer == "L2":
            InterfaceUtil.set_linux_interface_spoof_off(
                node, pf_dev, vf_id=vf_id
            )
        InterfaceUtil.set_linux_interface_mac(
            node, pf_dev, vf_mac_addr, vf_id=vf_id
        )
        InterfaceUtil.set_linux_interface_state(
            node, pf_dev, state="up"
        )

        DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
        DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)

        # Add newly created ports into topology file
        vf_ifc_name = f"{ifc_key}_vif"
        vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
        vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
        Topology.update_interface_name(
            node, vf_ifc_key, vf_ifc_name + str(vf_id + 1)
        )
        Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
        Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
        Topology.set_interface_numa_node(
            node, vf_ifc_key, Topology.get_interface_numa_node(
                node, ifc_key
            )
        )
        vf_ifc_keys.append(vf_ifc_key)

    return vf_ifc_keys
def vpp_sw_interface_rx_placement_dump(node):
    """Dump VPP interface RX placement on node.

    :param node: Node to run command on.
    :type node: dict
    :returns: Thread mapping information as a list of dictionaries.
    :rtype: list
    """
    cmd = "sw_interface_rx_placement_dump"
    err_msg = f"Failed to run '{cmd}' PAPI command on host {node['host']}!"
    with PapiSocketExecutor(node) as papi_exec:
        # Queue one dump request per interface known to the topology.
        for ifc in node["interfaces"].values():
            if ifc["vpp_sw_index"] is not None:
                papi_exec.add(cmd, sw_if_index=ifc["vpp_sw_index"])
        details = papi_exec.get_details(err_msg)
    return sorted(details, key=lambda item: item["sw_if_index"])
def vpp_sw_interface_rx_placement_dump_on_all_duts(nodes):
    """Dump VPP interface RX placement on all given nodes.

    The per-node call logs the dump; nothing is returned to the caller.

    :param nodes: Nodes to run command on.
    :type nodes: dict
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_sw_interface_rx_placement_dump(node)
def vpp_sw_interface_set_rx_placement(
        node, sw_if_index, queue_id, worker_id):
    """Set interface RX placement to worker on node.

    :param node: Node to run command on.
    :param sw_if_index: VPP SW interface index.
    :param queue_id: VPP interface queue ID.
    :param worker_id: VPP worker ID (indexing from 0).
    :type node: dict
    :type sw_if_index: int
    :type queue_id: int
    :type worker_id: int
    :raises RuntimeError: If failed to run command on host or if no API
        reply received.
    """
    cmd = "sw_interface_set_rx_placement"
    err_msg = f"Failed to set interface RX placement to worker " \
        f"on host {node['host']}!"
    api_args = {
        "sw_if_index": sw_if_index,
        "queue_id": queue_id,
        "worker_id": worker_id,
        "is_main": False,
    }
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **api_args).get_reply(err_msg)
def vpp_round_robin_rx_placement(
        node, prefix, workers=None):
    """Set Round Robin interface RX placement on all worker threads
    on node.

    If specified, workers limits the number of physical cores used
    for data plane I/O work. Other cores are presumed to do something else,
    e.g. asynchronous crypto processing.
    None means all workers are used for data plane work.

    :param node: Topology nodes.
    :param prefix: Interface name prefix.
    :param workers: Comma separated worker index numbers intended for
        dataplane work.
    :type node: dict
    :type prefix: str
    :type workers: str
    :raises RuntimeError: If the workers filter matches no worker thread.
    """
    thread_data = VPPUtil.vpp_show_threads(node)
    worker_cnt = len(thread_data) - 1
    if not worker_cnt:
        # Single-threaded VPP, nothing to place.
        return
    worker_ids = list()
    if workers:
        for item in thread_data:
            if str(item.cpu_id) in workers.split(u","):
                worker_ids.append(item.id)
    else:
        for item in thread_data:
            if u"vpp_main" not in item.name:
                worker_ids.append(item.id)
    # Guard against modulo-by-zero below when the workers filter
    # did not match any thread.
    if not worker_ids:
        raise RuntimeError(
            f"No dataplane worker threads found on host {node[u'host']}!"
        )

    worker_idx = 0
    for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
        for interface in node[u"interfaces"].values():
            if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
                    and prefix in interface[u"name"]:
                InterfaceUtil.vpp_sw_interface_set_rx_placement(
                    node, placement[u"sw_if_index"], placement[u"queue_id"],
                    worker_ids[worker_idx % len(worker_ids)] - 1
                )
                worker_idx += 1
2012 def vpp_round_robin_rx_placement_on_all_duts(
2013 nodes, prefix, workers=None):
2014 """Set Round Robin interface RX placement on worker threads
2017 If specified, workers limits the number of physical cores used
2018 for data plane I/O work. Other cores are presumed to do something else,
2019 e.g. asynchronous crypto processing.
2020 None means all cores are used for data plane work.
2022 :param nodes: Topology nodes.
2023 :param prefix: Interface name prefix.
2024 :param workers: Comma separated worker index numbers intended for
2030 for node in nodes.values():
2031 if node[u"type"] == NodeType.DUT:
2032 InterfaceUtil.vpp_round_robin_rx_placement(
2033 node, prefix, workers