1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Interface util library."""
16 from time import sleep
17 from enum import IntEnum
19 from ipaddress import ip_address
20 from robot.api import logger
22 from resources.libraries.python.Constants import Constants
23 from resources.libraries.python.CpuUtils import CpuUtils
24 from resources.libraries.python.DUTSetup import DUTSetup
25 from resources.libraries.python.IPAddress import IPAddress
26 from resources.libraries.python.L2Util import L2Util
27 from resources.libraries.python.PapiExecutor import PapiSocketExecutor
28 from resources.libraries.python.parsers.JsonParser import JsonParser
29 from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
30 from resources.libraries.python.topology import NodeType, Topology
31 from resources.libraries.python.VPPUtil import VPPUtil
class InterfaceStatusFlags(IntEnum):
    """Interface status flags.

    Bitmask values reported/accepted by the VPP binary API
    (``sw_interface_set_flags`` / interface dumps).
    """
    # Interface is administratively enabled.
    IF_STATUS_API_FLAG_ADMIN_UP = 1
    # Interface reports carrier / link up.
    IF_STATUS_API_FLAG_LINK_UP = 2
class MtuProto(IntEnum):
    """MTU protocol selector used by VPP per-protocol MTU APIs.

    NOTE(review): only the MPLS member (=3) survived in the mangled
    source; the remaining members were reconstructed to match the VPP
    binary API ``mtu_proto`` enum (vnet/interface_types.api) — confirm
    against the deployed VPP version.
    """
    MTU_PROTO_API_L3 = 0
    MTU_PROTO_API_IP4 = 1
    MTU_PROTO_API_IP6 = 2
    MTU_PROTO_API_MPLS = 3
class LinkDuplex(IntEnum):
    """Link duplex mode, as reported by the VPP interface dump."""
    LINK_DUPLEX_API_UNKNOWN = 0
    LINK_DUPLEX_API_HALF = 1
    LINK_DUPLEX_API_FULL = 2
class SubInterfaceFlags(IntEnum):
    """Sub-interface flags.

    Bitmask combined and passed as ``sub_if_flags`` to the VPP
    ``create_subif`` API call.
    """
    SUB_IF_API_FLAG_NO_TAGS = 1
    SUB_IF_API_FLAG_ONE_TAG = 2
    SUB_IF_API_FLAG_TWO_TAGS = 4
    # Outer tag is 802.1ad (QinQ) instead of 802.1q.
    SUB_IF_API_FLAG_DOT1AD = 8
    # Match only packets with exactly the configured number of tags.
    SUB_IF_API_FLAG_EXACT_MATCH = 16
    # Catch-all sub-interface for otherwise unmatched traffic.
    SUB_IF_API_FLAG_DEFAULT = 32
    SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY = 64
    SUB_IF_API_FLAG_INNER_VLAN_ID_ANY = 128
    SUB_IF_API_FLAG_DOT1AH = 256
class RxMode(IntEnum):
    """Receive mode of an interface rx queue (VPP API ``rx_mode``)."""
    RX_MODE_API_UNKNOWN = 0
    RX_MODE_API_POLLING = 1
    RX_MODE_API_INTERRUPT = 2
    RX_MODE_API_ADAPTIVE = 3
    RX_MODE_API_DEFAULT = 4
class IfType(IntEnum):
    """Interface type.

    NOTE(review): only the HARDWARE member (=0) survived in the mangled
    source; the remaining members were reconstructed to match the VPP
    binary API ``if_type`` enum (vnet/interface_types.api) — confirm
    against the deployed VPP version.
    """
    # A hardware interface.
    IF_API_TYPE_HARDWARE = 0
    # A sub-interface.
    IF_API_TYPE_SUB = 1
    IF_API_TYPE_P2P = 2
    IF_API_TYPE_PIPE = 3
class LinkBondLoadBalanceAlgo(IntEnum):
    """Link bonding load balance algorithm.

    Passed as ``lb`` to the VPP ``bond_create2`` API call.
    """
    BOND_API_LB_ALGO_L2 = 0
    BOND_API_LB_ALGO_L34 = 1
    BOND_API_LB_ALGO_L23 = 2
    # Round-robin.
    BOND_API_LB_ALGO_RR = 3
    # Broadcast.
    BOND_API_LB_ALGO_BC = 4
    # Active-backup.
    BOND_API_LB_ALGO_AB = 5
class LinkBondMode(IntEnum):
    """Link bonding mode.

    Passed as ``mode`` to the VPP ``bond_create2`` API call.
    """
    BOND_API_MODE_ROUND_ROBIN = 1
    BOND_API_MODE_ACTIVE_BACKUP = 2
    BOND_API_MODE_XOR = 3
    BOND_API_MODE_BROADCAST = 4
    BOND_API_MODE_LACP = 5
class RdmaMode(IntEnum):
    """RDMA interface mode.

    NOTE(review): the mangled source shows only AUTO and IBV; the DV
    member (=2) was reconstructed to match the VPP rdma plugin API —
    confirm against the deployed VPP version.
    """
    RDMA_API_MODE_AUTO = 0
    RDMA_API_MODE_IBV = 1
    RDMA_API_MODE_DV = 2
class AfXdpMode(IntEnum):
    """AF_XDP interface mode (VPP af_xdp plugin API)."""
    AF_XDP_API_MODE_AUTO = 0
    AF_XDP_API_MODE_COPY = 1
    AF_XDP_API_MODE_ZERO_COPY = 2
122 """General utilities for managing interfaces"""
def pci_to_int(pci_str):
    """Convert PCI address from string format (0000:18:0a.0) to
    integer representation (169345024).

    Packs the four PCI address fields (domain, bus, slot, function)
    into one integer: domain | bus<<16 | slot<<24 | function<<29.

    :param pci_str: PCI address in string representation.
    :type pci_str: str
    :returns: Integer representation of PCI address.
    :rtype: int
    """
    # Split "dddd:bb:ss.f" into [domain, bus, slot, function].
    pci = list(pci_str.split(u":")[0:2])
    pci.extend(pci_str.split(u":")[2].split(u"."))

    return (int(pci[0], 16) | int(pci[1], 16) << 16 |
            int(pci[2], 16) << 24 | int(pci[3], 16) << 29)
def pci_to_eth(node, pci_str):
    """Convert PCI address on DUT to Linux ethernet name.

    :param node: DUT node.
    :param pci_str: PCI address.
    :type node: dict
    :type pci_str: str
    :returns: Ethernet name.
    :rtype: str
    :raises RuntimeError: If the PCI address has no netdev on the node.
    """
    # The sysfs net/ directory of a bound NIC contains exactly one entry:
    # the kernel interface name.
    cmd = f"basename /sys/bus/pci/devices/{pci_str}/net/*"
    try:
        stdout, _ = exec_cmd_no_error(node, cmd)
    except RuntimeError:
        raise RuntimeError(f"Cannot convert {pci_str} to ethernet name!")

    return stdout.strip()
def get_interface_index(node, interface):
    """Get interface sw_if_index from topology file.

    :param node: Node where the interface is.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: str or int
    :returns: SW interface index.
    :rtype: int
    :raises TypeError: If interface is neither an int-like value nor a
        name resolvable via topology.
    """
    try:
        # Fast path: interface is already a (stringified) numeric index.
        sw_if_index = int(interface)
    except ValueError:
        # Not numeric: resolve interface key, then fall back to name lookup.
        sw_if_index = Topology.get_interface_sw_index(node, interface)
        if sw_if_index is None:
            sw_if_index = Topology.get_interface_sw_index_by_name(
                node, interface
            )
    except TypeError as err:
        raise TypeError(f"Wrong interface format {interface}") from err

    return sw_if_index
def set_interface_state(node, interface, state, if_type=u"key"):
    """Set interface state on a node.

    Function can be used for DUTs as well as for TGs.

    :param node: Node where the interface is.
    :param interface: Interface key or sw_if_index or name.
    :param state: One of 'up' or 'down'.
    :param if_type: Interface type.
    :type node: dict
    :type interface: str or int
    :type state: str
    :type if_type: str
    :raises ValueError: If the interface type is unknown.
    :raises ValueError: If the state of interface is unexpected.
    :raises ValueError: If the node has an unknown node type.
    """
    # Resolve both the VPP sw_if_index (for DUTs) and the Linux interface
    # name (for TG/VM nodes) from the given identifier.
    if if_type == u"key":
        if isinstance(interface, str):
            sw_if_index = Topology.get_interface_sw_index(node, interface)
            iface_name = Topology.get_interface_name(node, interface)
        else:
            sw_if_index = interface
    elif if_type == u"name":
        iface_key = Topology.get_interface_by_name(node, interface)
        if iface_key is not None:
            sw_if_index = Topology.get_interface_sw_index(node, iface_key)
        iface_name = interface
    else:
        raise ValueError(f"Unknown if_type: {if_type}")

    if node[u"type"] == NodeType.DUT:
        if state == u"up":
            flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
        elif state == u"down":
            flags = 0
        else:
            raise ValueError(f"Unexpected interface state: {state}")
        cmd = u"sw_interface_set_flags"
        err_msg = f"Failed to set interface state on host {node[u'host']}"
        args = dict(
            sw_if_index=int(sw_if_index),
            flags=flags
        )
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
    elif node[u"type"] == NodeType.TG or node[u"type"] == NodeType.VM:
        cmd = f"ip link set {iface_name} {state}"
        exec_cmd_no_error(node, cmd, sudo=True)
    else:
        raise ValueError(
            f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
        )
def set_interface_state_pci(
        node, pf_pcis, namespace=None, state=u"up"):
    """Set operational state for interface specified by PCI address.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param namespace: Exec command in namespace. (Optional, Default: none)
    :param state: Up/Down. (Optional, default: up)
    :type node: dict
    :type pf_pcis: list
    :type namespace: str
    :type state: str
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        InterfaceUtil.set_linux_interface_state(
            node, pf_eth, namespace=namespace, state=state
        )
def set_interface_mtu(node, pf_pcis, mtu=9200):
    """Set Ethernet MTU for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param mtu: MTU to set. Default: 9200.
    :type node: dict
    :type pf_pcis: list
    :type mtu: int
    :raises RuntimeError: If failed to set MTU on interface.
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ip link set {pf_eth} mtu {mtu}"
        exec_cmd_no_error(node, cmd, sudo=True)
def set_interface_channels(
        node, pf_pcis, num_queues=1, channel=u"combined"):
    """Set interface channels for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param num_queues: Number of channels. (Optional, Default: 1)
    :param channel: Channel type. (Optional, Default: combined)
    :type node: dict
    :type pf_pcis: list
    :type num_queues: int
    :type channel: str
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ethtool --set-channels {pf_eth} {channel} {num_queues}"
        exec_cmd_no_error(node, cmd, sudo=True)
def set_interface_flow_control(node, pf_pcis, rxf=u"off", txf=u"off"):
    """Set Ethernet flow control for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param rxf: RX flow. (Optional, Default: off).
    :param txf: TX flow. (Optional, Default: off).
    :type node: dict
    :type pf_pcis: list
    :type rxf: str
    :type txf: str
    :raises RuntimeError: If failed to set flow control on interface.
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ethtool -A {pf_eth} rx {rxf} tx {txf}"
        ret_code, _, _ = exec_cmd(node, cmd, sudo=True)
        # 78 is tolerated by the original code — presumably ethtool's
        # "requested setting already active" exit; TODO confirm.
        if int(ret_code) not in (0, 78):
            # Bug fix: original string lacked the f prefix, so {pf_eth}
            # was emitted literally instead of interpolated.
            raise RuntimeError(f"Failed to set flow control on {pf_eth}!")
def set_pci_parameter(node, pf_pcis, key, value):
    """Set PCI parameter for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param key: Key to set.
    :param value: Value to set.
    :type node: dict
    :type pf_pcis: list
    :type key: str
    :type value: str
    """
    for pf_pci in pf_pcis:
        cmd = f"setpci -s {pf_pci} {key}={value}"
        exec_cmd_no_error(node, cmd, sudo=True)
def vpp_set_interface_mtu(node, interface, mtu=9200):
    """Set Ethernet MTU on interface.

    :param node: VPP node.
    :param interface: Interface to setup MTU.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type interface: str or int
    :type mtu: int
    """
    if isinstance(interface, str):
        sw_if_index = Topology.get_interface_sw_index(node, interface)
    else:
        sw_if_index = interface

    cmd = u"hw_interface_set_mtu"
    err_msg = f"Failed to set interface MTU on host {node[u'host']}"
    args = dict(
        sw_if_index=sw_if_index,
        mtu=int(mtu)
    )
    # Best-effort: some drivers refuse MTU changes; log and continue
    # instead of failing the test setup.
    try:
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
    except AssertionError as err:
        logger.debug(f"Setting MTU failed.\n{err}")
def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
    """Set Ethernet MTU on all interfaces.

    :param node: VPP node.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type mtu: int
    """
    for interface in node[u"interfaces"]:
        InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
    """Set Ethernet MTU on all interfaces on all DUTs.

    :param nodes: VPP nodes.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type nodes: dict
    :type mtu: int
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
def vpp_node_interfaces_ready_wait(node, retries=15):
    """Wait until all interfaces with admin-up are in link-up state.

    :param node: Node to wait on.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type node: dict
    :type retries: int
    :raises RuntimeError: If any interface is not in link-up state after
        defined number of retries.
    """
    for _ in range(0, retries):
        not_ready = list()
        out = InterfaceUtil.vpp_get_interface_data(node)
        for interface in out:
            # flags == 1 means admin-up only (link-up bit 2 not set yet).
            if interface.get(u"flags") == 1:
                not_ready.append(interface.get(u"interface_name"))
        if not not_ready:
            break
        logger.info(
            f"Interfaces still not in link-up state:\n{not_ready}"
        )
        sleep(1)
    else:
        err = f"Timeout, interfaces not up:\n{not_ready}" \
            if u"not_ready" in locals() else u"No check executed!"
        raise RuntimeError(err)
def all_vpp_interfaces_ready_wait(nodes, retries=15):
    """Wait until all interfaces with admin-up are in link-up state for all
    nodes in the topology.

    :param nodes: Nodes in the topology.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type nodes: dict
    :type retries: int
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)
def vpp_get_interface_data(node, interface=None):
    """Get all interface data from a VPP node. If a name or
    sw_interface_index is provided, return only data for the matching
    interface(s).

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: List of dictionaries containing data for each interface, or a
        single dictionary for the specified interface.
    :rtype: list or dict
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    def process_if_dump(if_dump):
        """Process interface dump.

        Converts PAPI enum/MAC objects to plain str/int values so the
        result is JSON-friendly.

        :param if_dump: Interface dump.
        :type if_dump: dict
        :returns: Processed interface dump.
        :rtype: dict
        """
        if_dump[u"l2_address"] = str(if_dump[u"l2_address"])
        if_dump[u"b_dmac"] = str(if_dump[u"b_dmac"])
        if_dump[u"b_smac"] = str(if_dump[u"b_smac"])
        if_dump[u"flags"] = if_dump[u"flags"].value
        if_dump[u"type"] = if_dump[u"type"].value
        if_dump[u"link_duplex"] = if_dump[u"link_duplex"].value
        if_dump[u"sub_if_flags"] = if_dump[u"sub_if_flags"].value \
            if hasattr(if_dump[u"sub_if_flags"], u"value") \
            else int(if_dump[u"sub_if_flags"])
        return if_dump

    # Pick the dump field used to match the requested interface.
    if interface is not None:
        if isinstance(interface, str):
            param = u"interface_name"
        elif isinstance(interface, int):
            param = u"sw_if_index"
        else:
            raise TypeError(f"Wrong interface format {interface}")
    else:
        param = u""

    cmd = u"sw_interface_dump"
    args = dict(
        name_filter_valid=False,
        name_filter=u""
    )
    err_msg = f"Failed to get interface dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)
    logger.debug(f"Received data:\n{details!r}")

    data = list() if interface is None else dict()
    for dump in details:
        if interface is None:
            data.append(process_if_dump(dump))
        # Names from the dump are NUL-padded; strip before comparing.
        elif str(dump.get(param)).rstrip(u"\x00") == str(interface):
            data = process_if_dump(dump)
            break

    logger.debug(f"Interface data:\n{data}")
    return data
def vpp_get_interface_name(node, sw_if_index):
    """Get interface name for the given SW interface index from actual
    interface dump.

    :param node: VPP node to get interface data from.
    :param sw_if_index: SW interface index of the specific interface.
    :type node: dict
    :type sw_if_index: int
    :returns: Name of the given interface.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
    # For sub-interfaces, resolve the name of the supporting (parent)
    # interface instead.
    if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data[u"sup_sw_if_index"]
        )

    return if_data.get(u"interface_name")
def vpp_get_interface_sw_index(node, interface_name):
    """Get SW interface index for the given interface name from actual
    interface dump.

    :param node: VPP node to get interface data from.
    :param interface_name: Interface name.
    :type node: dict
    :type interface_name: str
    :returns: SW interface index of the given interface.
    :rtype: int
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)

    return if_data.get(u"sw_if_index")
def vpp_get_interface_mac(node, interface):
    """Get MAC address for the given interface from actual interface dump.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: MAC address.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
    # Sub-interfaces share the MAC of their supporting (parent) interface.
    if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data[u"sup_sw_if_index"])

    return if_data.get(u"l2_address")
def vpp_set_interface_mac(node, interface, mac):
    """Set MAC address for the given interface.

    :param node: VPP node to set interface MAC.
    :param interface: Numeric index or name string of a specific interface.
    :param mac: Required MAC address.
    :type node: dict
    :type interface: int or str
    :type mac: str
    """
    cmd = u"sw_interface_set_mac_address"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        mac_address=L2Util.mac_to_bin(mac)
    )
    # Bug fix: original message was missing a space between the interface
    # and "on host".
    err_msg = f"Failed to set MAC address of interface {interface} " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def tg_set_interface_driver(node, pci_addr, driver):
    """Set interface driver on the TG node.

    :param node: Node to set interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :param driver: Driver name.
    :type node: dict
    :type pci_addr: str
    :type driver: str
    :raises RuntimeError: If unbinding from the current driver fails.
    :raises RuntimeError: If binding to the new driver fails.
    """
    old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
    # Nothing to do if the requested driver is already bound.
    if old_driver == driver:
        return

    ssh = SSH()
    ssh.connect(node)

    # Unbind from current driver
    if old_driver is not None:
        cmd = f"sh -c \"echo {pci_addr} > " \
            f"/sys/bus/pci/drivers/{old_driver}/unbind\""
        ret_code, _, _ = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")

    # Bind to the new driver
    cmd = f"sh -c \"echo {pci_addr} > /sys/bus/pci/drivers/{driver}/bind\""
    ret_code, _, _ = ssh.exec_command_sudo(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
def tg_get_interface_driver(node, pci_addr):
    """Get interface driver from the TG node.

    :param node: Node to get interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :type node: dict
    :type pci_addr: str
    :returns: Interface driver or None if not found.
    :rtype: str
    :raises RuntimeError: If PCI rescan or lspci command execution failed.
    """
    # Thin wrapper: the actual lookup lives in DUTSetup.
    return DUTSetup.get_pci_dev_driver(node, pci_addr)
def tg_set_interfaces_default_driver(node):
    """Set interfaces default driver specified in topology yaml file.

    :param node: Node to setup interfaces driver on (must be TG node).
    :type node: dict
    """
    for interface in node[u"interfaces"].values():
        InterfaceUtil.tg_set_interface_driver(
            node, interface[u"pci_address"], interface[u"driver"]
        )
def update_vpp_interface_data_on_node(node):
    """Update vpp generated interface data for a given node in DICT__nodes.

    Updates interface names, software if index numbers and any other details
    generated specifically by vpp that are unknown before testcase run.
    It does this by dumping interface list from all devices using python
    api, and pairing known information from topology (mac address) to state
    from VPP.

    :param node: Node selected from DICT__nodes.
    :type node: dict
    """
    interface_list = InterfaceUtil.vpp_get_interface_data(node)
    # Index the dump by MAC address for pairing with topology entries.
    interface_dict = dict()
    for ifc in interface_list:
        interface_dict[ifc[u"l2_address"]] = ifc

    for if_name, if_data in node[u"interfaces"].items():
        ifc_dict = interface_dict.get(if_data[u"mac_address"])
        if ifc_dict is not None:
            if_data[u"name"] = ifc_dict[u"interface_name"]
            if_data[u"vpp_sw_index"] = ifc_dict[u"sw_if_index"]
            if_data[u"mtu"] = ifc_dict[u"mtu"][0]
            logger.trace(
                f"Interface {if_name} found by MAC "
                f"{if_data[u'mac_address']}"
            )
        else:
            logger.trace(
                f"Interface {if_name} not found by MAC "
                f"{if_data[u'mac_address']}"
            )
            if_data[u"vpp_sw_index"] = None
def update_nic_interface_names(node):
    """Update interface names based on nic type and PCI address.

    This method updates interface names in the same format as VPP does.

    :param node: Node dictionary.
    :type node: dict
    """
    # NIC model -> VPP interface-name prefix. Replaces the former
    # repetitive if/elif chain; behavior is unchanged.
    model_prefix = {
        u"Intel-XL710": u"FortyGigabitEthernet",
        u"Intel-X710": u"TenGigabitEthernet",
        u"Intel-X520-DA2": u"TenGigabitEthernet",
        u"Cisco-VIC-1385": u"FortyGigabitEthernet",
        u"Cisco-VIC-1227": u"TenGigabitEthernet",
    }
    for ifc in node[u"interfaces"].values():
        # "dddd:bb:ss.f" -> bus/slot/function in lowercase hex, the way
        # VPP renders interface names.
        if_pci = ifc[u"pci_address"].replace(u".", u":").split(u":")
        loc = f"{int(if_pci[1], 16):x}/{int(if_pci[2], 16):x}/" \
            f"{int(if_pci[3], 16):x}"
        prefix = model_prefix.get(ifc[u"model"], u"UnknownEthernet")
        ifc[u"name"] = f"{prefix}{loc}"
def update_nic_interface_names_on_all_duts(nodes):
    """Update interface names based on nic type and PCI address on all DUTs.

    This method updates interface names in the same format as VPP does.

    :param nodes: Topology nodes.
    :type nodes: dict
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.update_nic_interface_names(node)
def update_tg_interface_data_on_node(node):
    """Update interface name for TG/linux node in DICT__nodes.

    .. note::
        # for dev in `ls /sys/class/net/`;
        > do echo "\\"`cat /sys/class/net/$dev/address`\\": \\"$dev\\""; done
        "52:54:00:9f:82:63": "eth0"
        "52:54:00:77:ae:a9": "eth1"
        "52:54:00:e1:8a:0f": "eth2"
        "00:00:00:00:00:00": "lo"

    :param node: Node selected from DICT__nodes.
    :type node: dict
    :raises RuntimeError: If getting of interface name and MAC fails.
    """
    # First setup interface driver specified in yaml file
    InterfaceUtil.tg_set_interfaces_default_driver(node)

    # Get interface names
    ssh = SSH()
    ssh.connect(node)

    # Emit one '"mac": "dev"' line per kernel netdev (see note above).
    cmd = u'for dev in `ls /sys/class/net/`; do echo "\\"`cat ' \
        u'/sys/class/net/$dev/address`\\": \\"$dev\\""; done;'

    ret_code, stdout, _ = ssh.exec_command(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(u"Get interface name and MAC failed")
    # Wrap the emitted lines into a JSON object: {"mac": "dev", ...}.
    tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"

    interfaces = JsonParser().parse_data(tmp)
    for interface in node[u"interfaces"].values():
        name = interfaces.get(interface[u"mac_address"])
        if name is None:
            continue
        interface[u"name"] = name
def iface_update_numa_node(node):
    """For all interfaces from topology file update numa node based on
    information from the node.

    :param node: Node from topology.
    :type node: dict
    :returns: Nothing.
    :raises ValueError: If numa node ia less than 0.
    :raises RuntimeError: If update of numa node failed.
    """
    ssh = SSH()
    for if_key in Topology.get_node_interfaces(node):
        if_pci = Topology.get_interface_pci_addr(node, if_key)
        ssh.connect(node)
        cmd = f"cat /sys/bus/pci/devices/{if_pci}/numa_node"
        # Retry a few times; sysfs reads can transiently fail.
        for _ in range(3):
            ret, out, _ = ssh.exec_command(cmd)
            if ret == 0:
                try:
                    # sysfs reports -1 when NUMA is unknown; map to node 0.
                    numa_node = 0 if int(out) < 0 else int(out)
                except ValueError:
                    logger.trace(
                        f"Reading numa location failed for: {if_pci}"
                    )
                else:
                    Topology.set_interface_numa_node(
                        node, if_key, numa_node
                    )
                    break
        else:
            raise RuntimeError(f"Update numa node failed for: {if_pci}")
def update_all_interface_data_on_all_nodes(
        nodes, skip_tg=False, skip_vpp=False):
    """Update interface names on all nodes in DICT__nodes.

    This method updates the topology dictionary by querying interface lists
    of all nodes mentioned in the topology dictionary.

    :param nodes: Nodes in the topology.
    :param skip_tg: Skip TG node.
    :param skip_vpp: Skip VPP node.
    :type nodes: dict
    :type skip_tg: bool
    :type skip_vpp: bool
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT and not skip_vpp:
            InterfaceUtil.update_vpp_interface_data_on_node(node)
        elif node[u"type"] == NodeType.TG and not skip_tg:
            InterfaceUtil.update_tg_interface_data_on_node(node)
            InterfaceUtil.iface_update_numa_node(node)
def create_vlan_subinterface(node, interface, vlan):
    """Create VLAN sub-interface on node.

    :param node: Node to add VLAN subinterface on.
    :param interface: Interface name or index on which create VLAN
        subinterface.
    :param vlan: VLAN ID of the subinterface to be created.
    :type node: dict
    :type interface: str on int
    :type vlan: int
    :returns: Name and index of created subinterface.
    :rtype: tuple
    :raises RuntimeError: if it is unable to create VLAN subinterface on the
        node or interface cannot be converted.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"create_vlan_subif"
    args = dict(
        sw_if_index=sw_if_index,
        vlan_id=int(vlan)
    )
    err_msg = f"Failed to create VLAN sub-interface on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new sub-interface in the topology.
    if_key = Topology.add_new_port(node, u"vlan_subif")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{vlan}", sw_if_index
def create_vxlan_interface(node, vni, source_ip, destination_ip):
    """Create VXLAN interface and return sw if index of created interface.

    :param node: Node where to create VXLAN interface.
    :param vni: VXLAN Network Identifier.
    :param source_ip: Source IP of a VXLAN Tunnel End Point.
    :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
    :type node: dict
    :type vni: int
    :type source_ip: str
    :type destination_ip: str
    :returns: SW IF INDEX of created interface.
    :rtype: int
    :raises RuntimeError: if it is unable to create VxLAN interface on the
        node.
    """
    cmd = u"vxlan_add_del_tunnel"
    # NOTE(review): the args dict was reconstructed from a mangled source;
    # confirm field names against the vxlan_add_del_tunnel API message.
    args = dict(
        is_add=True,
        instance=Constants.BITWISE_NON_ZERO,
        src_address=IPAddress.create_ip_address_object(
            ip_address(source_ip)
        ),
        dst_address=IPAddress.create_ip_address_object(
            ip_address(destination_ip)
        ),
        mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
        encap_vrf_id=0,
        decap_next_index=Constants.BITWISE_NON_ZERO,
        vni=int(vni)
    )
    err_msg = f"Failed to create VXLAN tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new tunnel interface in the topology.
    if_key = Topology.add_new_port(node, u"vxlan_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return sw_if_index
def set_vxlan_bypass(node, interface=None):
    """Add the 'ip4-vxlan-bypass' graph node for a given interface.

    By adding the IPv4 vxlan-bypass graph node to an interface, the node
    checks for and validate input vxlan packet and bypass ip4-lookup,
    ip4-local, ip4-udp-lookup nodes to speedup vxlan packet forwarding.
    This node will cause extra overhead to for non-vxlan packets which is
    kept at a minimum.

    :param node: Node where to set VXLAN bypass.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :raises RuntimeError: if it failed to set VXLAN bypass on interface.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"sw_interface_set_vxlan_bypass"
    args = dict(
        is_ipv6=False,
        sw_if_index=sw_if_index,
        enable=True
    )
    err_msg = f"Failed to set VXLAN bypass on interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_replies(err_msg)
def vxlan_dump(node, interface=None):
    """Get VxLAN data for the given interface.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
        If None, information about all VxLAN interfaces is returned.
    :type node: dict
    :type interface: int or str
    :returns: Dictionary containing data for the given VxLAN interface or if
        interface=None, the list of dictionaries with all VxLAN interfaces.
    :rtype: dict or list
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    def process_vxlan_dump(vxlan_dump):
        """Process vxlan dump.

        Converts PAPI address objects to plain strings.

        :param vxlan_dump: Vxlan interface dump.
        :type vxlan_dump: dict
        :returns: Processed vxlan interface dump.
        :rtype: dict
        """
        vxlan_dump[u"src_address"] = str(vxlan_dump[u"src_address"])
        vxlan_dump[u"dst_address"] = str(vxlan_dump[u"dst_address"])
        return vxlan_dump

    if interface is not None:
        sw_if_index = InterfaceUtil.get_interface_index(node, interface)
    else:
        # All-ones index requests a dump of every VXLAN tunnel.
        sw_if_index = int(Constants.BITWISE_NON_ZERO)

    cmd = u"vxlan_tunnel_dump"
    args = dict(
        sw_if_index=sw_if_index
    )
    err_msg = f"Failed to get VXLAN dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)

    data = list() if interface is None else dict()
    for dump in details:
        if interface is None:
            data.append(process_vxlan_dump(dump))
        elif dump[u"sw_if_index"] == sw_if_index:
            data = process_vxlan_dump(dump)
            break

    logger.debug(f"VXLAN data:\n{data}")
    return data
def create_subinterface(
        node, interface, sub_id, outer_vlan_id=None, inner_vlan_id=None,
        type_subif=None):
    """Create sub-interface on node. It is possible to set required
    sub-interface type and VLAN tag(s).

    :param node: Node to add sub-interface.
    :param interface: Interface name on which create sub-interface.
    :param sub_id: ID of the sub-interface to be created.
    :param outer_vlan_id: Optional outer VLAN ID.
    :param inner_vlan_id: Optional inner VLAN ID.
    :param type_subif: Optional type of sub-interface. Values supported by
        VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
        [default_sub]
    :type node: dict
    :type interface: str or int
    :type sub_id: int
    :type outer_vlan_id: int
    :type inner_vlan_id: int
    :type type_subif: str
    :returns: Name and index of created sub-interface.
    :rtype: tuple
    :raises RuntimeError: If it is not possible to create sub-interface.
    """
    subif_types = type_subif.split()

    # Accumulate sub-interface flag bits from the requested type keywords.
    flags = 0
    if u"no_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_NO_TAGS
    if u"one_tag" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_ONE_TAG
    if u"two_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_TWO_TAGS
    if u"dot1ad" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DOT1AD
    if u"exact_match" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_EXACT_MATCH
    if u"default_sub" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DEFAULT
    if type_subif == u"default_sub":
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_INNER_VLAN_ID_ANY\
            | SubInterfaceFlags.SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY

    cmd = u"create_subif"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        sub_id=int(sub_id),
        # flags stays a plain int when no keyword matched.
        sub_if_flags=flags.value if hasattr(flags, u"value")
        else int(flags),
        outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
        inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0
    )
    err_msg = f"Failed to create sub-interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new sub-interface in the topology.
    if_key = Topology.add_new_port(node, u"subinterface")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{sub_id}", sw_if_index
def create_gre_tunnel_interface(node, source_ip, destination_ip):
    """Create GRE tunnel interface on node.

    :param node: VPP node to add tunnel interface.
    :param source_ip: Source of the GRE tunnel.
    :param destination_ip: Destination of the GRE tunnel.
    :type node: dict
    :type source_ip: str
    :type destination_ip: str
    :returns: Name and index of created GRE tunnel interface.
    :rtype: tuple
    :raises RuntimeError: If unable to create GRE tunnel interface.
    """
    cmd = u"gre_tunnel_add_del"
    # NOTE(review): tunnel sub-structure reconstructed from a mangled
    # source; confirm field names against the gre_tunnel_add_del API.
    tunnel = dict(
        type=0,
        instance=Constants.BITWISE_NON_ZERO,
        src=str(source_ip),
        dst=str(destination_ip),
        outer_fib_id=0,
        session_id=0
    )
    args = dict(
        is_add=1,
        tunnel=tunnel
    )
    err_msg = f"Failed to create GRE tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new tunnel interface in the topology.
    if_key = Topology.add_new_port(node, u"gre_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return ifc_name, sw_if_index
def vpp_create_loopback(node, mac=None):
    """Create loopback interface on VPP node.

    :param node: Node to create loopback interface on.
    :param mac: Optional MAC address for loopback interface.
    :type node: dict
    :type mac: str
    :returns: SW interface index.
    :rtype: int
    :raises RuntimeError: If it is not possible to create loopback on the
        node.
    """
    cmd = u"create_loopback_instance"
    args = dict(
        mac_address=L2Util.mac_to_bin(mac) if mac else 0,
        is_specified=False,
        user_instance=0,
    )
    err_msg = f"Failed to create loopback interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new loopback in the topology.
    if_key = Topology.add_new_port(node, u"loopback")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
    if mac:
        # Read back the MAC actually assigned by VPP.
        mac = InterfaceUtil.vpp_get_interface_mac(node, ifc_name)
        Topology.update_interface_mac_address(node, if_key, mac)

    return sw_if_index
def vpp_create_bond_interface(
        node, mode, load_balance=None, mac=None, gso=False):
    """Create bond interface on VPP node.

    :param node: DUT node from topology.
    :param mode: Link bonding mode.
    :param load_balance: Load balance (optional, valid for xor and lacp
        modes, otherwise ignored). Default: None.
    :param mac: MAC address to assign to the bond interface (optional).
    :param gso: Enable GSO support (optional). Default: False.
    :type node: dict
    :type mode: str
    :type load_balance: str
    :type mac: str
    :type gso: bool
    :returns: Interface key (name) in topology.
    :rtype: str
    :raises RuntimeError: If it is not possible to create bond interface on
        the node.
    """
    cmd = u"bond_create2"
    args = dict(
        id=int(Constants.BITWISE_NON_ZERO),
        use_custom_mac=bool(mac is not None),
        mac_address=L2Util.mac_to_bin(mac) if mac else None,
        # Map e.g. "active-backup" -> LinkBondMode.BOND_API_MODE_ACTIVE_BACKUP.
        mode=getattr(
            LinkBondMode,
            f"BOND_API_MODE_{mode.replace(u'-', u'_').upper()}"
        ).value,
        lb=0 if load_balance is None else getattr(
            LinkBondLoadBalanceAlgo,
            f"BOND_API_LB_ALGO_{load_balance.upper()}"
        ).value,
        numa_only=False,
        enable_gso=gso
    )
    err_msg = f"Failed to create bond interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx=u"eth_bond"
    )
    if_key = Topology.get_interface_by_sw_index(node, sw_if_index)

    return if_key
1147 def add_eth_interface(
1148 node, ifc_name=None, sw_if_index=None, ifc_pfx=None,
1150 """Add ethernet interface to current topology.
1152 :param node: DUT node from topology.
1153 :param ifc_name: Name of the interface.
1154 :param sw_if_index: SW interface index.
1155 :param ifc_pfx: Interface key prefix.
1156 :param host_if_key: Host interface key from topology file.
1159 :type sw_if_index: int
1161 :type host_if_key: str
1163 if_key = Topology.add_new_port(node, ifc_pfx)
1165 if ifc_name and sw_if_index is None:
1166 sw_if_index = InterfaceUtil.vpp_get_interface_sw_index(
1168 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
1169 if sw_if_index and ifc_name is None:
1170 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
1171 Topology.update_interface_name(node, if_key, ifc_name)
1172 ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_index)
1173 Topology.update_interface_mac_address(node, if_key, ifc_mac)
1174 if host_if_key is not None:
1175 Topology.set_interface_numa_node(
1176 node, if_key, Topology.get_interface_numa_node(
1180 Topology.update_interface_pci_address(
1181 node, if_key, Topology.get_interface_pci_addr(node, host_if_key)
1185 def vpp_create_avf_interface(
1186 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0):
1187 """Create AVF interface on VPP node.
1189 :param node: DUT node from topology.
1190 :param if_key: Interface key from topology file of interface
1191 to be bound to i40evf driver.
1192 :param num_rx_queues: Number of RX queues.
1193 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1194 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1197 :type num_rx_queues: int
1200 :returns: AVF interface key (name) in topology.
1202 :raises RuntimeError: If it is not possible to create AVF interface on
1205 PapiSocketExecutor.run_cli_cmd(
1206 node, u"set logging class avf level debug"
1210 vf_pci_addr = Topology.get_interface_pci_addr(node, if_key)
1212 pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
1214 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1218 err_msg = f"Failed to create AVF interface on host {node[u'host']}"
1219 with PapiSocketExecutor(node) as papi_exec:
1220 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1222 InterfaceUtil.add_eth_interface(
1223 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
1227 return Topology.get_interface_by_sw_index(node, sw_if_index)
1230 def vpp_create_af_xdp_interface(
1231 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
1233 """Create AF_XDP interface on VPP node.
1235 :param node: DUT node from topology.
1236 :param if_key: Physical interface key from topology file of interface
1237 to be bound to compatible driver.
1238 :param num_rx_queues: Number of RX queues. (Optional, Default: none)
1239 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1240 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1241 :param mode: AF_XDP interface mode. (Optional, Default: auto).
1244 :type num_rx_queues: int
1248 :returns: Interface key (name) in topology file.
1250 :raises RuntimeError: If it is not possible to create AF_XDP interface
1253 PapiSocketExecutor.run_cli_cmd(
1254 node, u"set logging class af_xdp level debug"
1257 cmd = u"af_xdp_create"
1258 pci_addr = Topology.get_interface_pci_addr(node, if_key)
1260 name=InterfaceUtil.pci_to_eth(node, pci_addr),
1261 host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
1262 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1265 mode=getattr(AfXdpMode, f"AF_XDP_API_MODE_{mode.upper()}").value
1267 err_msg = f"Failed to create AF_XDP interface on host {node[u'host']}"
1268 with PapiSocketExecutor(node) as papi_exec:
1269 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1271 InterfaceUtil.vpp_set_interface_mac(
1272 node, sw_if_index, Topology.get_interface_mac(node, if_key)
1274 InterfaceUtil.add_eth_interface(
1275 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_af_xdp",
1279 return Topology.get_interface_by_sw_index(node, sw_if_index)
1282 def vpp_create_rdma_interface(
1283 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
1285 """Create RDMA interface on VPP node.
1287 :param node: DUT node from topology.
1288 :param if_key: Physical interface key from topology file of interface
1289 to be bound to rdma-core driver.
1290 :param num_rx_queues: Number of RX queues.
1291 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1292 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1293 :param mode: RDMA interface mode - auto/ibv/dv.
1296 :type num_rx_queues: int
1300 :returns: Interface key (name) in topology file.
1302 :raises RuntimeError: If it is not possible to create RDMA interface on
1305 PapiSocketExecutor.run_cli_cmd(
1306 node, u"set logging class rdma level debug"
1309 cmd = u"rdma_create_v2"
1310 pci_addr = Topology.get_interface_pci_addr(node, if_key)
1312 name=InterfaceUtil.pci_to_eth(node, pci_addr),
1313 host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
1314 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1317 mode=getattr(RdmaMode, f"RDMA_API_MODE_{mode.upper()}").value,
1318 # Note: Set True for non-jumbo packets.
1322 err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
1323 with PapiSocketExecutor(node) as papi_exec:
1324 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1326 InterfaceUtil.vpp_set_interface_mac(
1327 node, sw_if_index, Topology.get_interface_mac(node, if_key)
1329 InterfaceUtil.add_eth_interface(
1330 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_rdma",
1334 return Topology.get_interface_by_sw_index(node, sw_if_index)
1337 def vpp_add_bond_member(node, interface, bond_if):
1338 """Add member interface to bond interface on VPP node.
1340 :param node: DUT node from topology.
1341 :param interface: Physical interface key from topology file.
1342 :param bond_if: Load balance
1344 :type interface: str
1346 :raises RuntimeError: If it is not possible to add member to bond
1347 interface on the node.
1349 cmd = u"bond_add_member"
1351 sw_if_index=Topology.get_interface_sw_index(node, interface),
1352 bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
1354 is_long_timeout=False
1356 err_msg = f"Failed to add member {interface} to bond interface " \
1357 f"{bond_if} on host {node[u'host']}"
1358 with PapiSocketExecutor(node) as papi_exec:
1359 papi_exec.add(cmd, **args).get_reply(err_msg)
1362 def vpp_show_bond_data_on_node(node, verbose=False):
1363 """Show (detailed) bond information on VPP node.
1365 :param node: DUT node from topology.
1366 :param verbose: If detailed information is required or not.
1370 cmd = u"sw_bond_interface_dump"
1371 err_msg = f"Failed to get bond interface dump on host {node[u'host']}"
1373 data = f"Bond data on node {node[u'host']}:\n"
1374 with PapiSocketExecutor(node) as papi_exec:
1375 details = papi_exec.add(cmd).get_details(err_msg)
1377 for bond in details:
1378 data += f"{bond[u'interface_name']}\n"
1379 data += u" mode: {m}\n".format(
1380 m=bond[u"mode"].name.replace(u"BOND_API_MODE_", u"").lower()
1382 data += u" load balance: {lb}\n".format(
1383 lb=bond[u"lb"].name.replace(u"BOND_API_LB_ALGO_", u"").lower()
1385 data += f" number of active members: {bond[u'active_members']}\n"
1387 member_data = InterfaceUtil.vpp_bond_member_dump(
1388 node, Topology.get_interface_by_sw_index(
1389 node, bond[u"sw_if_index"]
1392 for member in member_data:
1393 if not member[u"is_passive"]:
1394 data += f" {member[u'interface_name']}\n"
1395 data += f" number of members: {bond[u'members']}\n"
1397 for member in member_data:
1398 data += f" {member[u'interface_name']}\n"
1399 data += f" interface id: {bond[u'id']}\n"
1400 data += f" sw_if_index: {bond[u'sw_if_index']}\n"
1404 def vpp_bond_member_dump(node, interface):
1405 """Get bond interface slave(s) data on VPP node.
1407 :param node: DUT node from topology.
1408 :param interface: Physical interface key from topology file.
1410 :type interface: str
1411 :returns: Bond slave interface data.
1414 cmd = u"sw_member_interface_dump"
1416 sw_if_index=Topology.get_interface_sw_index(node, interface)
1418 err_msg = f"Failed to get slave dump on host {node[u'host']}"
1420 with PapiSocketExecutor(node) as papi_exec:
1421 details = papi_exec.add(cmd, **args).get_details(err_msg)
1423 logger.debug(f"Member data:\n{details}")
1427 def vpp_show_bond_data_on_all_nodes(nodes, verbose=False):
1428 """Show (detailed) bond information on all VPP nodes in DICT__nodes.
1430 :param nodes: Nodes in the topology.
1431 :param verbose: If detailed information is required or not.
1435 for node_data in nodes.values():
1436 if node_data[u"type"] == NodeType.DUT:
1437 InterfaceUtil.vpp_show_bond_data_on_node(node_data, verbose)
1440 def vpp_enable_input_acl_interface(
1441 node, interface, ip_version, table_index):
1442 """Enable input acl on interface.
1444 :param node: VPP node to setup interface for input acl.
1445 :param interface: Interface to setup input acl.
1446 :param ip_version: Version of IP protocol.
1447 :param table_index: Classify table index.
1449 :type interface: str or int
1450 :type ip_version: str
1451 :type table_index: int
1453 cmd = u"input_acl_set_interface"
1455 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1456 ip4_table_index=table_index if ip_version == u"ip4"
1457 else Constants.BITWISE_NON_ZERO,
1458 ip6_table_index=table_index if ip_version == u"ip6"
1459 else Constants.BITWISE_NON_ZERO,
1460 l2_table_index=table_index if ip_version == u"l2"
1461 else Constants.BITWISE_NON_ZERO,
1463 err_msg = f"Failed to enable input acl on interface {interface}"
1464 with PapiSocketExecutor(node) as papi_exec:
1465 papi_exec.add(cmd, **args).get_reply(err_msg)
1468 def get_interface_classify_table(node, interface):
1469 """Get name of classify table for the given interface.
1471 TODO: Move to Classify.py.
1473 :param node: VPP node to get data from.
1474 :param interface: Name or sw_if_index of a specific interface.
1476 :type interface: str or int
1477 :returns: Classify table name.
1480 if isinstance(interface, str):
1481 sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
1483 sw_if_index = interface
1485 cmd = u"classify_table_by_interface"
1487 sw_if_index=sw_if_index
1489 err_msg = f"Failed to get classify table name by interface {interface}"
1490 with PapiSocketExecutor(node) as papi_exec:
1491 reply = papi_exec.add(cmd, **args).get_reply(err_msg)
1496 def get_sw_if_index(node, interface_name):
1497 """Get sw_if_index for the given interface from actual interface dump.
1499 FIXME: Delete and redirect callers to vpp_get_interface_sw_index.
1501 :param node: VPP node to get interface data from.
1502 :param interface_name: Name of the specific interface.
1504 :type interface_name: str
1505 :returns: sw_if_index of the given interface.
1508 interface_data = InterfaceUtil.vpp_get_interface_data(
1509 node, interface=interface_name
1511 return interface_data.get(u"sw_if_index")
1514 def vxlan_gpe_dump(node, interface_name=None):
1515 """Get VxLAN GPE data for the given interface.
1517 :param node: VPP node to get interface data from.
1518 :param interface_name: Name of the specific interface. If None,
1519 information about all VxLAN GPE interfaces is returned.
1521 :type interface_name: str
1522 :returns: Dictionary containing data for the given VxLAN GPE interface
1523 or if interface=None, the list of dictionaries with all VxLAN GPE
1525 :rtype: dict or list
1527 def process_vxlan_gpe_dump(vxlan_dump):
1528 """Process vxlan_gpe dump.
1530 :param vxlan_dump: Vxlan_gpe nterface dump.
1531 :type vxlan_dump: dict
1532 :returns: Processed vxlan_gpe interface dump.
1535 if vxlan_dump[u"is_ipv6"]:
1536 vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"])
1537 vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"])
1539 vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"][0:4])
1540 vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"][0:4])
1543 if interface_name is not None:
1544 sw_if_index = InterfaceUtil.get_interface_index(
1545 node, interface_name
1548 sw_if_index = int(Constants.BITWISE_NON_ZERO)
1550 cmd = u"vxlan_gpe_tunnel_dump"
1552 sw_if_index=sw_if_index
1554 err_msg = f"Failed to get VXLAN-GPE dump on host {node[u'host']}"
1555 with PapiSocketExecutor(node) as papi_exec:
1556 details = papi_exec.add(cmd, **args).get_details(err_msg)
1558 data = list() if interface_name is None else dict()
1559 for dump in details:
1560 if interface_name is None:
1561 data.append(process_vxlan_gpe_dump(dump))
1562 elif dump[u"sw_if_index"] == sw_if_index:
1563 data = process_vxlan_gpe_dump(dump)
1566 logger.debug(f"VXLAN-GPE data:\n{data}")
1570 def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
1571 """Assign VPP interface to specific VRF/FIB table.
1573 :param node: VPP node where the FIB and interface are located.
1574 :param interface: Interface to be assigned to FIB.
1575 :param table_id: VRF table ID.
1576 :param ipv6: Assign to IPv6 table. Default False.
1578 :type interface: str or int
1582 cmd = u"sw_interface_set_table"
1584 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1586 vrf_id=int(table_id)
1588 err_msg = f"Failed to assign interface {interface} to FIB table"
1589 with PapiSocketExecutor(node) as papi_exec:
1590 papi_exec.add(cmd, **args).get_reply(err_msg)
1593 def set_linux_interface_mac(
1594 node, interface, mac, namespace=None, vf_id=None):
1595 """Set MAC address for interface in linux.
1597 :param node: Node where to execute command.
1598 :param interface: Interface in namespace.
1599 :param mac: MAC to be assigned to interface.
1600 :param namespace: Execute command in namespace. Optional
1601 :param vf_id: Virtual Function id. Optional
1603 :type interface: str
1605 :type namespace: str
1608 mac_str = f"vf {vf_id} mac {mac}" if vf_id is not None \
1609 else f"address {mac}"
1610 ns_str = f"ip netns exec {namespace}" if namespace else u""
1612 cmd = f"{ns_str} ip link set {interface} {mac_str}"
1613 exec_cmd_no_error(node, cmd, sudo=True)
1616 def set_linux_interface_trust_on(
1617 node, interface, namespace=None, vf_id=None):
1618 """Set trust on (promisc) for interface in linux.
1620 :param node: Node where to execute command.
1621 :param interface: Interface in namespace.
1622 :param namespace: Execute command in namespace. Optional
1623 :param vf_id: Virtual Function id. Optional
1625 :type interface: str
1626 :type namespace: str
1629 trust_str = f"vf {vf_id} trust on" if vf_id is not None else u"trust on"
1630 ns_str = f"ip netns exec {namespace}" if namespace else u""
1632 cmd = f"{ns_str} ip link set dev {interface} {trust_str}"
1633 exec_cmd_no_error(node, cmd, sudo=True)
1636 def set_linux_interface_spoof_off(
1637 node, interface, namespace=None, vf_id=None):
1638 """Set spoof off for interface in linux.
1640 :param node: Node where to execute command.
1641 :param interface: Interface in namespace.
1642 :param namespace: Execute command in namespace. Optional
1643 :param vf_id: Virtual Function id. Optional
1645 :type interface: str
1646 :type namespace: str
1649 spoof_str = f"vf {vf_id} spoof off" if vf_id is not None \
1651 ns_str = f"ip netns exec {namespace}" if namespace else u""
1653 cmd = f"{ns_str} ip link set dev {interface} {spoof_str}"
1654 exec_cmd_no_error(node, cmd, sudo=True)
1657 def set_linux_interface_state(
1658 node, interface, namespace=None, state=u"up"):
1659 """Set operational state for interface in linux.
1661 :param node: Node where to execute command.
1662 :param interface: Interface in namespace.
1663 :param namespace: Execute command in namespace. Optional
1664 :param state: Up/Down.
1666 :type interface: str
1667 :type namespace: str
1670 ns_str = f"ip netns exec {namespace}" if namespace else u""
1672 cmd = f"{ns_str} ip link set dev {interface} {state}"
1673 exec_cmd_no_error(node, cmd, sudo=True)
1676 def init_interface(node, ifc_key, driver, numvfs=0, osi_layer=u"L2"):
1677 """Init PCI device. Check driver compatibility and bind to proper
1678 drivers. Optionally create NIC VFs.
1680 :param node: DUT node.
1681 :param ifc_key: Interface key from topology file.
1682 :param driver: Base driver to use.
1683 :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
1684 :param osi_layer: OSI Layer type to initialize TG with.
1685 Default value "L2" sets linux interface spoof off.
1690 :type osi_layer: str
1691 :returns: Virtual Function topology interface keys.
1693 :raises RuntimeError: If a reason preventing initialization is found.
1695 kernel_driver = Topology.get_interface_driver(node, ifc_key)
1697 if driver == u"avf":
1698 if kernel_driver not in (
1699 u"ice", u"iavf", u"i40e", u"i40evf"):
1701 f"AVF needs ice or i40e compatible driver, not "
1702 f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
1704 vf_keys = InterfaceUtil.init_generic_interface(
1705 node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
1707 elif driver == u"af_xdp":
1708 if kernel_driver not in (
1709 u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core"):
1711 f"AF_XDP needs ice or i40e or rdma compatible driver, not "
1712 f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
1714 vf_keys = InterfaceUtil.init_generic_interface(
1715 node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
1720 def init_generic_interface(node, ifc_key, numvfs=0, osi_layer=u"L2"):
1721 """Init PCI device. Bind to proper drivers. Optionally create NIC VFs.
1723 :param node: DUT node.
1724 :param ifc_key: Interface key from topology file.
1725 :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
1726 :param osi_layer: OSI Layer type to initialize TG with.
1727 Default value "L2" sets linux interface spoof off.
1731 :type osi_layer: str
1732 :returns: Virtual Function topology interface keys.
1734 :raises RuntimeError: If a reason preventing initialization is found.
1736 # Read PCI address and driver.
1737 pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
1738 pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
1739 uio_driver = Topology.get_uio_driver(node)
1740 kernel_driver = Topology.get_interface_driver(node, ifc_key)
1741 current_driver = DUTSetup.get_pci_dev_driver(
1742 node, pf_pci_addr.replace(u":", r"\:"))
1744 VPPUtil.stop_vpp_service(node)
1745 if current_driver != kernel_driver:
1746 # PCI device must be re-bound to kernel driver before creating VFs.
1747 DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
1748 # Stop VPP to prevent deadlock.
1749 # Unbind from current driver.
1750 DUTSetup.pci_driver_unbind(node, pf_pci_addr)
1751 # Bind to kernel driver.
1752 DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
1754 # Initialize PCI VFs.
1755 DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
1758 # Set MAC address and bind each virtual function to uio driver.
1759 for vf_id in range(numvfs):
1760 vf_mac_addr = u":".join(
1761 [pf_mac_addr[0], pf_mac_addr[2], pf_mac_addr[3], pf_mac_addr[4],
1762 pf_mac_addr[5], f"{vf_id:02x}"
1766 pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"
1767 InterfaceUtil.set_linux_interface_trust_on(
1768 node, pf_dev, vf_id=vf_id
1770 if osi_layer == u"L2":
1771 InterfaceUtil.set_linux_interface_spoof_off(
1772 node, pf_dev, vf_id=vf_id
1774 InterfaceUtil.set_linux_interface_mac(
1775 node, pf_dev, vf_mac_addr, vf_id=vf_id
1777 InterfaceUtil.set_linux_interface_state(
1778 node, pf_dev, state=u"up"
1781 DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
1782 DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
1784 # Add newly created ports into topology file
1785 vf_ifc_name = f"{ifc_key}_vif"
1786 vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
1787 vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
1788 Topology.update_interface_name(
1789 node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
1791 Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
1792 Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
1793 Topology.set_interface_numa_node(
1794 node, vf_ifc_key, Topology.get_interface_numa_node(
1798 vf_ifc_keys.append(vf_ifc_key)
1803 def vpp_sw_interface_rx_placement_dump(node):
1804 """Dump VPP interface RX placement on node.
1806 :param node: Node to run command on.
1808 :returns: Thread mapping information as a list of dictionaries.
1811 cmd = u"sw_interface_rx_placement_dump"
1812 err_msg = f"Failed to run '{cmd}' PAPI command on host {node[u'host']}!"
1813 with PapiSocketExecutor(node) as papi_exec:
1814 for ifc in node[u"interfaces"].values():
1815 if ifc[u"vpp_sw_index"] is not None:
1816 papi_exec.add(cmd, sw_if_index=ifc[u"vpp_sw_index"])
1817 details = papi_exec.get_details(err_msg)
1818 return sorted(details, key=lambda k: k[u"sw_if_index"])
1821 def vpp_sw_interface_rx_placement_dump_on_all_duts(nodes):
1822 """Dump VPP interface RX placement on all given nodes.
1824 :param nodes: Nodes to run command on.
1826 :returns: Thread mapping information as a list of dictionaries.
1829 for node in nodes.values():
1830 if node[u"type"] == NodeType.DUT:
1831 InterfaceUtil.vpp_sw_interface_rx_placement_dump(node)
1834 def vpp_sw_interface_set_rx_placement(
1835 node, sw_if_index, queue_id, worker_id):
1836 """Set interface RX placement to worker on node.
1838 :param node: Node to run command on.
1839 :param sw_if_index: VPP SW interface index.
1840 :param queue_id: VPP interface queue ID.
1841 :param worker_id: VPP worker ID (indexing from 0).
1843 :type sw_if_index: int
1845 :type worker_id: int
1846 :raises RuntimeError: If failed to run command on host or if no API
1849 cmd = u"sw_interface_set_rx_placement"
1850 err_msg = f"Failed to set interface RX placement to worker " \
1851 f"on host {node[u'host']}!"
1853 sw_if_index=sw_if_index,
1855 worker_id=worker_id,
1858 with PapiSocketExecutor(node) as papi_exec:
1859 papi_exec.add(cmd, **args).get_reply(err_msg)
1862 def vpp_round_robin_rx_placement(
1863 node, prefix, dp_worker_limit=None):
1864 """Set Round Robin interface RX placement on all worker threads
1867 If specified, dp_core_limit limits the number of physical cores used
1868 for data plane I/O work. Other cores are presumed to do something else,
1869 e.g. asynchronous crypto processing.
1870 None means all workers are used for data plane work.
1871 Note this keyword specifies workers, not cores.
1873 :param node: Topology nodes.
1874 :param prefix: Interface name prefix.
1875 :param dp_worker_limit: How many cores for data plane work.
1878 :type dp_worker_limit: Optional[int]
1881 worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
1882 if dp_worker_limit is not None:
1883 worker_cnt = min(worker_cnt, dp_worker_limit)
1886 for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
1887 for interface in node[u"interfaces"].values():
1888 if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
1889 and prefix in interface[u"name"]:
1890 InterfaceUtil.vpp_sw_interface_set_rx_placement(
1891 node, placement[u"sw_if_index"], placement[u"queue_id"],
1892 worker_id % worker_cnt
1897 def vpp_round_robin_rx_placement_on_all_duts(
1898 nodes, prefix, dp_core_limit=None):
1899 """Set Round Robin interface RX placement on all worker threads
1902 If specified, dp_core_limit limits the number of physical cores used
1903 for data plane I/O work. Other cores are presumed to do something else,
1904 e.g. asynchronous crypto processing.
1905 None means all cores are used for data plane work.
1906 Note this keyword specifies cores, not workers.
1908 :param nodes: Topology nodes.
1909 :param prefix: Interface name prefix.
1910 :param dp_worker_limit: How many cores for data plane work.
1913 :type dp_worker_limit: Optional[int]
1915 for node in nodes.values():
1916 if node[u"type"] == NodeType.DUT:
1917 dp_worker_limit = CpuUtils.worker_count_from_cores_and_smt(
1918 phy_cores=dp_core_limit,
1919 smt_used=CpuUtils.is_smt_enabled(node[u"cpuinfo"]),
1921 InterfaceUtil.vpp_round_robin_rx_placement(
1922 node, prefix, dp_worker_limit