1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Interface util library."""
16 from time import sleep
17 from enum import IntEnum
19 from ipaddress import ip_address
20 from robot.api import logger
22 from resources.libraries.python.Constants import Constants
23 from resources.libraries.python.CpuUtils import CpuUtils
24 from resources.libraries.python.DUTSetup import DUTSetup
25 from resources.libraries.python.IPAddress import IPAddress
26 from resources.libraries.python.L2Util import L2Util
27 from resources.libraries.python.PapiExecutor import PapiSocketExecutor
28 from resources.libraries.python.parsers.JsonParser import JsonParser
29 from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
30 from resources.libraries.python.topology import NodeType, Topology
31 from resources.libraries.python.VPPUtil import VPPUtil
class InterfaceStatusFlags(IntEnum):
    """Interface status flags.

    Bitmask values matching the VPP binary API interface status flags.
    """
    # Administrative state is up (set via sw_interface_set_flags).
    IF_STATUS_API_FLAG_ADMIN_UP = 1
    # Physical link/carrier is up.
    IF_STATUS_API_FLAG_LINK_UP = 2
class MtuProto(IntEnum):
    """MTU protocol.

    NOTE(review): only the MPLS member is visible in this chunk —
    confirm the remaining per-protocol members against the VPP API.
    """
    MTU_PROTO_API_MPLS = 3
class LinkDuplex(IntEnum):
    """Link duplex mode."""
    LINK_DUPLEX_API_UNKNOWN = 0
    LINK_DUPLEX_API_HALF = 1
    LINK_DUPLEX_API_FULL = 2
class SubInterfaceFlags(IntEnum):
    """Sub-interface flags.

    Power-of-two bitmask values; combined with bitwise OR when creating
    sub-interfaces (see create_subinterface).
    """
    SUB_IF_API_FLAG_NO_TAGS = 1
    SUB_IF_API_FLAG_ONE_TAG = 2
    SUB_IF_API_FLAG_TWO_TAGS = 4
    SUB_IF_API_FLAG_DOT1AD = 8
    SUB_IF_API_FLAG_EXACT_MATCH = 16
    SUB_IF_API_FLAG_DEFAULT = 32
    SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY = 64
    SUB_IF_API_FLAG_INNER_VLAN_ID_ANY = 128
    SUB_IF_API_FLAG_DOT1AH = 256
class RxMode(IntEnum):
    """RX mode."""
    RX_MODE_API_UNKNOWN = 0
    RX_MODE_API_POLLING = 1
    RX_MODE_API_INTERRUPT = 2
    RX_MODE_API_ADAPTIVE = 3
    RX_MODE_API_DEFAULT = 4
class IfType(IntEnum):
    """Interface type.

    NOTE(review): only the hardware member is visible in this chunk —
    confirm the remaining members against the VPP API definition.
    """
    IF_API_TYPE_HARDWARE = 0
class LinkBondLoadBalanceAlgo(IntEnum):
    """Link bonding load balance algorithm.

    Member names are resolved dynamically from the textual load-balance
    option in vpp_create_bond_interface (BOND_API_LB_ALGO_<NAME>).
    """
    BOND_API_LB_ALGO_L2 = 0
    BOND_API_LB_ALGO_L34 = 1
    BOND_API_LB_ALGO_L23 = 2
    BOND_API_LB_ALGO_RR = 3
    BOND_API_LB_ALGO_BC = 4
    BOND_API_LB_ALGO_AB = 5
class LinkBondMode(IntEnum):
    """Link bonding mode.

    Member names are resolved dynamically from the textual mode option
    in vpp_create_bond_interface (BOND_API_MODE_<NAME>).
    """
    BOND_API_MODE_ROUND_ROBIN = 1
    BOND_API_MODE_ACTIVE_BACKUP = 2
    BOND_API_MODE_XOR = 3
    BOND_API_MODE_BROADCAST = 4
    BOND_API_MODE_LACP = 5
class RdmaMode(IntEnum):
    """RDMA interface mode."""
    RDMA_API_MODE_AUTO = 0
    RDMA_API_MODE_IBV = 1
class AfXdpMode(IntEnum):
    """AF_XDP interface mode."""
    AF_XDP_API_MODE_AUTO = 0
    AF_XDP_API_MODE_COPY = 1
    AF_XDP_API_MODE_ZERO_COPY = 2
122 """General utilities for managing interfaces"""
def pci_to_int(pci_str):
    """Convert PCI address from string format (0000:18:0a.0) to
    integer representation (169345024).

    The packed layout is: domain in bits 0-15, bus at bit 16,
    slot at bit 24 and function at bit 29.

    :param pci_str: PCI address in string representation.
    :type pci_str: str
    :returns: Integer representation of PCI address.
    :rtype: int
    """
    fields = pci_str.split(u":")
    # fields: [domain, bus, "slot.function"] -> flatten to four values.
    slot_func = fields[2].split(u".")
    domain, bus = int(fields[0], 16), int(fields[1], 16)
    slot, func = int(slot_func[0], 16), int(slot_func[1], 16)
    return domain | bus << 16 | slot << 24 | func << 29
def pci_to_eth(node, pci_str):
    """Convert PCI address on DUT to Linux ethernet name.

    :param node: DUT node.
    :param pci_str: PCI address.
    :type node: dict
    :type pci_str: str
    :returns: Ethernet name.
    :rtype: str
    :raises RuntimeError: If the PCI address cannot be converted.
    """
    cmd = f"basename /sys/bus/pci/devices/{pci_str}/net/*"
    # The glob only resolves when the device is bound to a kernel driver;
    # restore the dropped try/except so failures carry the PCI address.
    try:
        stdout, _ = exec_cmd_no_error(node, cmd)
    except RuntimeError:
        raise RuntimeError(f"Cannot convert {pci_str} to ethernet name!")
    return stdout.strip()
def get_interface_index(node, interface):
    """Get interface sw_if_index from topology file.

    :param node: Node where the interface is.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: str or int
    :returns: SW interface index.
    :rtype: int
    :raises TypeError: If interface is neither a numeric index nor a
        known interface key/name.
    """
    try:
        # Interface may already be given as a numeric sw_if_index.
        sw_if_index = int(interface)
    except ValueError:
        # Not numeric: look it up by topology key, then by interface name.
        sw_if_index = Topology.get_interface_sw_index(node, interface)
        if sw_if_index is None:
            sw_if_index = \
                Topology.get_interface_sw_index_by_name(node, interface)
    except TypeError as err:
        raise TypeError(f"Wrong interface format {interface}") from err

    return sw_if_index
def set_interface_state(node, interface, state, if_type=u"key"):
    """Set interface state on a node.

    Function can be used for DUTs as well as for TGs.

    :param node: Node where the interface is.
    :param interface: Interface key or sw_if_index or name.
    :param state: One of 'up' or 'down'.
    :param if_type: Interface type.
    :type node: dict
    :type interface: str or int
    :type state: str
    :type if_type: str
    :raises ValueError: If the interface type is unknown.
    :raises ValueError: If the state of interface is unexpected.
    :raises ValueError: If the node has an unknown node type.
    """
    # Resolve sw_if_index (for VPP) and Linux name (for TG/VM) up front.
    if if_type == u"key":
        if isinstance(interface, str):
            sw_if_index = Topology.get_interface_sw_index(node, interface)
            iface_name = Topology.get_interface_name(node, interface)
        else:
            sw_if_index = interface
    elif if_type == u"name":
        iface_key = Topology.get_interface_by_name(node, interface)
        if iface_key is not None:
            sw_if_index = Topology.get_interface_sw_index(node, iface_key)
        iface_name = interface
    else:
        raise ValueError(f"Unknown if_type: {if_type}")

    if node[u"type"] == NodeType.DUT:
        if state == u"up":
            flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
        elif state == u"down":
            # Clearing all flags brings the interface admin-down.
            flags = 0
        else:
            raise ValueError(f"Unexpected interface state: {state}")
        cmd = u"sw_interface_set_flags"
        err_msg = f"Failed to set interface state on host {node[u'host']}"
        args = dict(
            sw_if_index=int(sw_if_index),
            flags=flags
        )
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
    elif node[u"type"] == NodeType.TG or node[u"type"] == NodeType.VM:
        cmd = f"ip link set {iface_name} {state}"
        exec_cmd_no_error(node, cmd, sudo=True)
    else:
        raise ValueError(
            f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
        )
def set_interface_state_pci(
        node, pf_pcis, namespace=None, state=u"up"):
    """Set operational state for interface specified by PCI address.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param namespace: Exec command in namespace. (Optional, Default: none)
    :param state: Up/Down. (Optional, default: up)
    :type node: dict
    :type pf_pcis: list
    :type namespace: str
    :type state: str
    """
    for pf_pci in pf_pcis:
        # Translate PCI address to the Linux interface name first.
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        InterfaceUtil.set_linux_interface_state(
            node, pf_eth, namespace=namespace, state=state
        )
def set_interface_mtu(node, pf_pcis, mtu=9200):
    """Set Ethernet MTU for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param mtu: MTU to set. Default: 9200.
    :type node: dict
    :type pf_pcis: list
    :type mtu: int
    :raises RuntimeError: If failed to set MTU on interface.
    """
    for pf_pci in pf_pcis:
        # Translate PCI address to the Linux interface name.
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ip link set {pf_eth} mtu {mtu}"
        exec_cmd_no_error(node, cmd, sudo=True)
def set_interface_channels(
        node, pf_pcis, num_queues=1, channel=u"combined"):
    """Set interface channels for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param num_queues: Number of channels. (Optional, Default: 1)
    :param channel: Channel type. (Optional, Default: combined)
    :type node: dict
    :type pf_pcis: list
    :type num_queues: int
    :type channel: str
    """
    for pf_pci in pf_pcis:
        # Translate PCI address to the Linux interface name.
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ethtool --set-channels {pf_eth} {channel} {num_queues}"
        exec_cmd_no_error(node, cmd, sudo=True)
def set_interface_flow_control(node, pf_pcis, rxf=u"off", txf=u"off"):
    """Set Ethernet flow control for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param rxf: RX flow. (Optional, Default: off).
    :param txf: TX flow. (Optional, Default: off).
    :type node: dict
    :type pf_pcis: list
    :type rxf: str
    :type txf: str
    :raises RuntimeError: If failed to set flow control on interface.
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ethtool -A {pf_eth} rx {rxf} tx {txf}"
        ret_code, _, _ = exec_cmd(node, cmd, sudo=True)
        # Exit code 78 is tolerated alongside 0 — presumably "no change
        # needed"; confirm against the ethtool man page.
        if int(ret_code) not in (0, 78):
            # Bug fix: the message lacked the f-prefix, so the literal
            # text "{pf_eth}" was raised instead of the interface name.
            raise RuntimeError(f"Failed to set flow control on {pf_eth}!")
def set_pci_parameter(node, pf_pcis, key, value):
    """Set PCI parameter for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param key: Key to set.
    :param value: Value to set.
    :type node: dict
    :type pf_pcis: list
    :type key: str
    :type value: str
    """
    for pf_pci in pf_pcis:
        # setpci writes the value into the device's PCI config space.
        cmd = f"setpci -s {pf_pci} {key}={value}"
        exec_cmd_no_error(node, cmd, sudo=True)
def vpp_set_interface_mtu(node, interface, mtu=9200):
    """Set Ethernet MTU on interface.

    :param node: VPP node.
    :param interface: Interface to setup MTU.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type interface: str or int
    :type mtu: int
    """
    if isinstance(interface, str):
        sw_if_index = Topology.get_interface_sw_index(node, interface)
    else:
        sw_if_index = interface

    cmd = u"hw_interface_set_mtu"
    err_msg = f"Failed to set interface MTU on host {node[u'host']}"
    args = dict(
        sw_if_index=sw_if_index,
        mtu=int(mtu)
    )
    try:
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
    except AssertionError as err:
        # Best effort: some drivers reject MTU changes; log and continue.
        logger.debug(f"Setting MTU failed.\n{err}")
def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
    """Set Ethernet MTU on all interfaces.

    :param node: VPP node.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type mtu: int
    """
    for interface in node[u"interfaces"]:
        InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
    """Set Ethernet MTU on all interfaces on all DUTs.

    :param nodes: VPP nodes.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type nodes: dict
    :type mtu: int
    """
    for node in nodes.values():
        # Only DUT nodes run VPP; TG/VM nodes are skipped.
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
def vpp_node_interfaces_ready_wait(node, retries=15):
    """Wait until all interfaces with admin-up are in link-up state.

    :param node: Node to wait on.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type node: dict
    :type retries: int
    :raises RuntimeError: If any interface is not in link-up state after
        defined number of retries.
    """
    for _ in range(0, retries):
        not_ready = list()
        out = InterfaceUtil.vpp_get_interface_data(node)
        for interface in out:
            # flags == 1 means admin-up without link-up (see
            # InterfaceStatusFlags).
            if interface.get(u"flags") == 1:
                not_ready.append(interface.get(u"interface_name"))
        if not not_ready:
            break
        logger.debug(
            f"Interfaces still not in link-up state:\n{not_ready}"
        )
        sleep(1)
    else:
        err = f"Timeout, interfaces not up:\n{not_ready}" \
            if u"not_ready" in locals() else u"No check executed!"
        raise RuntimeError(err)
def all_vpp_interfaces_ready_wait(nodes, retries=15):
    """Wait until all interfaces with admin-up are in link-up state for all
    nodes in the topology.

    :param nodes: Nodes in the topology.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type nodes: dict
    :type retries: int
    """
    for node in nodes.values():
        # Only DUT nodes run VPP; TG/VM nodes are skipped.
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)
def vpp_get_interface_data(node, interface=None):
    """Get all interface data from a VPP node. If a name or
    sw_interface_index is provided, return only data for the matching
    interface(s).

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: List of dictionaries containing data for each interface, or a
        single dictionary for the specified interface.
    :rtype: list or dict
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    def process_if_dump(if_dump):
        """Process interface dump.

        Converts PAPI objects (MAC addresses, enum flags) in place into
        plain strings/ints so the result is easily comparable/loggable.

        :param if_dump: Interface dump.
        :type if_dump: dict
        :returns: Processed interface dump.
        :rtype: dict
        """
        if_dump[u"l2_address"] = str(if_dump[u"l2_address"])
        if_dump[u"b_dmac"] = str(if_dump[u"b_dmac"])
        if_dump[u"b_smac"] = str(if_dump[u"b_smac"])
        if_dump[u"flags"] = if_dump[u"flags"].value
        if_dump[u"type"] = if_dump[u"type"].value
        if_dump[u"link_duplex"] = if_dump[u"link_duplex"].value
        if_dump[u"sub_if_flags"] = if_dump[u"sub_if_flags"].value \
            if hasattr(if_dump[u"sub_if_flags"], u"value") \
            else int(if_dump[u"sub_if_flags"])
        return if_dump

    # Choose which dump field to match the requested interface against.
    if interface is not None:
        if isinstance(interface, str):
            param = u"interface_name"
        elif isinstance(interface, int):
            param = u"sw_if_index"
        else:
            raise TypeError(f"Wrong interface format {interface}")
    else:
        param = u""

    cmd = u"sw_interface_dump"
    args = dict(
        name_filter_valid=False,
        name_filter=u""
    )
    err_msg = f"Failed to get interface dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)
    logger.debug(f"Received data:\n{details!r}")

    data = list() if interface is None else dict()
    for dump in details:
        if interface is None:
            data.append(process_if_dump(dump))
        # Names from the API are NUL-padded; strip before comparing.
        elif str(dump.get(param)).rstrip(u"\x00") == str(interface):
            data = process_if_dump(dump)
            break

    logger.debug(f"Interface data:\n{data}")
    return data
def vpp_get_interface_name(node, sw_if_index):
    """Get interface name for the given SW interface index from actual
    interface dump.

    :param node: VPP node to get interface data from.
    :param sw_if_index: SW interface index of the specific interface.
    :type node: dict
    :type sw_if_index: int
    :returns: Name of the given interface.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
    if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
        # Sub-interface: report the supervising (parent) interface name.
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data[u"sup_sw_if_index"]
        )
    return if_data.get(u"interface_name")
def vpp_get_interface_sw_index(node, interface_name):
    """Get the SW interface index for the given interface name from the
    actual interface dump.

    :param node: VPP node to get interface data from.
    :param interface_name: Interface name.
    :type node: dict
    :type interface_name: str
    :returns: SW interface index of the given interface.
    :rtype: int
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)

    return if_data.get(u"sw_if_index")
def vpp_get_interface_mac(node, interface):
    """Get MAC address for the given interface from actual interface dump.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: MAC address.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
    if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
        # Sub-interface: report the supervising (parent) interface MAC.
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data[u"sup_sw_if_index"])

    return if_data.get(u"l2_address")
def vpp_set_interface_mac(node, interface, mac):
    """Set MAC address for the given interface.

    :param node: VPP node to set interface MAC.
    :param interface: Numeric index or name string of a specific interface.
    :param mac: Required MAC address.
    :type node: dict
    :type interface: int or str
    :type mac: str
    """
    cmd = u"sw_interface_set_mac_address"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        mac_address=L2Util.mac_to_bin(mac)
    )
    # Bug fix: added missing space between "{interface}" and "on host".
    err_msg = f"Failed to set MAC address of interface {interface} " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def tg_set_interface_driver(node, pci_addr, driver):
    """Set interface driver on the TG node.

    :param node: Node to set interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :param driver: Driver name.
    :type node: dict
    :type pci_addr: str
    :type driver: str
    :raises RuntimeError: If unbinding from the current driver fails.
    :raises RuntimeError: If binding to the new driver fails.
    """
    old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
    if old_driver == driver:
        # Already bound to the requested driver; nothing to do.
        return

    ssh = SSH()
    ssh.connect(node)

    # Unbind from current driver
    if old_driver is not None:
        cmd = f"sh -c \"echo {pci_addr} > " \
            f"/sys/bus/pci/drivers/{old_driver}/unbind\""
        ret_code, _, _ = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")

    # Bind to the new driver
    cmd = f"sh -c \"echo {pci_addr} > /sys/bus/pci/drivers/{driver}/bind\""
    ret_code, _, _ = ssh.exec_command_sudo(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
def tg_get_interface_driver(node, pci_addr):
    """Get interface driver from the TG node.

    :param node: Node to get interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :type node: dict
    :type pci_addr: str
    :returns: Interface driver or None if not found.
    :rtype: str
    :raises RuntimeError: If PCI rescan or lspci command execution failed.
    """
    # Thin wrapper; the actual lookup lives in DUTSetup.
    return DUTSetup.get_pci_dev_driver(node, pci_addr)
def tg_set_interfaces_default_driver(node):
    """Set interfaces default driver specified in topology yaml file.

    :param node: Node to setup interfaces driver on (must be TG node).
    :type node: dict
    """
    for interface in node[u"interfaces"].values():
        InterfaceUtil.tg_set_interface_driver(
            node, interface[u"pci_address"], interface[u"driver"]
        )
def update_vpp_interface_data_on_node(node):
    """Update vpp generated interface data for a given node in DICT__nodes.

    Updates interface names, software if index numbers and any other details
    generated specifically by vpp that are unknown before testcase run.
    It does this by dumping interface list from all devices using python
    api, and pairing known information from topology (mac address) to state
    from VPP.

    :param node: Node selected from DICT__nodes.
    :type node: dict
    """
    interface_list = InterfaceUtil.vpp_get_interface_data(node)
    # Index the dump by MAC so topology entries can be matched in O(1).
    interface_dict = dict()
    for ifc in interface_list:
        interface_dict[ifc[u"l2_address"]] = ifc

    for if_name, if_data in node[u"interfaces"].items():
        ifc_dict = interface_dict.get(if_data[u"mac_address"])
        if ifc_dict is not None:
            if_data[u"name"] = ifc_dict[u"interface_name"]
            if_data[u"vpp_sw_index"] = ifc_dict[u"sw_if_index"]
            if_data[u"mtu"] = ifc_dict[u"mtu"][0]
            logger.debug(
                f"Interface {if_name} found by MAC "
                f"{if_data[u'mac_address']}"
            )
        else:
            logger.warn(
                f"Interface {if_name} not found by MAC "
                f"{if_data[u'mac_address']}"
            )
            if_data[u"vpp_sw_index"] = None
def update_nic_interface_names(node):
    """Update interface names based on nic type and PCI address.

    This method updates interface names in the same format as VPP does.

    :param node: Node dictionary.
    :type node: dict
    """
    for ifc in node[u"interfaces"].values():
        # "dddd:bb:ss.f" -> ["dddd", "bb", "ss", "f"]; VPP names use
        # hex bus/slot/function without leading zeros.
        if_pci = ifc[u"pci_address"].replace(u".", u":").split(u":")
        loc = f"{int(if_pci[1], 16):x}/{int(if_pci[2], 16):x}/" \
            f"{int(if_pci[3], 16):x}"
        if ifc[u"model"] == u"Intel-XL710":
            ifc[u"name"] = f"FortyGigabitEthernet{loc}"
        elif ifc[u"model"] == u"Intel-X710":
            ifc[u"name"] = f"TenGigabitEthernet{loc}"
        elif ifc[u"model"] == u"Intel-X520-DA2":
            ifc[u"name"] = f"TenGigabitEthernet{loc}"
        elif ifc[u"model"] == u"Cisco-VIC-1385":
            ifc[u"name"] = f"FortyGigabitEthernet{loc}"
        elif ifc[u"model"] == u"Cisco-VIC-1227":
            ifc[u"name"] = f"TenGigabitEthernet{loc}"
        else:
            # Restored dropped else: unknown models get a generic name.
            ifc[u"name"] = f"UnknownEthernet{loc}"
def update_nic_interface_names_on_all_duts(nodes):
    """Update interface names based on nic type and PCI address on all DUTs.

    This method updates interface names in the same format as VPP does.

    :param nodes: Topology nodes.
    :type nodes: dict
    """
    for node in nodes.values():
        # Only DUT nodes carry VPP-style interface names.
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.update_nic_interface_names(node)
def update_tg_interface_data_on_node(node):
    """Update interface name for TG/linux node in DICT__nodes.

    .. note::
        # for dev in `ls /sys/class/net/`;
        > do echo "\"`cat /sys/class/net/$dev/address`\": \"$dev\""; done
        "52:54:00:9f:82:63": "eth0"
        "52:54:00:77:ae:a9": "eth1"
        "52:54:00:e1:8a:0f": "eth2"
        "00:00:00:00:00:00": "lo"

    :param node: Node selected from DICT__nodes.
    :type node: dict
    :raises RuntimeError: If getting of interface name and MAC fails.
    """
    # First setup interface driver specified in yaml file
    InterfaceUtil.tg_set_interfaces_default_driver(node)

    # Get interface names
    ssh = SSH()
    ssh.connect(node)

    # Emit one `"mac": "dev"` pair per interface so the output can be
    # parsed as JSON after wrapping in braces.
    cmd = u'for dev in `ls /sys/class/net/`; do echo "\\"`cat ' \
        u'/sys/class/net/$dev/address`\\": \\"$dev\\""; done;'

    ret_code, stdout, _ = ssh.exec_command(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(u"Get interface name and MAC failed")
    tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"

    interfaces = JsonParser().parse_data(tmp)
    for interface in node[u"interfaces"].values():
        name = interfaces.get(interface[u"mac_address"])
        if name is None:
            # MAC not present on the node; leave the entry untouched.
            continue
        interface[u"name"] = name
def iface_update_numa_node(node):
    """For all interfaces from topology file update numa node based on
    information from the node.

    :param node: Node from topology.
    :type node: dict
    :returns: Nothing.
    :raises ValueError: If numa node is less than 0.
    :raises RuntimeError: If update of numa node failed.
    """
    ssh = SSH()
    for if_key in Topology.get_node_interfaces(node):
        if_pci = Topology.get_interface_pci_addr(node, if_key)
        ssh.connect(node)
        cmd = f"cat /sys/bus/pci/devices/{if_pci}/numa_node"
        # Retry a few times; sysfs reads can transiently fail.
        for _ in range(3):
            ret, out, _ = ssh.exec_command(cmd)
            if ret == 0:
                try:
                    # sysfs reports -1 when NUMA is unknown; map to node 0.
                    numa_node = 0 if int(out) < 0 else int(out)
                except ValueError:
                    logger.trace(
                        f"Reading numa location failed for: {if_pci}"
                    )
                else:
                    Topology.set_interface_numa_node(
                        node, if_key, numa_node
                    )
                    break
        else:
            raise RuntimeError(f"Update numa node failed for: {if_pci}")
def update_all_interface_data_on_all_nodes(
        nodes, skip_tg=False, skip_vpp=False):
    """Update interface names on all nodes in DICT__nodes.

    This method updates the topology dictionary by querying interface lists
    of all nodes mentioned in the topology dictionary.

    :param nodes: Nodes in the topology.
    :param skip_tg: Skip TG node.
    :param skip_vpp: Skip VPP node.
    :type nodes: dict
    :type skip_tg: bool
    :type skip_vpp: bool
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT and not skip_vpp:
            InterfaceUtil.update_vpp_interface_data_on_node(node)
        elif node[u"type"] == NodeType.TG and not skip_tg:
            InterfaceUtil.update_tg_interface_data_on_node(node)
        # NUMA placement is refreshed for every node that was updated.
        InterfaceUtil.iface_update_numa_node(node)
def create_vlan_subinterface(node, interface, vlan):
    """Create VLAN sub-interface on node.

    :param node: Node to add VLAN subinterface on.
    :param interface: Interface name or index on which create VLAN
        subinterface.
    :param vlan: VLAN ID of the subinterface to be created.
    :type node: dict
    :type interface: str or int
    :type vlan: int
    :returns: Name and index of created subinterface.
    :rtype: tuple
    :raises RuntimeError: if it is unable to create VLAN subinterface on the
        node or interface cannot be converted.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"create_vlan_subif"
    args = dict(
        sw_if_index=sw_if_index,
        vlan_id=int(vlan)
    )
    err_msg = f"Failed to create VLAN sub-interface on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new sub-interface in the topology.
    if_key = Topology.add_new_port(node, u"vlan_subif")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{vlan}", sw_if_index
def create_vxlan_interface(node, vni, source_ip, destination_ip):
    """Create VXLAN interface and return sw if index of created interface.

    :param node: Node where to create VXLAN interface.
    :param vni: VXLAN Network Identifier.
    :param source_ip: Source IP of a VXLAN Tunnel End Point.
    :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
    :type node: dict
    :type vni: int
    :type source_ip: str
    :type destination_ip: str
    :returns: SW IF INDEX of created interface.
    :rtype: int
    :raises RuntimeError: if it is unable to create VxLAN interface on the
        node.
    """
    cmd = u"vxlan_add_del_tunnel"
    args = dict(
        is_add=True,
        # BITWISE_NON_ZERO lets VPP auto-assign the instance/indices.
        instance=Constants.BITWISE_NON_ZERO,
        src_address=IPAddress.create_ip_address_object(
            ip_address(source_ip)
        ),
        dst_address=IPAddress.create_ip_address_object(
            ip_address(destination_ip)
        ),
        mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
        encap_vrf_id=0,
        decap_next_index=Constants.BITWISE_NON_ZERO,
        vni=int(vni)
    )
    err_msg = f"Failed to create VXLAN tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new tunnel interface in the topology.
    if_key = Topology.add_new_port(node, u"vxlan_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return sw_if_index
def set_vxlan_bypass(node, interface=None):
    """Add the 'ip4-vxlan-bypass' graph node for a given interface.

    By adding the IPv4 vxlan-bypass graph node to an interface, the node
    checks for and validate input vxlan packet and bypass ip4-lookup,
    ip4-local, ip4-udp-lookup nodes to speedup vxlan packet forwarding.
    This node will cause extra overhead to for non-vxlan packets which is
    negligible.

    :param node: Node where to set VXLAN bypass.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :raises RuntimeError: if it failed to set VXLAN bypass on interface.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"sw_interface_set_vxlan_bypass"
    args = dict(
        is_ipv6=False,
        sw_if_index=sw_if_index,
        enable=True
    )
    err_msg = f"Failed to set VXLAN bypass on interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_replies(err_msg)
def vxlan_dump(node, interface=None):
    """Get VxLAN data for the given interface.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
        If None, information about all VxLAN interfaces is returned.
    :type node: dict
    :type interface: int or str
    :returns: Dictionary containing data for the given VxLAN interface or if
        interface=None, the list of dictionaries with all VxLAN interfaces.
    :rtype: dict or list
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    def process_vxlan_dump(vxlan_dump):
        """Process vxlan dump.

        Converts PAPI address objects to plain strings in place.

        :param vxlan_dump: Vxlan interface dump.
        :type vxlan_dump: dict
        :returns: Processed vxlan interface dump.
        :rtype: dict
        """
        vxlan_dump[u"src_address"] = str(vxlan_dump[u"src_address"])
        vxlan_dump[u"dst_address"] = str(vxlan_dump[u"dst_address"])
        return vxlan_dump

    if interface is not None:
        sw_if_index = InterfaceUtil.get_interface_index(node, interface)
    else:
        # BITWISE_NON_ZERO requests a dump of all VXLAN tunnels.
        sw_if_index = int(Constants.BITWISE_NON_ZERO)

    cmd = u"vxlan_tunnel_dump"
    args = dict(
        sw_if_index=sw_if_index
    )
    err_msg = f"Failed to get VXLAN dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)

    data = list() if interface is None else dict()
    for dump in details:
        if interface is None:
            data.append(process_vxlan_dump(dump))
        elif dump[u"sw_if_index"] == sw_if_index:
            data = process_vxlan_dump(dump)
            break

    logger.debug(f"VXLAN data:\n{data}")
    return data
def create_subinterface(
        node, interface, sub_id, outer_vlan_id=None, inner_vlan_id=None,
        type_subif=None):
    """Create sub-interface on node. It is possible to set required
    sub-interface type and VLAN tag(s).

    :param node: Node to add sub-interface.
    :param interface: Interface name on which create sub-interface.
    :param sub_id: ID of the sub-interface to be created.
    :param outer_vlan_id: Optional outer VLAN ID.
    :param inner_vlan_id: Optional inner VLAN ID.
    :param type_subif: Optional type of sub-interface. Values supported by
        VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
        [default_sub]
    :type node: dict
    :type interface: str or int
    :type sub_id: int
    :type outer_vlan_id: int
    :type inner_vlan_id: int
    :type type_subif: str
    :returns: Name and index of created sub-interface.
    :rtype: tuple
    :raises RuntimeError: If it is not possible to create sub-interface.
    """
    subif_types = type_subif.split()

    # Accumulate the requested flags; stays a plain int if none match.
    flags = 0
    if u"no_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_NO_TAGS
    if u"one_tag" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_ONE_TAG
    if u"two_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_TWO_TAGS
    if u"dot1ad" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DOT1AD
    if u"exact_match" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_EXACT_MATCH
    if u"default_sub" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DEFAULT
    if type_subif == u"default_sub":
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_INNER_VLAN_ID_ANY\
            | SubInterfaceFlags.SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY

    cmd = u"create_subif"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        sub_id=int(sub_id),
        sub_if_flags=flags.value if hasattr(flags, u"value")
        else int(flags),
        outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
        inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0
    )
    err_msg = f"Failed to create sub-interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new sub-interface in the topology.
    if_key = Topology.add_new_port(node, u"subinterface")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{sub_id}", sw_if_index
def create_gre_tunnel_interface(node, source_ip, destination_ip):
    """Create GRE tunnel interface on node.

    :param node: VPP node to add tunnel interface.
    :param source_ip: Source of the GRE tunnel.
    :param destination_ip: Destination of the GRE tunnel.
    :type node: dict
    :type source_ip: str
    :type destination_ip: str
    :returns: Name and index of created GRE tunnel interface.
    :rtype: tuple
    :raises RuntimeError: If unable to create GRE tunnel interface.
    """
    cmd = u"gre_tunnel_add_del"
    # NOTE(review): tunnel sub-struct fields reconstructed — confirm
    # defaults against the VPP gre.api definition.
    tunnel = dict(
        type=0,
        instance=Constants.BITWISE_NON_ZERO,
        src=str(source_ip),
        dst=str(destination_ip),
        outer_fib_id=0,
        session_id=0
    )
    args = dict(
        is_add=1,
        tunnel=tunnel
    )
    err_msg = f"Failed to create GRE tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new tunnel interface in the topology.
    if_key = Topology.add_new_port(node, u"gre_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return ifc_name, sw_if_index
def create_gtpu_tunnel_interface(node, teid, source_ip, destination_ip):
    """Create GTPU interface and return sw if index of created interface.

    :param node: Node where to create GTPU interface.
    :param teid: GTPU Tunnel Endpoint Identifier.
    :param source_ip: Source IP of a GTPU Tunnel End Point.
    :param destination_ip: Destination IP of a GTPU Tunnel End Point.
    :type node: dict
    :type teid: int
    :type source_ip: str
    :type destination_ip: str
    :returns: SW IF INDEX of created interface.
    :rtype: int
    :raises RuntimeError: if it is unable to create GTPU interface on the
        node.
    """
    cmd = u"gtpu_add_del_tunnel"
    # NOTE(review): non-visible argument defaults reconstructed — confirm
    # against the VPP gtpu.api definition.
    args = dict(
        is_add=True,
        src_address=IPAddress.create_ip_address_object(
            ip_address(source_ip)
        ),
        dst_address=IPAddress.create_ip_address_object(
            ip_address(destination_ip)
        ),
        mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
        encap_vrf_id=0,
        decap_next_index=2,
        teid=teid
    )
    err_msg = f"Failed to create GTPU tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new tunnel interface in the topology.
    if_key = Topology.add_new_port(node, u"gtpu_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return sw_if_index
def vpp_create_loopback(node, mac=None):
    """Create loopback interface on VPP node.

    :param node: Node to create loopback interface on.
    :param mac: Optional MAC address for loopback interface.
    :type node: dict
    :type mac: str
    :returns: SW interface index.
    :rtype: int
    :raises RuntimeError: If it is not possible to create loopback on the
        node.
    """
    cmd = u"create_loopback_instance"
    args = dict(
        mac_address=L2Util.mac_to_bin(mac) if mac else 0,
        is_specified=False,
        user_instance=0,
    )
    err_msg = f"Failed to create loopback interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new loopback in the topology, including its MAC
    # (VPP assigns one when no MAC was requested).
    if_key = Topology.add_new_port(node, u"loopback")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
    if mac is None:
        mac = InterfaceUtil.vpp_get_interface_mac(node, ifc_name)
    Topology.update_interface_mac_address(node, if_key, mac)

    return sw_if_index
1141 def vpp_create_bond_interface(
1142 node, mode, load_balance=None, mac=None, gso=False):
1143 """Create bond interface on VPP node.
1145 :param node: DUT node from topology.
1146 :param mode: Link bonding mode.
1147 :param load_balance: Load balance (optional, valid for xor and lacp
1148 modes, otherwise ignored). Default: None.
1149 :param mac: MAC address to assign to the bond interface (optional).
1151 :param gso: Enable GSO support (optional). Default: False.
1154 :type load_balance: str
1157 :returns: Interface key (name) in topology.
1159 :raises RuntimeError: If it is not possible to create bond interface on
1162 cmd = u"bond_create2"
1164 id=int(Constants.BITWISE_NON_ZERO),
1165 use_custom_mac=bool(mac is not None),
1166 mac_address=L2Util.mac_to_bin(mac) if mac else None,
1169 f"BOND_API_MODE_{mode.replace(u'-', u'_').upper()}"
1171 lb=0 if load_balance is None else getattr(
1172 LinkBondLoadBalanceAlgo,
1173 f"BOND_API_LB_ALGO_{load_balance.upper()}"
1178 err_msg = f"Failed to create bond interface on host {node[u'host']}"
1179 with PapiSocketExecutor(node) as papi_exec:
1180 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1182 InterfaceUtil.add_eth_interface(
1183 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_bond"
1185 if_key = Topology.get_interface_by_sw_index(node, sw_if_index)
1190 def add_eth_interface(
1191 node, ifc_name=None, sw_if_index=None, ifc_pfx=None,
1193 """Add ethernet interface to current topology.
1195 :param node: DUT node from topology.
1196 :param ifc_name: Name of the interface.
1197 :param sw_if_index: SW interface index.
1198 :param ifc_pfx: Interface key prefix.
1199 :param host_if_key: Host interface key from topology file.
1202 :type sw_if_index: int
1204 :type host_if_key: str
1206 if_key = Topology.add_new_port(node, ifc_pfx)
1208 if ifc_name and sw_if_index is None:
1209 sw_if_index = InterfaceUtil.vpp_get_interface_sw_index(
1211 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
1212 if sw_if_index and ifc_name is None:
1213 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
1214 Topology.update_interface_name(node, if_key, ifc_name)
1215 ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_index)
1216 Topology.update_interface_mac_address(node, if_key, ifc_mac)
1217 if host_if_key is not None:
1218 Topology.set_interface_numa_node(
1219 node, if_key, Topology.get_interface_numa_node(
1223 Topology.update_interface_pci_address(
1224 node, if_key, Topology.get_interface_pci_addr(node, host_if_key)
1228 def vpp_create_avf_interface(
1229 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0):
1230 """Create AVF interface on VPP node.
1232 :param node: DUT node from topology.
1233 :param if_key: Interface key from topology file of interface
1234 to be bound to i40evf driver.
1235 :param num_rx_queues: Number of RX queues.
1236 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1237 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1240 :type num_rx_queues: int
1243 :returns: AVF interface key (name) in topology.
1245 :raises RuntimeError: If it is not possible to create AVF interface on
1248 PapiSocketExecutor.run_cli_cmd(
1249 node, u"set logging class avf level debug"
1253 vf_pci_addr = Topology.get_interface_pci_addr(node, if_key)
1255 pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
1257 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1261 err_msg = f"Failed to create AVF interface on host {node[u'host']}"
1263 # FIXME: Remove once the fw/driver is upgraded.
1265 with PapiSocketExecutor(node) as papi_exec:
1267 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(
1271 except AssertionError:
1272 logger.error(err_msg)
1274 raise AssertionError(err_msg)
1276 InterfaceUtil.add_eth_interface(
1277 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
1281 return Topology.get_interface_by_sw_index(node, sw_if_index)
1284 def vpp_create_af_xdp_interface(
1285 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
1287 """Create AF_XDP interface on VPP node.
1289 :param node: DUT node from topology.
1290 :param if_key: Physical interface key from topology file of interface
1291 to be bound to compatible driver.
1292 :param num_rx_queues: Number of RX queues. (Optional, Default: none)
1293 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1294 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1295 :param mode: AF_XDP interface mode. (Optional, Default: auto).
1298 :type num_rx_queues: int
1302 :returns: Interface key (name) in topology file.
1304 :raises RuntimeError: If it is not possible to create AF_XDP interface
1307 PapiSocketExecutor.run_cli_cmd(
1308 node, u"set logging class af_xdp level debug"
1311 cmd = u"af_xdp_create"
1312 pci_addr = Topology.get_interface_pci_addr(node, if_key)
1314 name=InterfaceUtil.pci_to_eth(node, pci_addr),
1315 host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
1316 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1319 mode=getattr(AfXdpMode, f"AF_XDP_API_MODE_{mode.upper()}").value
1321 err_msg = f"Failed to create AF_XDP interface on host {node[u'host']}"
1322 with PapiSocketExecutor(node) as papi_exec:
1323 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1325 InterfaceUtil.vpp_set_interface_mac(
1326 node, sw_if_index, Topology.get_interface_mac(node, if_key)
1328 InterfaceUtil.add_eth_interface(
1329 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_af_xdp",
1333 return Topology.get_interface_by_sw_index(node, sw_if_index)
1336 def vpp_create_rdma_interface(
1337 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
1339 """Create RDMA interface on VPP node.
1341 :param node: DUT node from topology.
1342 :param if_key: Physical interface key from topology file of interface
1343 to be bound to rdma-core driver.
1344 :param num_rx_queues: Number of RX queues.
1345 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1346 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1347 :param mode: RDMA interface mode - auto/ibv/dv.
1350 :type num_rx_queues: int
1354 :returns: Interface key (name) in topology file.
1356 :raises RuntimeError: If it is not possible to create RDMA interface on
1359 PapiSocketExecutor.run_cli_cmd(
1360 node, u"set logging class rdma level debug"
1363 cmd = u"rdma_create_v2"
1364 pci_addr = Topology.get_interface_pci_addr(node, if_key)
1366 name=InterfaceUtil.pci_to_eth(node, pci_addr),
1367 host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
1368 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1371 mode=getattr(RdmaMode, f"RDMA_API_MODE_{mode.upper()}").value,
1372 # Note: Set True for non-jumbo packets.
1376 err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
1377 with PapiSocketExecutor(node) as papi_exec:
1378 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1380 InterfaceUtil.vpp_set_interface_mac(
1381 node, sw_if_index, Topology.get_interface_mac(node, if_key)
1383 InterfaceUtil.add_eth_interface(
1384 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_rdma",
1388 return Topology.get_interface_by_sw_index(node, sw_if_index)
1391 def vpp_add_bond_member(node, interface, bond_if):
1392 """Add member interface to bond interface on VPP node.
1394 :param node: DUT node from topology.
1395 :param interface: Physical interface key from topology file.
1396 :param bond_if: Load balance
1398 :type interface: str
1400 :raises RuntimeError: If it is not possible to add member to bond
1401 interface on the node.
1403 cmd = u"bond_add_member"
1405 sw_if_index=Topology.get_interface_sw_index(node, interface),
1406 bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
1408 is_long_timeout=False
1410 err_msg = f"Failed to add member {interface} to bond interface " \
1411 f"{bond_if} on host {node[u'host']}"
1412 with PapiSocketExecutor(node) as papi_exec:
1413 papi_exec.add(cmd, **args).get_reply(err_msg)
1416 def vpp_show_bond_data_on_node(node, verbose=False):
1417 """Show (detailed) bond information on VPP node.
1419 :param node: DUT node from topology.
1420 :param verbose: If detailed information is required or not.
1424 cmd = u"sw_bond_interface_dump"
1425 err_msg = f"Failed to get bond interface dump on host {node[u'host']}"
1427 data = f"Bond data on node {node[u'host']}:\n"
1428 with PapiSocketExecutor(node) as papi_exec:
1429 details = papi_exec.add(cmd).get_details(err_msg)
1431 for bond in details:
1432 data += f"{bond[u'interface_name']}\n"
1433 data += u" mode: {m}\n".format(
1434 m=bond[u"mode"].name.replace(u"BOND_API_MODE_", u"").lower()
1436 data += u" load balance: {lb}\n".format(
1437 lb=bond[u"lb"].name.replace(u"BOND_API_LB_ALGO_", u"").lower()
1439 data += f" number of active members: {bond[u'active_members']}\n"
1441 member_data = InterfaceUtil.vpp_bond_member_dump(
1442 node, Topology.get_interface_by_sw_index(
1443 node, bond[u"sw_if_index"]
1446 for member in member_data:
1447 if not member[u"is_passive"]:
1448 data += f" {member[u'interface_name']}\n"
1449 data += f" number of members: {bond[u'members']}\n"
1451 for member in member_data:
1452 data += f" {member[u'interface_name']}\n"
1453 data += f" interface id: {bond[u'id']}\n"
1454 data += f" sw_if_index: {bond[u'sw_if_index']}\n"
1458 def vpp_bond_member_dump(node, interface):
1459 """Get bond interface slave(s) data on VPP node.
1461 :param node: DUT node from topology.
1462 :param interface: Physical interface key from topology file.
1464 :type interface: str
1465 :returns: Bond slave interface data.
1468 cmd = u"sw_member_interface_dump"
1470 sw_if_index=Topology.get_interface_sw_index(node, interface)
1472 err_msg = f"Failed to get slave dump on host {node[u'host']}"
1474 with PapiSocketExecutor(node) as papi_exec:
1475 details = papi_exec.add(cmd, **args).get_details(err_msg)
1477 logger.debug(f"Member data:\n{details}")
1481 def vpp_show_bond_data_on_all_nodes(nodes, verbose=False):
1482 """Show (detailed) bond information on all VPP nodes in DICT__nodes.
1484 :param nodes: Nodes in the topology.
1485 :param verbose: If detailed information is required or not.
1489 for node_data in nodes.values():
1490 if node_data[u"type"] == NodeType.DUT:
1491 InterfaceUtil.vpp_show_bond_data_on_node(node_data, verbose)
1494 def vpp_enable_input_acl_interface(
1495 node, interface, ip_version, table_index):
1496 """Enable input acl on interface.
1498 :param node: VPP node to setup interface for input acl.
1499 :param interface: Interface to setup input acl.
1500 :param ip_version: Version of IP protocol.
1501 :param table_index: Classify table index.
1503 :type interface: str or int
1504 :type ip_version: str
1505 :type table_index: int
1507 cmd = u"input_acl_set_interface"
1509 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1510 ip4_table_index=table_index if ip_version == u"ip4"
1511 else Constants.BITWISE_NON_ZERO,
1512 ip6_table_index=table_index if ip_version == u"ip6"
1513 else Constants.BITWISE_NON_ZERO,
1514 l2_table_index=table_index if ip_version == u"l2"
1515 else Constants.BITWISE_NON_ZERO,
1517 err_msg = f"Failed to enable input acl on interface {interface}"
1518 with PapiSocketExecutor(node) as papi_exec:
1519 papi_exec.add(cmd, **args).get_reply(err_msg)
1522 def get_interface_classify_table(node, interface):
1523 """Get name of classify table for the given interface.
1525 TODO: Move to Classify.py.
1527 :param node: VPP node to get data from.
1528 :param interface: Name or sw_if_index of a specific interface.
1530 :type interface: str or int
1531 :returns: Classify table name.
1534 if isinstance(interface, str):
1535 sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
1537 sw_if_index = interface
1539 cmd = u"classify_table_by_interface"
1541 sw_if_index=sw_if_index
1543 err_msg = f"Failed to get classify table name by interface {interface}"
1544 with PapiSocketExecutor(node) as papi_exec:
1545 reply = papi_exec.add(cmd, **args).get_reply(err_msg)
1550 def get_sw_if_index(node, interface_name):
1551 """Get sw_if_index for the given interface from actual interface dump.
1553 FIXME: Delete and redirect callers to vpp_get_interface_sw_index.
1555 :param node: VPP node to get interface data from.
1556 :param interface_name: Name of the specific interface.
1558 :type interface_name: str
1559 :returns: sw_if_index of the given interface.
1562 interface_data = InterfaceUtil.vpp_get_interface_data(
1563 node, interface=interface_name
1565 return interface_data.get(u"sw_if_index")
1568 def vxlan_gpe_dump(node, interface_name=None):
1569 """Get VxLAN GPE data for the given interface.
1571 :param node: VPP node to get interface data from.
1572 :param interface_name: Name of the specific interface. If None,
1573 information about all VxLAN GPE interfaces is returned.
1575 :type interface_name: str
1576 :returns: Dictionary containing data for the given VxLAN GPE interface
1577 or if interface=None, the list of dictionaries with all VxLAN GPE
1579 :rtype: dict or list
1581 def process_vxlan_gpe_dump(vxlan_dump):
1582 """Process vxlan_gpe dump.
1584 :param vxlan_dump: Vxlan_gpe nterface dump.
1585 :type vxlan_dump: dict
1586 :returns: Processed vxlan_gpe interface dump.
1589 if vxlan_dump[u"is_ipv6"]:
1590 vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"])
1591 vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"])
1593 vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"][0:4])
1594 vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"][0:4])
1597 if interface_name is not None:
1598 sw_if_index = InterfaceUtil.get_interface_index(
1599 node, interface_name
1602 sw_if_index = int(Constants.BITWISE_NON_ZERO)
1604 cmd = u"vxlan_gpe_tunnel_dump"
1606 sw_if_index=sw_if_index
1608 err_msg = f"Failed to get VXLAN-GPE dump on host {node[u'host']}"
1609 with PapiSocketExecutor(node) as papi_exec:
1610 details = papi_exec.add(cmd, **args).get_details(err_msg)
1612 data = list() if interface_name is None else dict()
1613 for dump in details:
1614 if interface_name is None:
1615 data.append(process_vxlan_gpe_dump(dump))
1616 elif dump[u"sw_if_index"] == sw_if_index:
1617 data = process_vxlan_gpe_dump(dump)
1620 logger.debug(f"VXLAN-GPE data:\n{data}")
1624 def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
1625 """Assign VPP interface to specific VRF/FIB table.
1627 :param node: VPP node where the FIB and interface are located.
1628 :param interface: Interface to be assigned to FIB.
1629 :param table_id: VRF table ID.
1630 :param ipv6: Assign to IPv6 table. Default False.
1632 :type interface: str or int
1636 cmd = u"sw_interface_set_table"
1638 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1640 vrf_id=int(table_id)
1642 err_msg = f"Failed to assign interface {interface} to FIB table"
1643 with PapiSocketExecutor(node) as papi_exec:
1644 papi_exec.add(cmd, **args).get_reply(err_msg)
1647 def set_linux_interface_mac(
1648 node, interface, mac, namespace=None, vf_id=None):
1649 """Set MAC address for interface in linux.
1651 :param node: Node where to execute command.
1652 :param interface: Interface in namespace.
1653 :param mac: MAC to be assigned to interface.
1654 :param namespace: Execute command in namespace. Optional
1655 :param vf_id: Virtual Function id. Optional
1657 :type interface: str
1659 :type namespace: str
1662 mac_str = f"vf {vf_id} mac {mac}" if vf_id is not None \
1663 else f"address {mac}"
1664 ns_str = f"ip netns exec {namespace}" if namespace else u""
1666 cmd = f"{ns_str} ip link set {interface} {mac_str}"
1667 exec_cmd_no_error(node, cmd, sudo=True)
1670 def set_linux_interface_promisc(
1671 node, interface, namespace=None, vf_id=None, state=u"on"):
1672 """Set promisc state for interface in linux.
1674 :param node: Node where to execute command.
1675 :param interface: Interface in namespace.
1676 :param namespace: Exec command in namespace. (Optional, Default: None)
1677 :param vf_id: Virtual Function id. (Optional, Default: None)
1678 :param state: State of feature. (Optional, Default: on)
1680 :type interface: str
1681 :type namespace: str
1685 promisc_str = f"vf {vf_id} promisc {state}" if vf_id is not None \
1686 else f"promisc {state}"
1687 ns_str = f"ip netns exec {namespace}" if namespace else u""
1689 cmd = f"{ns_str} ip link set dev {interface} {promisc_str}"
1690 exec_cmd_no_error(node, cmd, sudo=True)
1693 def set_linux_interface_trust_on(
1694 node, interface, namespace=None, vf_id=None):
1695 """Set trust on (promisc) for interface in linux.
1697 :param node: Node where to execute command.
1698 :param interface: Interface in namespace.
1699 :param namespace: Execute command in namespace. Optional
1700 :param vf_id: Virtual Function id. Optional
1702 :type interface: str
1703 :type namespace: str
1706 trust_str = f"vf {vf_id} trust on" if vf_id is not None else u"trust on"
1707 ns_str = f"ip netns exec {namespace}" if namespace else u""
1709 cmd = f"{ns_str} ip link set dev {interface} {trust_str}"
1710 exec_cmd_no_error(node, cmd, sudo=True)
1713 def set_linux_interface_spoof_off(
1714 node, interface, namespace=None, vf_id=None):
1715 """Set spoof off for interface in linux.
1717 :param node: Node where to execute command.
1718 :param interface: Interface in namespace.
1719 :param namespace: Execute command in namespace. Optional
1720 :param vf_id: Virtual Function id. Optional
1722 :type interface: str
1723 :type namespace: str
1726 spoof_str = f"vf {vf_id} spoof off" if vf_id is not None \
1728 ns_str = f"ip netns exec {namespace}" if namespace else u""
1730 cmd = f"{ns_str} ip link set dev {interface} {spoof_str}"
1731 exec_cmd_no_error(node, cmd, sudo=True)
1734 def set_linux_interface_state(
1735 node, interface, namespace=None, state=u"up"):
1736 """Set operational state for interface in linux.
1738 :param node: Node where to execute command.
1739 :param interface: Interface in namespace.
1740 :param namespace: Execute command in namespace. Optional
1741 :param state: Up/Down.
1743 :type interface: str
1744 :type namespace: str
1747 ns_str = f"ip netns exec {namespace}" if namespace else u""
1749 cmd = f"{ns_str} ip link set dev {interface} {state}"
1750 exec_cmd_no_error(node, cmd, sudo=True)
1753 def init_interface(node, ifc_key, driver, numvfs=0, osi_layer=u"L2"):
1754 """Init PCI device. Check driver compatibility and bind to proper
1755 drivers. Optionally create NIC VFs.
1757 :param node: DUT node.
1758 :param ifc_key: Interface key from topology file.
1759 :param driver: Base driver to use.
1760 :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
1761 :param osi_layer: OSI Layer type to initialize TG with.
1762 Default value "L2" sets linux interface spoof off.
1767 :type osi_layer: str
1768 :returns: Virtual Function topology interface keys.
1770 :raises RuntimeError: If a reason preventing initialization is found.
1772 kernel_driver = Topology.get_interface_driver(node, ifc_key)
1774 if driver == u"avf":
1775 if kernel_driver not in (
1776 u"ice", u"iavf", u"i40e", u"i40evf"):
1778 f"AVF needs ice or i40e compatible driver, not "
1779 f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
1781 vf_keys = InterfaceUtil.init_generic_interface(
1782 node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
1784 elif driver == u"af_xdp":
1785 if kernel_driver not in (
1786 u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core",
1789 f"AF_XDP needs ice/i40e/rdma/ixgbe compatible driver, not "
1790 f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
1792 vf_keys = InterfaceUtil.init_generic_interface(
1793 node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
1795 elif driver == u"rdma-core":
1796 vf_keys = InterfaceUtil.init_generic_interface(
1797 node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
1802 def init_generic_interface(node, ifc_key, numvfs=0, osi_layer=u"L2"):
1803 """Init PCI device. Bind to proper drivers. Optionally create NIC VFs.
1805 :param node: DUT node.
1806 :param ifc_key: Interface key from topology file.
1807 :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
1808 :param osi_layer: OSI Layer type to initialize TG with.
1809 Default value "L2" sets linux interface spoof off.
1813 :type osi_layer: str
1814 :returns: Virtual Function topology interface keys.
1816 :raises RuntimeError: If a reason preventing initialization is found.
1818 # Read PCI address and driver.
1819 pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
1820 pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
1821 uio_driver = Topology.get_uio_driver(node)
1822 kernel_driver = Topology.get_interface_driver(node, ifc_key)
1823 current_driver = DUTSetup.get_pci_dev_driver(
1824 node, pf_pci_addr.replace(u":", r"\:"))
1825 pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"
1827 VPPUtil.stop_vpp_service(node)
1828 if current_driver != kernel_driver:
1829 # PCI device must be re-bound to kernel driver before creating VFs.
1830 DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
1831 # Stop VPP to prevent deadlock.
1832 # Unbind from current driver.
1833 DUTSetup.pci_driver_unbind(node, pf_pci_addr)
1834 # Bind to kernel driver.
1835 DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
1837 # Initialize PCI VFs.
1838 DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
1841 if osi_layer == u"L2":
1842 InterfaceUtil.set_linux_interface_promisc(node, pf_dev)
1845 # Set MAC address and bind each virtual function to uio driver.
1846 for vf_id in range(numvfs):
1847 vf_mac_addr = u":".join(
1848 [pf_mac_addr[0], pf_mac_addr[2], pf_mac_addr[3], pf_mac_addr[4],
1849 pf_mac_addr[5], f"{vf_id:02x}"
1853 InterfaceUtil.set_linux_interface_trust_on(
1854 node, pf_dev, vf_id=vf_id
1856 if osi_layer == u"L2":
1857 InterfaceUtil.set_linux_interface_spoof_off(
1858 node, pf_dev, vf_id=vf_id
1860 InterfaceUtil.set_linux_interface_mac(
1861 node, pf_dev, vf_mac_addr, vf_id=vf_id
1863 InterfaceUtil.set_linux_interface_state(
1864 node, pf_dev, state=u"up"
1867 DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
1868 DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
1870 # Add newly created ports into topology file
1871 vf_ifc_name = f"{ifc_key}_vif"
1872 vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
1873 vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
1874 Topology.update_interface_name(
1875 node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
1877 Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
1878 Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
1879 Topology.set_interface_numa_node(
1880 node, vf_ifc_key, Topology.get_interface_numa_node(
1884 vf_ifc_keys.append(vf_ifc_key)
1889 def vpp_sw_interface_rx_placement_dump(node):
1890 """Dump VPP interface RX placement on node.
1892 :param node: Node to run command on.
1894 :returns: Thread mapping information as a list of dictionaries.
1897 cmd = u"sw_interface_rx_placement_dump"
1898 err_msg = f"Failed to run '{cmd}' PAPI command on host {node[u'host']}!"
1899 with PapiSocketExecutor(node) as papi_exec:
1900 for ifc in node[u"interfaces"].values():
1901 if ifc[u"vpp_sw_index"] is not None:
1902 papi_exec.add(cmd, sw_if_index=ifc[u"vpp_sw_index"])
1903 details = papi_exec.get_details(err_msg)
1904 return sorted(details, key=lambda k: k[u"sw_if_index"])
1907 def vpp_sw_interface_rx_placement_dump_on_all_duts(nodes):
1908 """Dump VPP interface RX placement on all given nodes.
1910 :param nodes: Nodes to run command on.
1912 :returns: Thread mapping information as a list of dictionaries.
1915 for node in nodes.values():
1916 if node[u"type"] == NodeType.DUT:
1917 InterfaceUtil.vpp_sw_interface_rx_placement_dump(node)
1920 def vpp_sw_interface_set_rx_placement(
1921 node, sw_if_index, queue_id, worker_id):
1922 """Set interface RX placement to worker on node.
1924 :param node: Node to run command on.
1925 :param sw_if_index: VPP SW interface index.
1926 :param queue_id: VPP interface queue ID.
1927 :param worker_id: VPP worker ID (indexing from 0).
1929 :type sw_if_index: int
1931 :type worker_id: int
1932 :raises RuntimeError: If failed to run command on host or if no API
1935 cmd = u"sw_interface_set_rx_placement"
1936 err_msg = f"Failed to set interface RX placement to worker " \
1937 f"on host {node[u'host']}!"
1939 sw_if_index=sw_if_index,
1941 worker_id=worker_id,
1944 with PapiSocketExecutor(node) as papi_exec:
1945 papi_exec.add(cmd, **args).get_reply(err_msg)
1948 def vpp_round_robin_rx_placement(
1949 node, prefix, dp_worker_limit=None):
1950 """Set Round Robin interface RX placement on all worker threads
1953 If specified, dp_core_limit limits the number of physical cores used
1954 for data plane I/O work. Other cores are presumed to do something else,
1955 e.g. asynchronous crypto processing.
1956 None means all workers are used for data plane work.
1957 Note this keyword specifies workers, not cores.
1959 :param node: Topology nodes.
1960 :param prefix: Interface name prefix.
1961 :param dp_worker_limit: How many cores for data plane work.
1964 :type dp_worker_limit: Optional[int]
1967 worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
1968 if dp_worker_limit is not None:
1969 worker_cnt = min(worker_cnt, dp_worker_limit)
1972 for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
1973 for interface in node[u"interfaces"].values():
1974 if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
1975 and prefix in interface[u"name"]:
1976 InterfaceUtil.vpp_sw_interface_set_rx_placement(
1977 node, placement[u"sw_if_index"], placement[u"queue_id"],
1978 worker_id % worker_cnt
1983 def vpp_round_robin_rx_placement_on_all_duts(
1984 nodes, prefix, dp_core_limit=None):
1985 """Set Round Robin interface RX placement on all worker threads
1988 If specified, dp_core_limit limits the number of physical cores used
1989 for data plane I/O work. Other cores are presumed to do something else,
1990 e.g. asynchronous crypto processing.
1991 None means all cores are used for data plane work.
1992 Note this keyword specifies cores, not workers.
1994 :param nodes: Topology nodes.
1995 :param prefix: Interface name prefix.
1996 :param dp_worker_limit: How many cores for data plane work.
1999 :type dp_worker_limit: Optional[int]
2001 for node in nodes.values():
2002 if node[u"type"] == NodeType.DUT:
2003 dp_worker_limit = CpuUtils.worker_count_from_cores_and_smt(
2004 phy_cores=dp_core_limit,
2005 smt_used=CpuUtils.is_smt_enabled(node[u"cpuinfo"]),
2007 InterfaceUtil.vpp_round_robin_rx_placement(
2008 node, prefix, dp_worker_limit