1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Interface util library."""
16 from time import sleep
17 from enum import IntEnum
19 from ipaddress import ip_address
20 from robot.api import logger
22 from resources.libraries.python.Constants import Constants
23 from resources.libraries.python.CpuUtils import CpuUtils
24 from resources.libraries.python.DUTSetup import DUTSetup
25 from resources.libraries.python.L2Util import L2Util
26 from resources.libraries.python.PapiExecutor import PapiSocketExecutor
27 from resources.libraries.python.parsers.JsonParser import JsonParser
28 from resources.libraries.python.ssh import SSH, exec_cmd_no_error
29 from resources.libraries.python.topology import NodeType, Topology
30 from resources.libraries.python.VPPUtil import VPPUtil
class InterfaceStatusFlags(IntEnum):
    """Interface status flags.

    Bit flags mirroring the VPP binary API enum if_status_flags:
    bit 0 = administratively up, bit 1 = link (carrier) up.
    """
    IF_STATUS_API_FLAG_ADMIN_UP = 1 << 0
    IF_STATUS_API_FLAG_LINK_UP = 1 << 1
class MtuProto(IntEnum):
    """MTU protocol selector.

    Mirrors the VPP binary API enum mtu_proto; used to index the
    per-protocol MTU array of an interface. Only the MPLS member was
    present here; the remaining members are restored from the VPP API.
    """
    MTU_PROTO_API_L3 = 0
    MTU_PROTO_API_IP4 = 1
    MTU_PROTO_API_IP6 = 2
    MTU_PROTO_API_MPLS = 3
class LinkDuplex(IntEnum):
    """Link duplex mode.

    Mirrors the VPP binary API enum link_duplex.
    """
    LINK_DUPLEX_API_UNKNOWN = 0
    LINK_DUPLEX_API_HALF = 1
    LINK_DUPLEX_API_FULL = 2
class SubInterfaceFlags(IntEnum):
    """Sub-interface flags.

    Bitmask values mirroring the VPP binary API enum sub_if_flags;
    combined with `|` when creating sub-interfaces.
    """
    SUB_IF_API_FLAG_NO_TAGS = 1 << 0
    SUB_IF_API_FLAG_ONE_TAG = 1 << 1
    SUB_IF_API_FLAG_TWO_TAGS = 1 << 2
    SUB_IF_API_FLAG_DOT1AD = 1 << 3
    SUB_IF_API_FLAG_EXACT_MATCH = 1 << 4
    SUB_IF_API_FLAG_DEFAULT = 1 << 5
    SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY = 1 << 6
    SUB_IF_API_FLAG_INNER_VLAN_ID_ANY = 1 << 7
    SUB_IF_API_FLAG_DOT1AH = 1 << 8
class RxMode(IntEnum):
    """Interface RX mode.

    Mirrors the VPP binary API enum rx_mode.
    """
    RX_MODE_API_UNKNOWN = 0
    RX_MODE_API_POLLING = 1
    RX_MODE_API_INTERRUPT = 2
    RX_MODE_API_ADAPTIVE = 3
    RX_MODE_API_DEFAULT = 4
class IfType(IntEnum):
    """Interface type.

    Mirrors the VPP binary API enum if_type. Only the hardware member
    was present here; the remaining members are restored from the VPP API.
    """
    # A physical NIC or other hardware-backed interface.
    IF_API_TYPE_HARDWARE = 0
    IF_API_TYPE_SUB = 1
    IF_API_TYPE_P2P = 2
    IF_API_TYPE_PIPE = 3
class LinkBondLoadBalanceAlgo(IntEnum):
    """Link bonding load balance algorithm.

    Mirrors the VPP bond API load-balance algorithm enum.
    """
    BOND_API_LB_ALGO_L2 = 0    # hash on L2 headers
    BOND_API_LB_ALGO_L34 = 1   # hash on L3/L4 headers
    BOND_API_LB_ALGO_L23 = 2   # hash on L2/L3 headers
    BOND_API_LB_ALGO_RR = 3    # round robin
    BOND_API_LB_ALGO_BC = 4    # broadcast
    BOND_API_LB_ALGO_AB = 5    # active-backup
class LinkBondMode(IntEnum):
    """Link bonding mode.

    Mirrors the VPP bond API mode enum (note: 1-based).
    """
    BOND_API_MODE_ROUND_ROBIN = 1
    BOND_API_MODE_ACTIVE_BACKUP = 2
    BOND_API_MODE_XOR = 3
    BOND_API_MODE_BROADCAST = 4
    BOND_API_MODE_LACP = 5
class RdmaMode(IntEnum):
    """RDMA interface mode.

    Mirrors the VPP rdma plugin API enum rdma_mode. The DV (direct verbs)
    member appears to have been lost here and is restored from the plugin
    API.
    """
    RDMA_API_MODE_AUTO = 0
    RDMA_API_MODE_IBV = 1
    RDMA_API_MODE_DV = 2
114 """General utilities for managing interfaces"""
def pci_to_int(pci_str):
    """Convert PCI address from string format (0000:18:0a.0) to
    integer representation (169345024).

    :param pci_str: PCI address in string representation.
    :type pci_str: str
    :returns: Integer representation of PCI address.
    :rtype: int
    """
    # domain:bus:device.function -> four hex components.
    segments = pci_str.split(u":")
    dev_func = segments[2].split(u".")
    # Pack components into one integer in the layout VPP expects
    # (function occupies the topmost bits).
    return (
        int(segments[0], 16)
        | int(segments[1], 16) << 16
        | int(dev_func[0], 16) << 24
        | int(dev_func[1], 16) << 29
    )
def pci_to_eth(node, pci_str):
    """Convert PCI address on DUT to Linux ethernet name.

    :param node: DUT node.
    :param pci_str: PCI address.
    :type node: dict
    :type pci_str: str
    :returns: Ethernet name.
    :rtype: str
    :raises RuntimeError: If no netdev is associated with the PCI address.
    """
    cmd = f"basename /sys/bus/pci/devices/{pci_str}/net/*"
    # Fix: the raise was unconditional (its try/except wrapper was lost),
    # which made the return unreachable. Restore the guarded form.
    try:
        stdout, _ = exec_cmd_no_error(node, cmd)
    except RuntimeError as err:
        raise RuntimeError(
            f"Cannot convert {pci_str} to ethernet name!"
        ) from err

    return stdout.strip()
def get_interface_index(node, interface):
    """Get interface sw_if_index from topology file.

    :param node: Node where the interface is.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: str or int
    :returns: SW interface index.
    :rtype: int
    :raises TypeError: If interface is neither an index nor a resolvable
        name.
    """
    # Fix: the except clauses had lost their try statement and the final
    # return was missing; restore the guarded conversion.
    try:
        # Fast path: caller already passed a numeric index.
        sw_if_index = int(interface)
    except ValueError:
        # Not a number: resolve via topology, first by key, then by name.
        sw_if_index = Topology.get_interface_sw_index(node, interface)
        if sw_if_index is None:
            sw_if_index = \
                Topology.get_interface_sw_index_by_name(node, interface)
    except TypeError as err:
        raise TypeError(f"Wrong interface format {interface}") from err

    return sw_if_index
def set_interface_state(node, interface, state, if_type=u"key"):
    """Set interface state on a node.

    Function can be used for DUTs as well as for TGs.

    :param node: Node where the interface is.
    :param interface: Interface key or sw_if_index or name.
    :param state: One of 'up' or 'down'.
    :param if_type: Interface type.
    :type node: dict
    :type interface: str or int
    :type state: str
    :type if_type: str
    :raises ValueError: If the interface type is unknown.
    :raises ValueError: If the state of interface is unexpected.
    :raises ValueError: If the node has an unknown node type.
    """
    # Fix: several else/elif branches were lost (orphan elif chains),
    # leaving the function unusable; restore the branch structure.
    if if_type == u"key":
        if isinstance(interface, str):
            sw_if_index = Topology.get_interface_sw_index(node, interface)
            iface_name = Topology.get_interface_name(node, interface)
        else:
            sw_if_index = interface
    elif if_type == u"name":
        iface_key = Topology.get_interface_by_name(node, interface)
        if iface_key is not None:
            sw_if_index = Topology.get_interface_sw_index(node, iface_key)
            iface_name = interface
    else:
        raise ValueError(f"Unknown if_type: {if_type}")

    if node[u"type"] == NodeType.DUT:
        if state == u"up":
            flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
        elif state == u"down":
            flags = 0
        else:
            raise ValueError(f"Unexpected interface state: {state}")
        cmd = u"sw_interface_set_flags"
        err_msg = f"Failed to set interface state on host {node[u'host']}"
        args = dict(
            sw_if_index=int(sw_if_index),
            flags=flags
        )
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
    elif node[u"type"] == NodeType.TG or node[u"type"] == NodeType.VM:
        cmd = f"ip link set {iface_name} {state}"
        exec_cmd_no_error(node, cmd, sudo=True)
    else:
        raise ValueError(
            f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
        )
def set_interface_ethernet_mtu(node, iface_key, mtu):
    """Set Ethernet MTU for specified interface.

    Function can be used only for TGs.

    :param node: Node where the interface is.
    :param iface_key: Interface key from topology file.
    :param mtu: MTU to set.
    :type node: dict
    :type iface_key: str
    :type mtu: int
    :raises ValueError: If the node type is "DUT".
    :raises ValueError: If the node has an unknown node type.
    """
    if node[u"type"] == NodeType.DUT:
        msg = f"Node {node[u'host']}: Setting Ethernet MTU for interface " \
            f"on DUT nodes not supported"
    elif node[u"type"] != NodeType.TG:
        msg = f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
    # NOTE(review): an `else:` guard and an early return appear to have
    # been lost here -- as written the commands below run for every node
    # type and the final raise always fires; confirm against upstream.
    iface_name = Topology.get_interface_name(node, iface_key)
    cmd = f"ip link set {iface_name} mtu {mtu}"
    exec_cmd_no_error(node, cmd, sudo=True)
    raise ValueError(msg)
def set_default_ethernet_mtu_on_all_interfaces_on_node(node):
    """Set default Ethernet MTU on all interfaces on node.

    Function can be used only for TGs.

    :param node: Node where to set default MTU.
    :type node: dict
    """
    # 1500 is the conventional Ethernet default MTU.
    for ifc in node[u"interfaces"]:
        InterfaceUtil.set_interface_ethernet_mtu(node, ifc, 1500)
def vpp_set_interface_mtu(node, interface, mtu=9200):
    """Set Ethernet MTU on interface.

    :param node: VPP node.
    :param interface: Interface to setup MTU.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type interface: str or int
    :type mtu: int
    """
    # Fix: the else branch, args construction and try statement were lost
    # (orphan except clause); restore them.
    if isinstance(interface, str):
        sw_if_index = Topology.get_interface_sw_index(node, interface)
    else:
        sw_if_index = interface

    cmd = u"hw_interface_set_mtu"
    err_msg = f"Failed to set interface MTU on host {node[u'host']}"
    args = dict(
        sw_if_index=sw_if_index,
        mtu=int(mtu)
    )
    try:
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
    except AssertionError as err:
        # TODO: Make failure tolerance optional.
        logger.debug(f"Setting MTU failed. Expected?\n{err}")
def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
    """Set Ethernet MTU on all interfaces.

    :param node: VPP node.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type mtu: int
    """
    for interface in node[u"interfaces"]:
        InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
    """Set Ethernet MTU on all interfaces on all DUTs.

    :param nodes: VPP nodes.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type nodes: dict
    :type mtu: int
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
def vpp_node_interfaces_ready_wait(node, retries=15):
    """Wait until all interfaces with admin-up are in link-up state.

    :param node: Node to wait on.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type node: dict
    :type retries: int
    :raises RuntimeError: If any interface is not in link-up state after
        defined number of retries.
    """
    for _ in range(0, retries):
        # NOTE(review): `not_ready` is appended to below without a visible
        # `not_ready = list()` initialisation -- looks truncated.
        out = InterfaceUtil.vpp_get_interface_data(node)
        for interface in out:
            # flags == 1 means only the admin-up bit is set, i.e. the
            # link-up bit (2) is still missing (see InterfaceStatusFlags).
            if interface.get(u"flags") == 1:
                not_ready.append(interface.get(u"interface_name"))
        # NOTE(review): orphan f-string; presumably an argument of a
        # logger call plus a break/sleep retry step no longer visible.
        f"Interfaces still not in link-up state:\n{not_ready}"

    err = f"Timeout, interfaces not up:\n{not_ready}" \
        if u"not_ready" in locals() else u"No check executed!"
    raise RuntimeError(err)
def all_vpp_interfaces_ready_wait(nodes, retries=15):
    """Wait until all interfaces with admin-up are in link-up state for all
    nodes in the topology.

    :param nodes: Nodes in the topology.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type nodes: dict
    :type retries: int
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)
def vpp_get_interface_data(node, interface=None):
    """Get all interface data from a VPP node. If a name or
    sw_interface_index is provided, return only data for the matching
    interface(s).

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: List of dictionaries containing data for each interface, or a
        single dictionary for the specified interface.
    :rtype: list or dict
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    def process_if_dump(if_dump):
        """Process interface dump.

        :param if_dump: Interface dump.
        :type if_dump: dict
        :returns: Processed interface dump.
        :rtype: dict
        """
        # Normalise API enum/binary fields into plain str/int values.
        if_dump[u"l2_address"] = str(if_dump[u"l2_address"])
        if_dump[u"b_dmac"] = str(if_dump[u"b_dmac"])
        if_dump[u"b_smac"] = str(if_dump[u"b_smac"])
        if_dump[u"flags"] = if_dump[u"flags"].value
        if_dump[u"type"] = if_dump[u"type"].value
        if_dump[u"link_duplex"] = if_dump[u"link_duplex"].value
        if_dump[u"sub_if_flags"] = if_dump[u"sub_if_flags"].value \
            if hasattr(if_dump[u"sub_if_flags"], u"value") \
            else int(if_dump[u"sub_if_flags"])
        # NOTE(review): no `return if_dump` is visible although callers
        # below use the return value -- looks truncated.

    if interface is not None:
        if isinstance(interface, str):
            param = u"interface_name"
        elif isinstance(interface, int):
            param = u"sw_if_index"
        # NOTE(review): this raise appears to have lost its `else:` guard.
        raise TypeError(f"Wrong interface format {interface}")

    cmd = u"sw_interface_dump"
    # NOTE(review): orphan keyword line; presumably part of an
    # `args = dict(...)` construction no longer visible.
    name_filter_valid=False,
    err_msg = f"Failed to get interface dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)
    logger.debug(f"Received data:\n{details!r}")

    data = list() if interface is None else dict()
    # NOTE(review): `dump` is used below without a visible
    # `for dump in details:` loop header -- looks truncated; a final
    # `return data` also appears to be missing.
    if interface is None:
        data.append(process_if_dump(dump))
    elif str(dump.get(param)).rstrip(u"\x00") == str(interface):
        data = process_if_dump(dump)

    logger.debug(f"Interface data:\n{data}")
def vpp_get_interface_name(node, sw_if_index):
    """Get interface name for the given SW interface index from actual
    interface dump.

    :param node: VPP node to get interface data from.
    :param sw_if_index: SW interface index of the specific interface.
    :type node: dict
    :type sw_if_index: int
    :returns: Name of the given interface.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
    if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
        # Sub-interface: report the name of its parent (sup) interface.
        # Fix: the closing paren of this call was missing.
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data[u"sup_sw_if_index"]
        )

    return if_data.get(u"interface_name")
def vpp_get_interface_sw_index(node, interface_name):
    """Get SW interface index for the given interface name from actual
    interface dump.

    :param node: VPP node to get interface data from.
    :param interface_name: Interface name.
    :type node: dict
    :type interface_name: str
    :returns: SW interface index of the given interface.
    :rtype: int
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)

    return if_data.get(u"sw_if_index")
def vpp_get_interface_mac(node, interface):
    """Get MAC address for the given interface from actual interface dump.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: MAC address.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
    if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
        # Sub-interface: use the MAC of its parent (sup) interface.
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data[u"sup_sw_if_index"])

    return if_data.get(u"l2_address")
def vpp_set_interface_mac(node, interface, mac):
    """Set MAC address for the given interface.

    :param node: VPP node to set interface MAC.
    :param interface: Numeric index or name string of a specific interface.
    :param mac: Required MAC address.
    :type node: dict
    :type interface: int or str
    :type mac: str
    """
    cmd = u"sw_interface_set_mac_address"
    # NOTE(review): orphan keyword lines; presumably part of an
    # `args = dict(...)` construction no longer visible.
    sw_if_index=InterfaceUtil.get_interface_index(node, interface),
    mac_address=L2Util.mac_to_bin(mac)
    err_msg = f"Failed to set MAC address of interface {interface}" \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def tg_set_interface_driver(node, pci_addr, driver):
    """Set interface driver on the TG node.

    :param node: Node to set interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :param driver: Driver name.
    :type node: dict
    :type pci_addr: str
    :type driver: str
    :raises RuntimeError: If unbinding from the current driver fails.
    :raises RuntimeError: If binding to the new driver fails.
    """
    old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
    if old_driver == driver:
        # Fix: this guard had an empty body (its early return was lost)
        # and the SSH session setup was missing entirely.
        return

    ssh = SSH()
    ssh.connect(node)

    # Unbind from current driver
    if old_driver is not None:
        cmd = f"sh -c \"echo {pci_addr} > " \
            f"/sys/bus/pci/drivers/{old_driver}/unbind\""
        ret_code, _, _ = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")

    # Bind to the new driver
    cmd = f"sh -c \"echo {pci_addr} > /sys/bus/pci/drivers/{driver}/bind\""
    ret_code, _, _ = ssh.exec_command_sudo(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
def tg_get_interface_driver(node, pci_addr):
    """Get interface driver from the TG node.

    :param node: Node to get interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :type node: dict
    :type pci_addr: str
    :returns: Interface driver or None if not found.
    :rtype: str
    :raises RuntimeError: If PCI rescan or lspci command execution failed.
    """
    # Thin delegation; the actual sysfs/lspci work lives in DUTSetup.
    return DUTSetup.get_pci_dev_driver(node, pci_addr)
def tg_set_interfaces_default_driver(node):
    """Set interfaces default driver specified in topology yaml file.

    :param node: Node to setup interfaces driver on (must be TG node).
    :type node: dict
    """
    # Fix: the call's closing paren was missing.
    for interface in node[u"interfaces"].values():
        InterfaceUtil.tg_set_interface_driver(
            node, interface[u"pci_address"], interface[u"driver"]
        )
def update_vpp_interface_data_on_node(node):
    """Update vpp generated interface data for a given node in DICT__nodes.

    Updates interface names, software if index numbers and any other details
    generated specifically by vpp that are unknown before testcase run.
    It does this by dumping interface list from all devices using python
    api, and pairing known information from topology (mac address) to state
    of the node.

    :param node: Node selected from DICT__nodes.
    :type node: dict
    """
    interface_list = InterfaceUtil.vpp_get_interface_data(node)
    # Index the dump by MAC so topology entries can be paired to it.
    interface_dict = dict()
    for ifc in interface_list:
        interface_dict[ifc[u"l2_address"]] = ifc

    for if_name, if_data in node[u"interfaces"].items():
        ifc_dict = interface_dict.get(if_data[u"mac_address"])
        if ifc_dict is not None:
            if_data[u"name"] = ifc_dict[u"interface_name"]
            if_data[u"vpp_sw_index"] = ifc_dict[u"sw_if_index"]
            if_data[u"mtu"] = ifc_dict[u"mtu"][0]
            # NOTE(review): orphan f-strings; presumably arguments of a
            # logger call that is no longer visible.
            f"Interface {if_name} found by MAC "
            f"{if_data[u'mac_address']}"
        # NOTE(review): the lines below were presumably an `else:` branch
        # (not-found case) -- confirm against upstream.
        f"Interface {if_name} not found by MAC "
        f"{if_data[u'mac_address']}"
        if_data[u"vpp_sw_index"] = None
def update_nic_interface_names(node):
    """Update interface names based on nic type and PCI address.

    This method updates interface names in the same format as VPP does.

    :param node: Node dictionary.
    :type node: dict
    """
    # Fix: the fallback assignment had lost its `else:` guard, so every
    # interface name was unconditionally overwritten with the
    # "UnknownEthernet" fallback. A dict dispatch makes the mapping
    # explicit and un-clobberable.
    model_prefix = {
        u"Intel-XL710": u"FortyGigabitEthernet",
        u"Intel-X710": u"TenGigabitEthernet",
        u"Intel-X520-DA2": u"TenGigabitEthernet",
        u"Cisco-VIC-1385": u"FortyGigabitEthernet",
        u"Cisco-VIC-1227": u"TenGigabitEthernet",
    }
    for ifc in node[u"interfaces"].values():
        # bus/device/function rendered as lowercase hex, VPP style.
        if_pci = ifc[u"pci_address"].replace(u".", u":").split(u":")
        loc = f"{int(if_pci[1], 16):x}/{int(if_pci[2], 16):x}/" \
            f"{int(if_pci[3], 16):x}"
        prefix = model_prefix.get(ifc[u"model"], u"UnknownEthernet")
        ifc[u"name"] = f"{prefix}{loc}"
def update_nic_interface_names_on_all_duts(nodes):
    """Update interface names based on nic type and PCI address on all DUTs.

    This method updates interface names in the same format as VPP does.

    :param nodes: Topology nodes.
    :type nodes: dict
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.update_nic_interface_names(node)
def update_tg_interface_data_on_node(node):
    """Update interface name for TG/linux node in DICT__nodes.

    .. note::
        # for dev in `ls /sys/class/net/`;
        > do echo "\"`cat /sys/class/net/$dev/address`\": \"$dev\""; done
        "52:54:00:9f:82:63": "eth0"
        "52:54:00:77:ae:a9": "eth1"
        "52:54:00:e1:8a:0f": "eth2"
        "00:00:00:00:00:00": "lo"

    :param node: Node selected from DICT__nodes.
    :type node: dict
    :raises RuntimeError: If getting of interface name and MAC fails.
    """
    # First setup interface driver specified in yaml file
    InterfaceUtil.tg_set_interfaces_default_driver(node)

    # Get interface names
    # NOTE(review): `ssh` is used below without a visible
    # SSH()/ssh.connect(node) setup -- looks truncated.
    cmd = u'for dev in `ls /sys/class/net/`; do echo "\\"`cat ' \
        u'/sys/class/net/$dev/address`\\": \\"$dev\\""; done;'

    ret_code, stdout, _ = ssh.exec_command(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(u"Get interface name and MAC failed")
    # Wrap the emitted "mac": "dev" pairs into a JSON object literal.
    tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"

    interfaces = JsonParser().parse_data(tmp)
    for interface in node[u"interfaces"].values():
        name = interfaces.get(interface[u"mac_address"])
        # NOTE(review): a `if name is None: continue` guard is likely
        # missing here -- confirm against upstream.
        interface[u"name"] = name
def iface_update_numa_node(node):
    """For all interfaces from topology file update numa node based on
    information from the node.

    :param node: Node from topology.
    :type node: dict
    :raises ValueError: If numa node is less than 0.
    :raises RuntimeError: If update of numa node failed.
    """
    # Fix: SSH setup, the retry loop, the success branch and a closing
    # paren were lost, leaving unbalanced syntax; restore the guarded
    # read-and-store flow.
    ssh = SSH()
    for if_key in Topology.get_node_interfaces(node):
        if_pci = Topology.get_interface_pci_addr(node, if_key)
        ssh.connect(node)
        cmd = f"cat /sys/bus/pci/devices/{if_pci}/numa_node"
        for _ in range(3):
            ret, out, _ = ssh.exec_command(cmd)
            if ret == 0:
                try:
                    # sysfs reports -1 when the device has no NUMA
                    # affinity; normalise that to node 0.
                    numa_node = 0 if int(out) < 0 else int(out)
                except ValueError:
                    logger.trace(
                        f"Reading numa location failed for: {if_pci}"
                    )
                else:
                    Topology.set_interface_numa_node(
                        node, if_key, numa_node
                    )
                    break
        else:
            raise RuntimeError(f"Update numa node failed for: {if_pci}")
def update_all_interface_data_on_all_nodes(
        nodes, skip_tg=False, skip_vpp=False):
    """Update interface names on all nodes in DICT__nodes.

    This method updates the topology dictionary by querying interface lists
    of all nodes mentioned in the topology dictionary.

    :param nodes: Nodes in the topology.
    :param skip_tg: Skip TG node.
    :param skip_vpp: Skip VPP node.
    :type nodes: dict
    :type skip_tg: bool
    :type skip_vpp: bool
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT and not skip_vpp:
            InterfaceUtil.update_vpp_interface_data_on_node(node)
        elif node[u"type"] == NodeType.TG and not skip_tg:
            InterfaceUtil.update_tg_interface_data_on_node(node)
            # NUMA placement matters for TG worker pinning.
            InterfaceUtil.iface_update_numa_node(node)
def create_vlan_subinterface(node, interface, vlan):
    """Create VLAN sub-interface on node.

    :param node: Node to add VLAN subinterface on.
    :param interface: Interface name or index on which create VLAN
        subinterface.
    :param vlan: VLAN ID of the subinterface to be created.
    :type node: dict
    :type interface: str or int
    :type vlan: int
    :returns: Name and index of created subinterface.
    :rtype: tuple
    :raises RuntimeError: if it is unable to create VLAN subinterface on the
        node or interface cannot be converted.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"create_vlan_subif"
    # NOTE(review): orphan keyword line; presumably part of an
    # `args = dict(sw_if_index=..., vlan_id=...)` construction.
    sw_if_index=sw_if_index,
    err_msg = f"Failed to create VLAN sub-interface on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"vlan_subif")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{vlan}", sw_if_index
def create_vxlan_interface(node, vni, source_ip, destination_ip):
    """Create VXLAN interface and return sw if index of created interface.

    :param node: Node where to create VXLAN interface.
    :param vni: VXLAN Network Identifier.
    :param source_ip: Source IP of a VXLAN Tunnel End Point.
    :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
    :type node: dict
    :type vni: int
    :type source_ip: str
    :type destination_ip: str
    :returns: SW IF INDEX of created interface.
    :rtype: int
    :raises RuntimeError: if it is unable to create VxLAN interface on the
        node.
    """
    src_address = ip_address(source_ip)
    dst_address = ip_address(destination_ip)

    cmd = u"vxlan_add_del_tunnel"
    # NOTE(review): orphan keyword lines; presumably part of an
    # `args = dict(...)` construction that is no longer visible.
    is_ipv6=1 if src_address.version == 6 else 0,
    instance=Constants.BITWISE_NON_ZERO,
    src_address=src_address.packed,
    dst_address=dst_address.packed,
    mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
    decap_next_index=Constants.BITWISE_NON_ZERO,
    err_msg = f"Failed to create VXLAN tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"vxlan_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
    # NOTE(review): a final `return sw_if_index` is likely missing here,
    # given the docstring promises the SW IF INDEX -- confirm upstream.
def set_vxlan_bypass(node, interface=None):
    """Add the 'ip4-vxlan-bypass' graph node for a given interface.

    By adding the IPv4 vxlan-bypass graph node to an interface, the node
    checks for and validate input vxlan packet and bypass ip4-lookup,
    ip4-local, ip4-udp-lookup nodes to speedup vxlan packet forwarding.
    This node will cause extra overhead to for non-vxlan packets which is
    kept at a minimum.

    :param node: Node where to set VXLAN bypass.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :raises RuntimeError: if it failed to set VXLAN bypass on interface.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"sw_interface_set_vxlan_bypass"
    # NOTE(review): orphan keyword line; presumably part of an
    # `args = dict(...)` construction that is no longer visible.
    sw_if_index=sw_if_index,
    err_msg = f"Failed to set VXLAN bypass on interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_replies(err_msg)
def vxlan_dump(node, interface=None):
    """Get VxLAN data for the given interface.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
        If None, information about all VxLAN interfaces is returned.
    :type node: dict
    :type interface: int or str
    :returns: Dictionary containing data for the given VxLAN interface or if
        interface=None, the list of dictionaries with all VxLAN interfaces.
    :rtype: dict or list
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    def process_vxlan_dump(vxlan_dump):
        """Process vxlan dump.

        :param vxlan_dump: Vxlan interface dump.
        :type vxlan_dump: dict
        :returns: Processed vxlan interface dump.
        :rtype: dict
        """
        if vxlan_dump[u"is_ipv6"]:
            vxlan_dump[u"src_address"] = \
                ip_address(vxlan_dump[u"src_address"])
            vxlan_dump[u"dst_address"] = \
                ip_address(vxlan_dump[u"dst_address"])
        # NOTE(review): the two assignments below were presumably the
        # IPv4 `else:` branch (first 4 bytes only); the guard and a
        # `return vxlan_dump` are no longer visible.
        vxlan_dump[u"src_address"] = \
            ip_address(vxlan_dump[u"src_address"][0:4])
        vxlan_dump[u"dst_address"] = \
            ip_address(vxlan_dump[u"dst_address"][0:4])

    if interface is not None:
        sw_if_index = InterfaceUtil.get_interface_index(node, interface)
    # NOTE(review): the line below was presumably an `else:` branch
    # (dump-all sentinel) -- confirm against upstream.
    sw_if_index = int(Constants.BITWISE_NON_ZERO)

    cmd = u"vxlan_tunnel_dump"
    # NOTE(review): orphan keyword line; presumably part of
    # `args = dict(sw_if_index=sw_if_index)`.
    sw_if_index=sw_if_index
    err_msg = f"Failed to get VXLAN dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)

    data = list() if interface is None else dict()
    # NOTE(review): `dump` is used below without a visible
    # `for dump in details:` loop header; a final `return data` also
    # appears to be missing.
    if interface is None:
        data.append(process_vxlan_dump(dump))
    elif dump[u"sw_if_index"] == sw_if_index:
        data = process_vxlan_dump(dump)

    logger.debug(f"VXLAN data:\n{data}")
def create_subinterface(
        node, interface, sub_id, outer_vlan_id=None, inner_vlan_id=None,
        type_subif=None):
    """Create sub-interface on node. It is possible to set required
    sub-interface type and VLAN tag(s).

    :param node: Node to add sub-interface.
    :param interface: Interface name on which create sub-interface.
    :param sub_id: ID of the sub-interface to be created.
    :param outer_vlan_id: Optional outer VLAN ID.
    :param inner_vlan_id: Optional inner VLAN ID.
    :param type_subif: Optional type of sub-interface. Values supported by
        VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
        [default_sub].
    :type node: dict
    :type interface: str or int
    :type sub_id: int
    :type outer_vlan_id: int
    :type inner_vlan_id: int
    :type type_subif: str
    :returns: Name and index of created sub-interface.
    :rtype: tuple
    :raises RuntimeError: If it is not possible to create sub-interface.
    """
    # Fix: the signature had lost its type_subif parameter and the
    # `flags = 0` initialisation was missing (flags used before
    # assignment); the args construction was also truncated.
    subif_types = type_subif.split()

    flags = 0
    if u"no_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_NO_TAGS
    if u"one_tag" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_ONE_TAG
    if u"two_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_TWO_TAGS
    if u"dot1ad" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DOT1AD
    if u"exact_match" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_EXACT_MATCH
    if u"default_sub" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DEFAULT
    if type_subif == u"default_sub":
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_INNER_VLAN_ID_ANY\
            | SubInterfaceFlags.SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY

    cmd = u"create_subif"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        sub_id=int(sub_id),
        sub_if_flags=flags.value if hasattr(flags, u"value")
        else int(flags),
        outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
        inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0
    )
    err_msg = f"Failed to create sub-interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"subinterface")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{sub_id}", sw_if_index
def create_gre_tunnel_interface(node, source_ip, destination_ip):
    """Create GRE tunnel interface on node.

    :param node: VPP node to add tunnel interface.
    :param source_ip: Source of the GRE tunnel.
    :param destination_ip: Destination of the GRE tunnel.
    :type node: dict
    :type source_ip: str
    :type destination_ip: str
    :returns: Name and index of created GRE tunnel interface.
    :rtype: tuple
    :raises RuntimeError: If unable to create GRE tunnel interface.
    """
    cmd = u"gre_tunnel_add_del"
    # NOTE(review): orphan keyword lines; presumably part of the
    # tunnel/args dict constructions that are no longer visible.
    instance=Constants.BITWISE_NON_ZERO,
    dst=str(destination_ip),
    err_msg = f"Failed to create GRE tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"gre_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return ifc_name, sw_if_index
def vpp_create_loopback(node, mac=None):
    """Create loopback interface on VPP node.

    :param node: Node to create loopback interface on.
    :param mac: Optional MAC address for loopback interface.
    :type node: dict
    :type mac: str
    :returns: SW interface index.
    :rtype: int
    :raises RuntimeError: If it is not possible to create loopback on the
        node.
    """
    cmd = u"create_loopback"
    # NOTE(review): orphan keyword line; presumably part of
    # `args = dict(mac_address=...)`.
    mac_address=L2Util.mac_to_bin(mac) if mac else 0
    err_msg = f"Failed to create loopback interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"loopback")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
    # Read back the MAC VPP actually assigned and store it in topology.
    mac = InterfaceUtil.vpp_get_interface_mac(node, ifc_name)
    Topology.update_interface_mac_address(node, if_key, mac)
    # NOTE(review): a final `return sw_if_index` is likely missing here,
    # given the docstring promises the SW interface index.
def vpp_create_bond_interface(node, mode, load_balance=None, mac=None):
    """Create bond interface on VPP node.

    :param node: DUT node from topology.
    :param mode: Link bonding mode.
    :param load_balance: Load balance (optional, valid for xor and lacp
        modes, otherwise ignored).
    :param mac: MAC address to assign to the bond interface (optional).
    :type node: dict
    :type mode: str
    :type load_balance: str
    :type mac: str
    :returns: Interface key (name) in topology.
    :rtype: str
    :raises RuntimeError: If it is not possible to create bond interface on
        the node.
    """
    # Fix: the args construction was truncated mid-expression (unbalanced
    # getattr call); restore the dict with mode/lb resolved via the
    # LinkBondMode / LinkBondLoadBalanceAlgo enums.
    cmd = u"bond_create"
    args = dict(
        id=int(Constants.BITWISE_NON_ZERO),
        use_custom_mac=bool(mac is not None),
        mac_address=L2Util.mac_to_bin(mac) if mac else None,
        mode=getattr(
            LinkBondMode,
            f"BOND_API_MODE_{mode.replace(u'-', u'_').upper()}"
        ).value,
        lb=0 if load_balance is None else getattr(
            LinkBondLoadBalanceAlgo,
            f"BOND_API_LB_ALGO_{load_balance.upper()}"
        ).value
    )
    err_msg = f"Failed to create bond interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx=u"eth_bond"
    )
    if_key = Topology.get_interface_by_sw_index(node, sw_if_index)

    return if_key
def add_eth_interface(
        node, ifc_name=None, sw_if_index=None, ifc_pfx=None,
        host_if_key=None):
    """Add ethernet interface to current topology.

    :param node: DUT node from topology.
    :param ifc_name: Name of the interface.
    :param sw_if_index: SW interface index.
    :param ifc_pfx: Interface key prefix.
    :param host_if_key: Host interface key from topology file.
    :type node: dict
    :type ifc_name: str
    :type sw_if_index: int
    :type ifc_pfx: str
    :type host_if_key: str
    """
    # Fix: the signature was truncated (host_if_key parameter lost) and
    # the vpp_get_interface_sw_index call lacked its closing paren.
    if_key = Topology.add_new_port(node, ifc_pfx)

    # Fill in whichever of name/index the caller did not supply.
    if ifc_name and sw_if_index is None:
        sw_if_index = InterfaceUtil.vpp_get_interface_sw_index(
            node, ifc_name
        )
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    if sw_if_index and ifc_name is None:
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
    ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_index)
    Topology.update_interface_mac_address(node, if_key, ifc_mac)
    if host_if_key is not None:
        # Inherit NUMA placement and PCI address from the host interface.
        Topology.set_interface_numa_node(
            node, if_key, Topology.get_interface_numa_node(
                node, host_if_key
            )
        )
        Topology.update_interface_pci_address(
            node, if_key, Topology.get_interface_pci_addr(node, host_if_key)
        )
1127 def vpp_create_avf_interface(
1128 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0):
1129 """Create AVF interface on VPP node.
1131 :param node: DUT node from topology.
1132 :param if_key: Interface key from topology file of interface
1133 to be bound to i40evf driver.
1134 :param num_rx_queues: Number of RX queues.
1135 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1136 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1139 :type num_rx_queues: int
1142 :returns: AVF interface key (name) in topology.
1144 :raises RuntimeError: If it is not possible to create AVF interface on
1147 PapiSocketExecutor.run_cli_cmd(
1148 node, u"set logging class avf level debug"
1152 vf_pci_addr = Topology.get_interface_pci_addr(node, if_key)
1154 pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
1156 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1160 err_msg = f"Failed to create AVF interface on host {node[u'host']}"
1161 with PapiSocketExecutor(node) as papi_exec:
1162 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1164 InterfaceUtil.add_eth_interface(
1165 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
1169 return Topology.get_interface_by_sw_index(node, sw_if_index)
1172 def vpp_create_rdma_interface(
1173 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
1175 """Create RDMA interface on VPP node.
1177 :param node: DUT node from topology.
1178 :param if_key: Physical interface key from topology file of interface
1179 to be bound to rdma-core driver.
1180 :param num_rx_queues: Number of RX queues.
1181 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1182 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1183 :param mode: RDMA interface mode - auto/ibv/dv.
1186 :type num_rx_queues: int
1190 :returns: Interface key (name) in topology file.
1192 :raises RuntimeError: If it is not possible to create RDMA interface on
1195 cmd = u"rdma_create"
1196 pci_addr = Topology.get_interface_pci_addr(node, if_key)
1198 name=InterfaceUtil.pci_to_eth(node, pci_addr),
1199 host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
1200 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1203 mode=getattr(RdmaMode,f"RDMA_API_MODE_{mode.upper()}").value,
1205 err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
1206 with PapiSocketExecutor(node) as papi_exec:
1207 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1209 InterfaceUtil.vpp_set_interface_mac(
1210 node, sw_if_index, Topology.get_interface_mac(node, if_key)
1212 InterfaceUtil.add_eth_interface(
1213 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_rdma",
1217 return Topology.get_interface_by_sw_index(node, sw_if_index)
1220 def vpp_enslave_physical_interface(node, interface, bond_if):
1221 """Enslave physical interface to bond interface on VPP node.
1223 :param node: DUT node from topology.
1224 :param interface: Physical interface key from topology file.
1225 :param bond_if: Load balance
1227 :type interface: str
1229 :raises RuntimeError: If it is not possible to enslave physical
1230 interface to bond interface on the node.
1232 cmd = u"bond_enslave"
1234 sw_if_index=Topology.get_interface_sw_index(node, interface),
1235 bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
1237 is_long_timeout=False
1239 err_msg = f"Failed to enslave physical interface {interface} to bond " \
1240 f"interface {bond_if} on host {node[u'host']}"
1241 with PapiSocketExecutor(node) as papi_exec:
1242 papi_exec.add(cmd, **args).get_reply(err_msg)
1245 def vpp_show_bond_data_on_node(node, verbose=False):
1246 """Show (detailed) bond information on VPP node.
1248 :param node: DUT node from topology.
1249 :param verbose: If detailed information is required or not.
1253 cmd = u"sw_interface_bond_dump"
1254 err_msg = f"Failed to get bond interface dump on host {node[u'host']}"
1256 data = f"Bond data on node {node[u'host']}:\n"
1257 with PapiSocketExecutor(node) as papi_exec:
1258 details = papi_exec.add(cmd).get_details(err_msg)
1260 for bond in details:
1261 data += f"{bond[u'interface_name']}\n"
1262 data += u" mode: {m}\n".format(
1263 m=bond[u"mode"].name.replace(u"BOND_API_MODE_", u"").lower()
1265 data += u" load balance: {lb}\n".format(
1266 lb=bond[u"lb"].name.replace(u"BOND_API_LB_ALGO_", u"").lower()
1268 data += f" number of active slaves: {bond[u'active_slaves']}\n"
1270 slave_data = InterfaceUtil.vpp_bond_slave_dump(
1271 node, Topology.get_interface_by_sw_index(
1272 node, bond[u"sw_if_index"]
1275 for slave in slave_data:
1276 if not slave[u"is_passive"]:
1277 data += f" {slave[u'interface_name']}\n"
1278 data += f" number of slaves: {bond[u'slaves']}\n"
1280 for slave in slave_data:
1281 data += f" {slave[u'interface_name']}\n"
1282 data += f" interface id: {bond[u'id']}\n"
1283 data += f" sw_if_index: {bond[u'sw_if_index']}\n"
1287 def vpp_bond_slave_dump(node, interface):
1288 """Get bond interface slave(s) data on VPP node.
1290 :param node: DUT node from topology.
1291 :param interface: Physical interface key from topology file.
1293 :type interface: str
1294 :returns: Bond slave interface data.
1297 cmd = u"sw_interface_slave_dump"
1299 sw_if_index=Topology.get_interface_sw_index(node, interface)
1301 err_msg = f"Failed to get slave dump on host {node[u'host']}"
1303 with PapiSocketExecutor(node) as papi_exec:
1304 details = papi_exec.add(cmd, **args).get_details(err_msg)
1306 logger.debug(f"Slave data:\n{details}")
1310 def vpp_show_bond_data_on_all_nodes(nodes, verbose=False):
1311 """Show (detailed) bond information on all VPP nodes in DICT__nodes.
1313 :param nodes: Nodes in the topology.
1314 :param verbose: If detailed information is required or not.
1318 for node_data in nodes.values():
1319 if node_data[u"type"] == NodeType.DUT:
1320 InterfaceUtil.vpp_show_bond_data_on_node(node_data, verbose)
1323 def vpp_enable_input_acl_interface(
1324 node, interface, ip_version, table_index):
1325 """Enable input acl on interface.
1327 :param node: VPP node to setup interface for input acl.
1328 :param interface: Interface to setup input acl.
1329 :param ip_version: Version of IP protocol.
1330 :param table_index: Classify table index.
1332 :type interface: str or int
1333 :type ip_version: str
1334 :type table_index: int
1336 cmd = u"input_acl_set_interface"
1338 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1339 ip4_table_index=table_index if ip_version == u"ip4"
1340 else Constants.BITWISE_NON_ZERO,
1341 ip6_table_index=table_index if ip_version == u"ip6"
1342 else Constants.BITWISE_NON_ZERO,
1343 l2_table_index=table_index if ip_version == u"l2"
1344 else Constants.BITWISE_NON_ZERO,
1346 err_msg = f"Failed to enable input acl on interface {interface}"
1347 with PapiSocketExecutor(node) as papi_exec:
1348 papi_exec.add(cmd, **args).get_reply(err_msg)
1351 def get_interface_classify_table(node, interface):
1352 """Get name of classify table for the given interface.
1354 TODO: Move to Classify.py.
1356 :param node: VPP node to get data from.
1357 :param interface: Name or sw_if_index of a specific interface.
1359 :type interface: str or int
1360 :returns: Classify table name.
1363 if isinstance(interface, str):
1364 sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
1366 sw_if_index = interface
1368 cmd = u"classify_table_by_interface"
1370 sw_if_index=sw_if_index
1372 err_msg = f"Failed to get classify table name by interface {interface}"
1373 with PapiSocketExecutor(node) as papi_exec:
1374 reply = papi_exec.add(cmd, **args).get_reply(err_msg)
1379 def get_sw_if_index(node, interface_name):
1380 """Get sw_if_index for the given interface from actual interface dump.
1382 FIXME: Delete and redirect callers to vpp_get_interface_sw_index.
1384 :param node: VPP node to get interface data from.
1385 :param interface_name: Name of the specific interface.
1387 :type interface_name: str
1388 :returns: sw_if_index of the given interface.
1391 interface_data = InterfaceUtil.vpp_get_interface_data(
1392 node, interface=interface_name
1394 return interface_data.get(u"sw_if_index")
1397 def vxlan_gpe_dump(node, interface_name=None):
1398 """Get VxLAN GPE data for the given interface.
1400 :param node: VPP node to get interface data from.
1401 :param interface_name: Name of the specific interface. If None,
1402 information about all VxLAN GPE interfaces is returned.
1404 :type interface_name: str
1405 :returns: Dictionary containing data for the given VxLAN GPE interface
1406 or if interface=None, the list of dictionaries with all VxLAN GPE
1408 :rtype: dict or list
1410 def process_vxlan_gpe_dump(vxlan_dump):
1411 """Process vxlan_gpe dump.
1413 :param vxlan_dump: Vxlan_gpe nterface dump.
1414 :type vxlan_dump: dict
1415 :returns: Processed vxlan_gpe interface dump.
1418 if vxlan_dump[u"is_ipv6"]:
1419 vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"])
1420 vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"])
1422 vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"][0:4])
1423 vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"][0:4])
1426 if interface_name is not None:
1427 sw_if_index = InterfaceUtil.get_interface_index(
1428 node, interface_name
1431 sw_if_index = int(Constants.BITWISE_NON_ZERO)
1433 cmd = u"vxlan_gpe_tunnel_dump"
1435 sw_if_index=sw_if_index
1437 err_msg = f"Failed to get VXLAN-GPE dump on host {node[u'host']}"
1438 with PapiSocketExecutor(node) as papi_exec:
1439 details = papi_exec.add(cmd, **args).get_details(err_msg)
1441 data = list() if interface_name is None else dict()
1442 for dump in details:
1443 if interface_name is None:
1444 data.append(process_vxlan_gpe_dump(dump))
1445 elif dump[u"sw_if_index"] == sw_if_index:
1446 data = process_vxlan_gpe_dump(dump)
1449 logger.debug(f"VXLAN-GPE data:\n{data}")
1453 def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
1454 """Assign VPP interface to specific VRF/FIB table.
1456 :param node: VPP node where the FIB and interface are located.
1457 :param interface: Interface to be assigned to FIB.
1458 :param table_id: VRF table ID.
1459 :param ipv6: Assign to IPv6 table. Default False.
1461 :type interface: str or int
1465 cmd = u"sw_interface_set_table"
1467 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1469 vrf_id=int(table_id)
1471 err_msg = f"Failed to assign interface {interface} to FIB table"
1472 with PapiSocketExecutor(node) as papi_exec:
1473 papi_exec.add(cmd, **args).get_reply(err_msg)
1476 def set_linux_interface_mac(
1477 node, interface, mac, namespace=None, vf_id=None):
1478 """Set MAC address for interface in linux.
1480 :param node: Node where to execute command.
1481 :param interface: Interface in namespace.
1482 :param mac: MAC to be assigned to interface.
1483 :param namespace: Execute command in namespace. Optional
1484 :param vf_id: Virtual Function id. Optional
1486 :type interface: str
1488 :type namespace: str
1491 mac_str = f"vf {vf_id} mac {mac}" if vf_id is not None \
1492 else f"address {mac}"
1493 ns_str = f"ip netns exec {namespace}" if namespace else u""
1495 cmd = f"{ns_str} ip link set {interface} {mac_str}"
1496 exec_cmd_no_error(node, cmd, sudo=True)
1499 def set_linux_interface_trust_on(
1500 node, interface, namespace=None, vf_id=None):
1501 """Set trust on (promisc) for interface in linux.
1503 :param node: Node where to execute command.
1504 :param interface: Interface in namespace.
1505 :param namespace: Execute command in namespace. Optional
1506 :param vf_id: Virtual Function id. Optional
1508 :type interface: str
1509 :type namespace: str
1512 trust_str = f"vf {vf_id} trust on" if vf_id is not None else u"trust on"
1513 ns_str = f"ip netns exec {namespace}" if namespace else u""
1515 cmd = f"{ns_str} ip link set dev {interface} {trust_str}"
1516 exec_cmd_no_error(node, cmd, sudo=True)
1519 def set_linux_interface_spoof_off(
1520 node, interface, namespace=None, vf_id=None):
1521 """Set spoof off for interface in linux.
1523 :param node: Node where to execute command.
1524 :param interface: Interface in namespace.
1525 :param namespace: Execute command in namespace. Optional
1526 :param vf_id: Virtual Function id. Optional
1528 :type interface: str
1529 :type namespace: str
1532 spoof_str = f"vf {vf_id} spoof off" if vf_id is not None \
1534 ns_str = f"ip netns exec {namespace}" if namespace else u""
1536 cmd = f"{ns_str} ip link set dev {interface} {spoof_str}"
1537 exec_cmd_no_error(node, cmd, sudo=True)
1540 def init_avf_interface(node, ifc_key, numvfs=1, osi_layer=u"L2"):
1541 """Init PCI device by creating VIFs and bind them to vfio-pci for AVF
1542 driver testing on DUT.
1544 :param node: DUT node.
1545 :param ifc_key: Interface key from topology file.
1546 :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
1547 :param osi_layer: OSI Layer type to initialize TG with.
1548 Default value "L2" sets linux interface spoof off.
1552 :type osi_layer: str
1553 :returns: Virtual Function topology interface keys.
1555 :raises RuntimeError: If a reason preventing initialization is found.
1557 # Read PCI address and driver.
1558 pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
1559 pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
1560 uio_driver = Topology.get_uio_driver(node)
1561 kernel_driver = Topology.get_interface_driver(node, ifc_key)
1562 if kernel_driver not in (u"i40e", u"i40evf"):
1564 f"AVF needs i40e-compatible driver, not {kernel_driver} "
1565 f"at node {node[u'host']} ifc {ifc_key}"
1567 current_driver = DUTSetup.get_pci_dev_driver(
1568 node, pf_pci_addr.replace(u":", r"\:"))
1570 VPPUtil.stop_vpp_service(node)
1571 if current_driver != kernel_driver:
1572 # PCI device must be re-bound to kernel driver before creating VFs.
1573 DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
1574 # Stop VPP to prevent deadlock.
1575 # Unbind from current driver.
1576 DUTSetup.pci_driver_unbind(node, pf_pci_addr)
1577 # Bind to kernel driver.
1578 DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
1580 # Initialize PCI VFs.
1581 DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
1584 # Set MAC address and bind each virtual function to uio driver.
1585 for vf_id in range(numvfs):
1586 vf_mac_addr = u":".join(
1587 [pf_mac_addr[0], pf_mac_addr[2], pf_mac_addr[3], pf_mac_addr[4],
1588 pf_mac_addr[5], f"{vf_id:02x}"
1592 pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"
1593 InterfaceUtil.set_linux_interface_trust_on(
1594 node, pf_dev, vf_id=vf_id
1596 if osi_layer == u"L2":
1597 InterfaceUtil.set_linux_interface_spoof_off(
1598 node, pf_dev, vf_id=vf_id
1600 InterfaceUtil.set_linux_interface_mac(
1601 node, pf_dev, vf_mac_addr, vf_id=vf_id
1604 DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
1605 DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
1607 # Add newly created ports into topology file
1608 vf_ifc_name = f"{ifc_key}_vif"
1609 vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
1610 vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
1611 Topology.update_interface_name(
1612 node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
1614 Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
1615 Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
1616 Topology.set_interface_numa_node(
1617 node, vf_ifc_key, Topology.get_interface_numa_node(
1621 vf_ifc_keys.append(vf_ifc_key)
1626 def vpp_sw_interface_rx_placement_dump(node):
1627 """Dump VPP interface RX placement on node.
1629 :param node: Node to run command on.
1631 :returns: Thread mapping information as a list of dictionaries.
1634 cmd = u"sw_interface_rx_placement_dump"
1635 err_msg = f"Failed to run '{cmd}' PAPI command on host {node[u'host']}!"
1636 with PapiSocketExecutor(node) as papi_exec:
1637 for ifc in node[u"interfaces"].values():
1638 if ifc[u"vpp_sw_index"] is not None:
1639 papi_exec.add(cmd, sw_if_index=ifc[u"vpp_sw_index"])
1640 details = papi_exec.get_details(err_msg)
1641 return sorted(details, key=lambda k: k[u"sw_if_index"])
1644 def vpp_sw_interface_set_rx_placement(
1645 node, sw_if_index, queue_id, worker_id):
1646 """Set interface RX placement to worker on node.
1648 :param node: Node to run command on.
1649 :param sw_if_index: VPP SW interface index.
1650 :param queue_id: VPP interface queue ID.
1651 :param worker_id: VPP worker ID (indexing from 0).
1653 :type sw_if_index: int
1655 :type worker_id: int
1656 :raises RuntimeError: If failed to run command on host or if no API
1659 cmd = u"sw_interface_set_rx_placement"
1660 err_msg = f"Failed to set interface RX placement to worker " \
1661 f"on host {node[u'host']}!"
1663 sw_if_index=sw_if_index,
1665 worker_id=worker_id,
1668 with PapiSocketExecutor(node) as papi_exec:
1669 papi_exec.add(cmd, **args).get_reply(err_msg)
1672 def vpp_round_robin_rx_placement(node, prefix):
1673 """Set Round Robin interface RX placement on all worker threads
1676 :param node: Topology nodes.
1677 :param prefix: Interface name prefix.
1682 worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
1685 for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
1686 for interface in node[u"interfaces"].values():
1687 if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
1688 and prefix in interface[u"name"]:
1689 InterfaceUtil.vpp_sw_interface_set_rx_placement(
1690 node, placement[u"sw_if_index"], placement[u"queue_id"],
1691 worker_id % worker_cnt
1696 def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix):
1697 """Set Round Robin interface RX placement on all worker threads
1700 :param nodes: Topology nodes.
1701 :param prefix: Interface name prefix.
1705 for node in nodes.values():
1706 if node[u"type"] == NodeType.DUT:
1707 InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)