1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Interface util library."""
16 from time import sleep
17 from enum import IntEnum
19 from ipaddress import ip_address
20 from robot.api import logger
22 from resources.libraries.python.Constants import Constants
23 from resources.libraries.python.DUTSetup import DUTSetup
24 from resources.libraries.python.IPAddress import IPAddress
25 from resources.libraries.python.L2Util import L2Util
26 from resources.libraries.python.PapiExecutor import PapiSocketExecutor
27 from resources.libraries.python.parsers.JsonParser import JsonParser
28 from resources.libraries.python.ssh import SSH, exec_cmd_no_error
29 from resources.libraries.python.topology import NodeType, Topology
30 from resources.libraries.python.VPPUtil import VPPUtil
class InterfaceStatusFlags(IntEnum):
    """Interface status flags.

    Mirrors the VPP API enum vl_api_if_status_flags_t; values are
    bit flags and may be OR-ed together.
    """
    # Interface is administratively enabled.
    IF_STATUS_API_FLAG_ADMIN_UP = 1
    # Physical carrier/link is up.
    IF_STATUS_API_FLAG_LINK_UP = 2
class MtuProto(IntEnum):
    """MTU protocol.

    Mirrors the VPP API enum vl_api_mtu_proto_t: per-protocol MTU
    indices used by sw_interface_set_mtu (array of 4 values).
    The listing had only the MPLS member; the full enum is restored
    so per-protocol indexing works.
    """
    MTU_PROTO_API_L3 = 0
    MTU_PROTO_API_IP4 = 1
    MTU_PROTO_API_IP6 = 2
    MTU_PROTO_API_MPLS = 3
    MTU_PROTO_API_N = 4
class LinkDuplex(IntEnum):
    """Link duplex.

    Mirrors the VPP API enum vl_api_link_duplex_t.
    """
    LINK_DUPLEX_API_UNKNOWN = 0
    LINK_DUPLEX_API_HALF = 1
    LINK_DUPLEX_API_FULL = 2
class SubInterfaceFlags(IntEnum):
    """Sub-interface flags.

    Mirrors the VPP API enum vl_api_sub_if_flags_t; bit flags
    combined with bitwise OR when creating sub-interfaces.
    """
    SUB_IF_API_FLAG_NO_TAGS = 1
    SUB_IF_API_FLAG_ONE_TAG = 2
    SUB_IF_API_FLAG_TWO_TAGS = 4
    # Outer tag is 802.1ad (QinQ) instead of 802.1q.
    SUB_IF_API_FLAG_DOT1AD = 8
    SUB_IF_API_FLAG_EXACT_MATCH = 16
    # Default sub-interface: catches packets not matched elsewhere.
    SUB_IF_API_FLAG_DEFAULT = 32
    SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY = 64
    SUB_IF_API_FLAG_INNER_VLAN_ID_ANY = 128
    SUB_IF_API_FLAG_DOT1AH = 256
class RxMode(IntEnum):
    """RX mode.

    Mirrors the VPP API enum vl_api_rx_mode_t (interface RX
    queue servicing mode).
    """
    RX_MODE_API_UNKNOWN = 0
    RX_MODE_API_POLLING = 1
    RX_MODE_API_INTERRUPT = 2
    RX_MODE_API_ADAPTIVE = 3
    RX_MODE_API_DEFAULT = 4
class IfType(IntEnum):
    """Interface type.

    Mirrors the VPP API enum vl_api_if_type_t. The listing had only
    the hardware member; the remaining members are restored.
    """
    # A hardware interface.
    IF_API_TYPE_HARDWARE = 0
    # A sub-interface.
    IF_API_TYPE_SUB = 1
    IF_API_TYPE_P2P = 2
    IF_API_TYPE_PIPE = 3
class LinkBondLoadBalanceAlgo(IntEnum):
    """Link bonding load balance algorithm.

    Mirrors the VPP API enum vl_api_bond_lb_algo_t; used as the
    ``lb`` argument of the bond_create API.
    """
    # Hash on L2 (MAC) headers.
    BOND_API_LB_ALGO_L2 = 0
    # Hash on L3 + L4 headers.
    BOND_API_LB_ALGO_L34 = 1
    # Hash on L2 + L3 headers.
    BOND_API_LB_ALGO_L23 = 2
    # Round-robin.
    BOND_API_LB_ALGO_RR = 3
    # Broadcast.
    BOND_API_LB_ALGO_BC = 4
    # Active-backup.
    BOND_API_LB_ALGO_AB = 5
class LinkBondMode(IntEnum):
    """Link bonding mode.

    Mirrors the VPP API enum vl_api_bond_mode_t; used as the
    ``mode`` argument of the bond_create API.
    """
    BOND_API_MODE_ROUND_ROBIN = 1
    BOND_API_MODE_ACTIVE_BACKUP = 2
    BOND_API_MODE_XOR = 3
    BOND_API_MODE_BROADCAST = 4
    BOND_API_MODE_LACP = 5
class RdmaMode(IntEnum):
    """RDMA interface mode.

    Mirrors the VPP API enum vl_api_rdma_mode_t. The DV member is
    restored; the RDMA create API documents mode auto/ibv/dv, so the
    enum must cover all three for getattr-based lookup to work.
    """
    RDMA_API_MODE_AUTO = 0
    RDMA_API_MODE_IBV = 1
    RDMA_API_MODE_DV = 2
114 """General utilities for managing interfaces"""
117 def pci_to_int(pci_str):
118 """Convert PCI address from string format (0000:18:0a.0) to
119 integer representation (169345024).
121 :param pci_str: PCI address in string representation.
123 :returns: Integer representation of PCI address.
126 pci = list(pci_str.split(u":")[0:2])
127 pci.extend(pci_str.split(u":")[2].split(u"."))
129 return (int(pci[0], 16) | int(pci[1], 16) << 16 |
130 int(pci[2], 16) << 24 | int(pci[3], 16) << 29)
133 def pci_to_eth(node, pci_str):
134 """Convert PCI address on DUT to Linux ethernet name.
136 :param node: DUT node
137 :param pci_str: PCI address.
140 :returns: Ethernet name.
143 cmd = f"basename /sys/bus/pci/devices/{pci_str}/net/*"
145 stdout, _ = exec_cmd_no_error(node, cmd)
147 raise RuntimeError(f"Cannot convert {pci_str} to ethernet name!")
149 return stdout.strip()
152 def get_interface_index(node, interface):
153 """Get interface sw_if_index from topology file.
155 :param node: Node where the interface is.
156 :param interface: Numeric index or name string of a specific interface.
158 :type interface: str or int
159 :returns: SW interface index.
163 sw_if_index = int(interface)
165 sw_if_index = Topology.get_interface_sw_index(node, interface)
166 if sw_if_index is None:
168 Topology.get_interface_sw_index_by_name(node, interface)
169 except TypeError as err:
170 raise TypeError(f"Wrong interface format {interface}") from err
175 def set_interface_state(node, interface, state, if_type=u"key"):
176 """Set interface state on a node.
178 Function can be used for DUTs as well as for TGs.
180 :param node: Node where the interface is.
181 :param interface: Interface key or sw_if_index or name.
182 :param state: One of 'up' or 'down'.
183 :param if_type: Interface type
185 :type interface: str or int
189 :raises ValueError: If the interface type is unknown.
190 :raises ValueError: If the state of interface is unexpected.
191 :raises ValueError: If the node has an unknown node type.
193 if if_type == u"key":
194 if isinstance(interface, str):
195 sw_if_index = Topology.get_interface_sw_index(node, interface)
196 iface_name = Topology.get_interface_name(node, interface)
198 sw_if_index = interface
199 elif if_type == u"name":
200 iface_key = Topology.get_interface_by_name(node, interface)
201 if iface_key is not None:
202 sw_if_index = Topology.get_interface_sw_index(node, iface_key)
203 iface_name = interface
205 raise ValueError(f"Unknown if_type: {if_type}")
207 if node[u"type"] == NodeType.DUT:
209 flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
210 elif state == u"down":
213 raise ValueError(f"Unexpected interface state: {state}")
214 cmd = u"sw_interface_set_flags"
215 err_msg = f"Failed to set interface state on host {node[u'host']}"
217 sw_if_index=int(sw_if_index),
220 with PapiSocketExecutor(node) as papi_exec:
221 papi_exec.add(cmd, **args).get_reply(err_msg)
222 elif node[u"type"] == NodeType.TG or node[u"type"] == NodeType.VM:
223 cmd = f"ip link set {iface_name} {state}"
224 exec_cmd_no_error(node, cmd, sudo=True)
227 f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
231 def set_interface_ethernet_mtu(node, iface_key, mtu):
232 """Set Ethernet MTU for specified interface.
234 Function can be used only for TGs.
236 :param node: Node where the interface is.
237 :param iface_key: Interface key from topology file.
238 :param mtu: MTU to set.
243 :raises ValueError: If the node type is "DUT".
244 :raises ValueError: If the node has an unknown node type.
246 if node[u"type"] == NodeType.DUT:
247 msg = f"Node {node[u'host']}: Setting Ethernet MTU for interface " \
248 f"on DUT nodes not supported"
249 elif node[u"type"] != NodeType.TG:
250 msg = f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
252 iface_name = Topology.get_interface_name(node, iface_key)
253 cmd = f"ip link set {iface_name} mtu {mtu}"
254 exec_cmd_no_error(node, cmd, sudo=True)
256 raise ValueError(msg)
    def set_default_ethernet_mtu_on_all_interfaces_on_node(node):
        """Set default Ethernet MTU on all interfaces on node.

        Function can be used only for TGs.

        :param node: Node where to set default MTU.
        :type node: dict
        :returns: Nothing.
        """
        # 1500 is the standard Ethernet MTU.
        for ifc in node[u"interfaces"]:
            InterfaceUtil.set_interface_ethernet_mtu(node, ifc, 1500)
272 def vpp_set_interface_mtu(node, interface, mtu=9200):
273 """Set Ethernet MTU on interface.
275 :param node: VPP node.
276 :param interface: Interface to setup MTU. Default: 9200.
277 :param mtu: Ethernet MTU size in Bytes.
279 :type interface: str or int
282 if isinstance(interface, str):
283 sw_if_index = Topology.get_interface_sw_index(node, interface)
285 sw_if_index = interface
287 cmd = u"hw_interface_set_mtu"
288 err_msg = f"Failed to set interface MTU on host {node[u'host']}"
290 sw_if_index=sw_if_index,
294 with PapiSocketExecutor(node) as papi_exec:
295 papi_exec.add(cmd, **args).get_reply(err_msg)
296 except AssertionError as err:
297 # TODO: Make failure tolerance optional.
298 logger.debug(f"Setting MTU failed. Expected?\n{err}")
    def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
        """Set Ethernet MTU on all interfaces.

        :param node: VPP node.
        :param mtu: Ethernet MTU size in Bytes. Default: 9200.
        :type node: dict
        :type mtu: int
        """
        for interface in node[u"interfaces"]:
            InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
    def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
        """Set Ethernet MTU on all interfaces on all DUTs.

        TG/other node types are skipped on purpose.

        :param nodes: VPP nodes.
        :param mtu: Ethernet MTU size in Bytes. Default: 9200.
        :type nodes: dict
        :type mtu: int
        """
        for node in nodes.values():
            if node[u"type"] == NodeType.DUT:
                InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
326 def vpp_node_interfaces_ready_wait(node, retries=15):
327 """Wait until all interfaces with admin-up are in link-up state.
329 :param node: Node to wait on.
330 :param retries: Number of retries to check interface status (optional,
335 :raises RuntimeError: If any interface is not in link-up state after
336 defined number of retries.
338 for _ in range(0, retries):
340 out = InterfaceUtil.vpp_get_interface_data(node)
341 for interface in out:
342 if interface.get(u"flags") == 1:
343 not_ready.append(interface.get(u"interface_name"))
346 f"Interfaces still not in link-up state:\n{not_ready}"
352 err = f"Timeout, interfaces not up:\n{not_ready}" \
353 if u"not_ready" in locals() else u"No check executed!"
354 raise RuntimeError(err)
    def all_vpp_interfaces_ready_wait(nodes, retries=15):
        """Wait until all interfaces with admin-up are in link-up state for all
        nodes in the topology.

        Only DUT nodes are checked; TG nodes do not run VPP.

        :param nodes: Nodes in the topology.
        :param retries: Number of retries to check interface status (optional,
            default 15).
        :type nodes: dict
        :type retries: int
        :returns: Nothing.
        """
        for node in nodes.values():
            if node[u"type"] == NodeType.DUT:
                InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)
373 def vpp_get_interface_data(node, interface=None):
374 """Get all interface data from a VPP node. If a name or
375 sw_interface_index is provided, return only data for the matching
378 :param node: VPP node to get interface data from.
379 :param interface: Numeric index or name string of a specific interface.
381 :type interface: int or str
382 :returns: List of dictionaries containing data for each interface, or a
383 single dictionary for the specified interface.
385 :raises TypeError: if the data type of interface is neither basestring
388 def process_if_dump(if_dump):
389 """Process interface dump.
391 :param if_dump: Interface dump.
393 :returns: Processed interface dump.
396 if_dump[u"l2_address"] = str(if_dump[u"l2_address"])
397 if_dump[u"b_dmac"] = str(if_dump[u"b_dmac"])
398 if_dump[u"b_smac"] = str(if_dump[u"b_smac"])
399 if_dump[u"flags"] = if_dump[u"flags"].value
400 if_dump[u"type"] = if_dump[u"type"].value
401 if_dump[u"link_duplex"] = if_dump[u"link_duplex"].value
402 if_dump[u"sub_if_flags"] = if_dump[u"sub_if_flags"].value \
403 if hasattr(if_dump[u"sub_if_flags"], u"value") \
404 else int(if_dump[u"sub_if_flags"])
408 if interface is not None:
409 if isinstance(interface, str):
410 param = u"interface_name"
411 elif isinstance(interface, int):
412 param = u"sw_if_index"
414 raise TypeError(f"Wrong interface format {interface}")
418 cmd = u"sw_interface_dump"
420 name_filter_valid=False,
423 err_msg = f"Failed to get interface dump on host {node[u'host']}"
425 with PapiSocketExecutor(node) as papi_exec:
426 details = papi_exec.add(cmd, **args).get_details(err_msg)
427 logger.debug(f"Received data:\n{details!r}")
429 data = list() if interface is None else dict()
431 if interface is None:
432 data.append(process_if_dump(dump))
433 elif str(dump.get(param)).rstrip(u"\x00") == str(interface):
434 data = process_if_dump(dump)
437 logger.debug(f"Interface data:\n{data}")
441 def vpp_get_interface_name(node, sw_if_index):
442 """Get interface name for the given SW interface index from actual
445 :param node: VPP node to get interface data from.
446 :param sw_if_index: SW interface index of the specific interface.
448 :type sw_if_index: int
449 :returns: Name of the given interface.
452 if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
453 if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
454 if_data = InterfaceUtil.vpp_get_interface_data(
455 node, if_data[u"sup_sw_if_index"]
458 return if_data.get(u"interface_name")
461 def vpp_get_interface_sw_index(node, interface_name):
462 """Get interface name for the given SW interface index from actual
465 :param node: VPP node to get interface data from.
466 :param interface_name: Interface name.
468 :type interface_name: str
469 :returns: Name of the given interface.
472 if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)
474 return if_data.get(u"sw_if_index")
    def vpp_get_interface_mac(node, interface):
        """Get MAC address for the given interface from actual interface dump.

        :param node: VPP node to get interface data from.
        :param interface: Numeric index or name string of a specific interface.
        :type node: dict
        :type interface: int or str
        :returns: MAC address.
        :rtype: str
        """
        if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
        # Sub-interfaces report the MAC of their parent (super) interface.
        if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
            if_data = InterfaceUtil.vpp_get_interface_data(
                node, if_data[u"sup_sw_if_index"])

        return if_data.get(u"l2_address")
495 def vpp_set_interface_mac(node, interface, mac):
496 """Set MAC address for the given interface.
498 :param node: VPP node to set interface MAC.
499 :param interface: Numeric index or name string of a specific interface.
500 :param mac: Required MAC address.
502 :type interface: int or str
505 cmd = u"sw_interface_set_mac_address"
507 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
508 mac_address=L2Util.mac_to_bin(mac)
510 err_msg = f"Failed to set MAC address of interface {interface}" \
511 f"on host {node[u'host']}"
512 with PapiSocketExecutor(node) as papi_exec:
513 papi_exec.add(cmd, **args).get_reply(err_msg)
516 def tg_set_interface_driver(node, pci_addr, driver):
517 """Set interface driver on the TG node.
519 :param node: Node to set interface driver on (must be TG node).
520 :param pci_addr: PCI address of the interface.
521 :param driver: Driver name.
525 :raises RuntimeError: If unbinding from the current driver fails.
526 :raises RuntimeError: If binding to the new driver fails.
528 old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
529 if old_driver == driver:
535 # Unbind from current driver
536 if old_driver is not None:
537 cmd = f"sh -c \"echo {pci_addr} > " \
538 f"/sys/bus/pci/drivers/{old_driver}/unbind\""
539 ret_code, _, _ = ssh.exec_command_sudo(cmd)
540 if int(ret_code) != 0:
541 raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
543 # Bind to the new driver
544 cmd = f"sh -c \"echo {pci_addr} > /sys/bus/pci/drivers/{driver}/bind\""
545 ret_code, _, _ = ssh.exec_command_sudo(cmd)
546 if int(ret_code) != 0:
547 raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
    def tg_get_interface_driver(node, pci_addr):
        """Get interface driver from the TG node.

        Thin wrapper around DUTSetup.get_pci_dev_driver.

        :param node: Node to get interface driver on (must be TG node).
        :param pci_addr: PCI address of the interface.
        :type node: dict
        :type pci_addr: str
        :returns: Interface driver or None if not found.
        :rtype: str
        :raises RuntimeError: If PCI rescan or lspci command execution failed.
        """
        return DUTSetup.get_pci_dev_driver(node, pci_addr)
564 def tg_set_interfaces_default_driver(node):
565 """Set interfaces default driver specified in topology yaml file.
567 :param node: Node to setup interfaces driver on (must be TG node).
570 for interface in node[u"interfaces"].values():
571 InterfaceUtil.tg_set_interface_driver(
572 node, interface[u"pci_address"], interface[u"driver"]
576 def update_vpp_interface_data_on_node(node):
577 """Update vpp generated interface data for a given node in DICT__nodes.
579 Updates interface names, software if index numbers and any other details
580 generated specifically by vpp that are unknown before testcase run.
581 It does this by dumping interface list from all devices using python
582 api, and pairing known information from topology (mac address) to state
585 :param node: Node selected from DICT__nodes.
588 interface_list = InterfaceUtil.vpp_get_interface_data(node)
589 interface_dict = dict()
590 for ifc in interface_list:
591 interface_dict[ifc[u"l2_address"]] = ifc
593 for if_name, if_data in node[u"interfaces"].items():
594 ifc_dict = interface_dict.get(if_data[u"mac_address"])
595 if ifc_dict is not None:
596 if_data[u"name"] = ifc_dict[u"interface_name"]
597 if_data[u"vpp_sw_index"] = ifc_dict[u"sw_if_index"]
598 if_data[u"mtu"] = ifc_dict[u"mtu"][0]
600 f"Interface {if_name} found by MAC "
601 f"{if_data[u'mac_address']}"
605 f"Interface {if_name} not found by MAC "
606 f"{if_data[u'mac_address']}"
608 if_data[u"vpp_sw_index"] = None
611 def update_nic_interface_names(node):
612 """Update interface names based on nic type and PCI address.
614 This method updates interface names in the same format as VPP does.
616 :param node: Node dictionary.
619 for ifc in node[u"interfaces"].values():
620 if_pci = ifc[u"pci_address"].replace(u".", u":").split(u":")
621 loc = f"{int(if_pci[1], 16):x}/{int(if_pci[2], 16):x}/" \
622 f"{int(if_pci[3], 16):x}"
623 if ifc[u"model"] == u"Intel-XL710":
624 ifc[u"name"] = f"FortyGigabitEthernet{loc}"
625 elif ifc[u"model"] == u"Intel-X710":
626 ifc[u"name"] = f"TenGigabitEthernet{loc}"
627 elif ifc[u"model"] == u"Intel-X520-DA2":
628 ifc[u"name"] = f"TenGigabitEthernet{loc}"
629 elif ifc[u"model"] == u"Cisco-VIC-1385":
630 ifc[u"name"] = f"FortyGigabitEthernet{loc}"
631 elif ifc[u"model"] == u"Cisco-VIC-1227":
632 ifc[u"name"] = f"TenGigabitEthernet{loc}"
634 ifc[u"name"] = f"UnknownEthernet{loc}"
    def update_nic_interface_names_on_all_duts(nodes):
        """Update interface names based on nic type and PCI address on all DUTs.

        This method updates interface names in the same format as VPP does.

        :param nodes: Topology nodes.
        :type nodes: dict
        """
        for node in nodes.values():
            if node[u"type"] == NodeType.DUT:
                InterfaceUtil.update_nic_interface_names(node)
650 def update_tg_interface_data_on_node(node):
651 """Update interface name for TG/linux node in DICT__nodes.
654 # for dev in `ls /sys/class/net/`;
655 > do echo "\"`cat /sys/class/net/$dev/address`\": \"$dev\""; done
656 "52:54:00:9f:82:63": "eth0"
657 "52:54:00:77:ae:a9": "eth1"
658 "52:54:00:e1:8a:0f": "eth2"
659 "00:00:00:00:00:00": "lo"
661 :param node: Node selected from DICT__nodes.
663 :raises RuntimeError: If getting of interface name and MAC fails.
665 # First setup interface driver specified in yaml file
666 InterfaceUtil.tg_set_interfaces_default_driver(node)
668 # Get interface names
672 cmd = u'for dev in `ls /sys/class/net/`; do echo "\\"`cat ' \
673 u'/sys/class/net/$dev/address`\\": \\"$dev\\""; done;'
675 ret_code, stdout, _ = ssh.exec_command(cmd)
676 if int(ret_code) != 0:
677 raise RuntimeError(u"Get interface name and MAC failed")
678 tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"
680 interfaces = JsonParser().parse_data(tmp)
681 for interface in node[u"interfaces"].values():
682 name = interfaces.get(interface[u"mac_address"])
685 interface[u"name"] = name
688 def iface_update_numa_node(node):
689 """For all interfaces from topology file update numa node based on
690 information from the node.
692 :param node: Node from topology.
695 :raises ValueError: If numa node ia less than 0.
696 :raises RuntimeError: If update of numa node failed.
699 for if_key in Topology.get_node_interfaces(node):
700 if_pci = Topology.get_interface_pci_addr(node, if_key)
702 cmd = f"cat /sys/bus/pci/devices/{if_pci}/numa_node"
704 ret, out, _ = ssh.exec_command(cmd)
707 numa_node = 0 if int(out) < 0 else int(out)
710 f"Reading numa location failed for: {if_pci}"
713 Topology.set_interface_numa_node(
714 node, if_key, numa_node
718 raise RuntimeError(f"Update numa node failed for: {if_pci}")
    def update_all_interface_data_on_all_nodes(
            nodes, skip_tg=False, skip_vpp=False):
        """Update interface names on all nodes in DICT__nodes.

        This method updates the topology dictionary by querying interface lists
        of all nodes mentioned in the topology dictionary.

        :param nodes: Nodes in the topology.
        :param skip_tg: Skip TG node.
        :param skip_vpp: Skip VPP node.
        :type nodes: dict
        :type skip_tg: bool
        :type skip_vpp: bool
        """
        for node in nodes.values():
            if node[u"type"] == NodeType.DUT and not skip_vpp:
                InterfaceUtil.update_vpp_interface_data_on_node(node)
            elif node[u"type"] == NodeType.TG and not skip_tg:
                InterfaceUtil.update_tg_interface_data_on_node(node)
                # numa node info is only refreshed for TG interfaces here.
                InterfaceUtil.iface_update_numa_node(node)
743 def create_vlan_subinterface(node, interface, vlan):
744 """Create VLAN sub-interface on node.
746 :param node: Node to add VLAN subinterface on.
747 :param interface: Interface name or index on which create VLAN
749 :param vlan: VLAN ID of the subinterface to be created.
751 :type interface: str on int
753 :returns: Name and index of created subinterface.
755 :raises RuntimeError: if it is unable to create VLAN subinterface on the
756 node or interface cannot be converted.
758 sw_if_index = InterfaceUtil.get_interface_index(node, interface)
760 cmd = u"create_vlan_subif"
762 sw_if_index=sw_if_index,
765 err_msg = f"Failed to create VLAN sub-interface on host {node[u'host']}"
767 with PapiSocketExecutor(node) as papi_exec:
768 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
770 if_key = Topology.add_new_port(node, u"vlan_subif")
771 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
772 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
773 Topology.update_interface_name(node, if_key, ifc_name)
775 return f"{interface}.{vlan}", sw_if_index
778 def create_vxlan_interface(node, vni, source_ip, destination_ip):
779 """Create VXLAN interface and return sw if index of created interface.
781 :param node: Node where to create VXLAN interface.
782 :param vni: VXLAN Network Identifier.
783 :param source_ip: Source IP of a VXLAN Tunnel End Point.
784 :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
788 :type destination_ip: str
789 :returns: SW IF INDEX of created interface.
791 :raises RuntimeError: if it is unable to create VxLAN interface on the
794 cmd = u"vxlan_add_del_tunnel"
797 instance=Constants.BITWISE_NON_ZERO,
798 src_address=IPAddress.create_ip_address_object(
799 ip_address(source_ip)
801 dst_address=IPAddress.create_ip_address_object(
802 ip_address(destination_ip)
804 mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
806 decap_next_index=Constants.BITWISE_NON_ZERO,
809 err_msg = f"Failed to create VXLAN tunnel interface " \
810 f"on host {node[u'host']}"
811 with PapiSocketExecutor(node) as papi_exec:
812 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
814 if_key = Topology.add_new_port(node, u"vxlan_tunnel")
815 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
816 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
817 Topology.update_interface_name(node, if_key, ifc_name)
822 def set_vxlan_bypass(node, interface=None):
823 """Add the 'ip4-vxlan-bypass' graph node for a given interface.
825 By adding the IPv4 vxlan-bypass graph node to an interface, the node
826 checks for and validate input vxlan packet and bypass ip4-lookup,
827 ip4-local, ip4-udp-lookup nodes to speedup vxlan packet forwarding.
828 This node will cause extra overhead to for non-vxlan packets which is
831 :param node: Node where to set VXLAN bypass.
832 :param interface: Numeric index or name string of a specific interface.
834 :type interface: int or str
835 :raises RuntimeError: if it failed to set VXLAN bypass on interface.
837 sw_if_index = InterfaceUtil.get_interface_index(node, interface)
839 cmd = u"sw_interface_set_vxlan_bypass"
842 sw_if_index=sw_if_index,
845 err_msg = f"Failed to set VXLAN bypass on interface " \
846 f"on host {node[u'host']}"
847 with PapiSocketExecutor(node) as papi_exec:
848 papi_exec.add(cmd, **args).get_replies(err_msg)
851 def vxlan_dump(node, interface=None):
852 """Get VxLAN data for the given interface.
854 :param node: VPP node to get interface data from.
855 :param interface: Numeric index or name string of a specific interface.
856 If None, information about all VxLAN interfaces is returned.
858 :type interface: int or str
859 :returns: Dictionary containing data for the given VxLAN interface or if
860 interface=None, the list of dictionaries with all VxLAN interfaces.
862 :raises TypeError: if the data type of interface is neither basestring
865 def process_vxlan_dump(vxlan_dump):
866 """Process vxlan dump.
868 :param vxlan_dump: Vxlan interface dump.
869 :type vxlan_dump: dict
870 :returns: Processed vxlan interface dump.
873 vxlan_dump[u"src_address"] = str(vxlan_dump[u"src_address"])
874 vxlan_dump[u"dst_address"] = str(vxlan_dump[u"dst_address"])
877 if interface is not None:
878 sw_if_index = InterfaceUtil.get_interface_index(node, interface)
880 sw_if_index = int(Constants.BITWISE_NON_ZERO)
882 cmd = u"vxlan_tunnel_dump"
884 sw_if_index=sw_if_index
886 err_msg = f"Failed to get VXLAN dump on host {node[u'host']}"
888 with PapiSocketExecutor(node) as papi_exec:
889 details = papi_exec.add(cmd, **args).get_details(err_msg)
891 data = list() if interface is None else dict()
893 if interface is None:
894 data.append(process_vxlan_dump(dump))
895 elif dump[u"sw_if_index"] == sw_if_index:
896 data = process_vxlan_dump(dump)
899 logger.debug(f"VXLAN data:\n{data}")
903 def create_subinterface(
904 node, interface, sub_id, outer_vlan_id=None, inner_vlan_id=None,
906 """Create sub-interface on node. It is possible to set required
907 sub-interface type and VLAN tag(s).
909 :param node: Node to add sub-interface.
910 :param interface: Interface name on which create sub-interface.
911 :param sub_id: ID of the sub-interface to be created.
912 :param outer_vlan_id: Optional outer VLAN ID.
913 :param inner_vlan_id: Optional inner VLAN ID.
914 :param type_subif: Optional type of sub-interface. Values supported by
915 VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
918 :type interface: str or int
920 :type outer_vlan_id: int
921 :type inner_vlan_id: int
922 :type type_subif: str
923 :returns: Name and index of created sub-interface.
925 :raises RuntimeError: If it is not possible to create sub-interface.
927 subif_types = type_subif.split()
930 if u"no_tags" in subif_types:
931 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_NO_TAGS
932 if u"one_tag" in subif_types:
933 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_ONE_TAG
934 if u"two_tags" in subif_types:
935 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_TWO_TAGS
936 if u"dot1ad" in subif_types:
937 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DOT1AD
938 if u"exact_match" in subif_types:
939 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_EXACT_MATCH
940 if u"default_sub" in subif_types:
941 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DEFAULT
942 if type_subif == u"default_sub":
943 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_INNER_VLAN_ID_ANY\
944 | SubInterfaceFlags.SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY
946 cmd = u"create_subif"
948 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
950 sub_if_flags=flags.value if hasattr(flags, u"value")
952 outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
953 inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0
955 err_msg = f"Failed to create sub-interface on host {node[u'host']}"
956 with PapiSocketExecutor(node) as papi_exec:
957 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
959 if_key = Topology.add_new_port(node, u"subinterface")
960 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
961 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
962 Topology.update_interface_name(node, if_key, ifc_name)
964 return f"{interface}.{sub_id}", sw_if_index
967 def create_gre_tunnel_interface(node, source_ip, destination_ip):
968 """Create GRE tunnel interface on node.
970 :param node: VPP node to add tunnel interface.
971 :param source_ip: Source of the GRE tunnel.
972 :param destination_ip: Destination of the GRE tunnel.
975 :type destination_ip: str
976 :returns: Name and index of created GRE tunnel interface.
978 :raises RuntimeError: If unable to create GRE tunnel interface.
980 cmd = u"gre_tunnel_add_del"
983 instance=Constants.BITWISE_NON_ZERO,
985 dst=str(destination_ip),
993 err_msg = f"Failed to create GRE tunnel interface " \
994 f"on host {node[u'host']}"
995 with PapiSocketExecutor(node) as papi_exec:
996 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
998 if_key = Topology.add_new_port(node, u"gre_tunnel")
999 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
1000 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
1001 Topology.update_interface_name(node, if_key, ifc_name)
1003 return ifc_name, sw_if_index
1006 def vpp_create_loopback(node, mac=None):
1007 """Create loopback interface on VPP node.
1009 :param node: Node to create loopback interface on.
1010 :param mac: Optional MAC address for loopback interface.
1013 :returns: SW interface index.
1015 :raises RuntimeError: If it is not possible to create loopback on the
1018 cmd = u"create_loopback"
1020 mac_address=L2Util.mac_to_bin(mac) if mac else 0
1022 err_msg = f"Failed to create loopback interface on host {node[u'host']}"
1023 with PapiSocketExecutor(node) as papi_exec:
1024 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1026 if_key = Topology.add_new_port(node, u"loopback")
1027 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
1028 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
1029 Topology.update_interface_name(node, if_key, ifc_name)
1031 mac = InterfaceUtil.vpp_get_interface_mac(node, ifc_name)
1032 Topology.update_interface_mac_address(node, if_key, mac)
1037 def vpp_create_bond_interface(node, mode, load_balance=None, mac=None):
1038 """Create bond interface on VPP node.
1040 :param node: DUT node from topology.
1041 :param mode: Link bonding mode.
1042 :param load_balance: Load balance (optional, valid for xor and lacp
1043 modes, otherwise ignored).
1044 :param mac: MAC address to assign to the bond interface (optional).
1047 :type load_balance: str
1049 :returns: Interface key (name) in topology.
1051 :raises RuntimeError: If it is not possible to create bond interface on
1054 cmd = u"bond_create"
1056 id=int(Constants.BITWISE_NON_ZERO),
1057 use_custom_mac=bool(mac is not None),
1058 mac_address=L2Util.mac_to_bin(mac) if mac else None,
1061 f"BOND_API_MODE_{mode.replace(u'-', u'_').upper()}"
1063 lb=0 if load_balance is None else getattr(
1064 LinkBondLoadBalanceAlgo,
1065 f"BOND_API_LB_ALGO_{load_balance.upper()}"
1069 err_msg = f"Failed to create bond interface on host {node[u'host']}"
1070 with PapiSocketExecutor(node) as papi_exec:
1071 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1073 InterfaceUtil.add_eth_interface(
1074 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_bond"
1076 if_key = Topology.get_interface_by_sw_index(node, sw_if_index)
1081 def add_eth_interface(
1082 node, ifc_name=None, sw_if_index=None, ifc_pfx=None,
1084 """Add ethernet interface to current topology.
1086 :param node: DUT node from topology.
1087 :param ifc_name: Name of the interface.
1088 :param sw_if_index: SW interface index.
1089 :param ifc_pfx: Interface key prefix.
1090 :param host_if_key: Host interface key from topology file.
1093 :type sw_if_index: int
1095 :type host_if_key: str
1097 if_key = Topology.add_new_port(node, ifc_pfx)
1099 if ifc_name and sw_if_index is None:
1100 sw_if_index = InterfaceUtil.vpp_get_interface_sw_index(
1102 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
1103 if sw_if_index and ifc_name is None:
1104 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
1105 Topology.update_interface_name(node, if_key, ifc_name)
1106 ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_index)
1107 Topology.update_interface_mac_address(node, if_key, ifc_mac)
1108 if host_if_key is not None:
1109 Topology.set_interface_numa_node(
1110 node, if_key, Topology.get_interface_numa_node(
1114 Topology.update_interface_pci_address(
1115 node, if_key, Topology.get_interface_pci_addr(node, host_if_key)
1119 def vpp_create_avf_interface(
1120 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0):
1121 """Create AVF interface on VPP node.
1123 :param node: DUT node from topology.
1124 :param if_key: Interface key from topology file of interface
1125 to be bound to i40evf driver.
1126 :param num_rx_queues: Number of RX queues.
1127 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1128 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1131 :type num_rx_queues: int
1134 :returns: AVF interface key (name) in topology.
1136 :raises RuntimeError: If it is not possible to create AVF interface on
1139 PapiSocketExecutor.run_cli_cmd(
1140 node, u"set logging class avf level debug"
1144 vf_pci_addr = Topology.get_interface_pci_addr(node, if_key)
1146 pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
1148 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1152 err_msg = f"Failed to create AVF interface on host {node[u'host']}"
1153 with PapiSocketExecutor(node) as papi_exec:
1154 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1156 InterfaceUtil.add_eth_interface(
1157 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
1161 return Topology.get_interface_by_sw_index(node, sw_if_index)
1164 def vpp_create_rdma_interface(
1165 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
1167 """Create RDMA interface on VPP node.
1169 :param node: DUT node from topology.
1170 :param if_key: Physical interface key from topology file of interface
1171 to be bound to rdma-core driver.
1172 :param num_rx_queues: Number of RX queues.
1173 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1174 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1175 :param mode: RDMA interface mode - auto/ibv/dv.
1178 :type num_rx_queues: int
1182 :returns: Interface key (name) in topology file.
1184 :raises RuntimeError: If it is not possible to create RDMA interface on
1187 PapiSocketExecutor.run_cli_cmd(
1188 node, u"set logging class avf level debug"
1191 cmd = u"rdma_create"
1192 pci_addr = Topology.get_interface_pci_addr(node, if_key)
1194 name=InterfaceUtil.pci_to_eth(node, pci_addr),
1195 host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
1196 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1199 mode=getattr(RdmaMode, f"RDMA_API_MODE_{mode.upper()}").value,
1201 err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
1202 with PapiSocketExecutor(node) as papi_exec:
1203 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1205 InterfaceUtil.vpp_set_interface_mac(
1206 node, sw_if_index, Topology.get_interface_mac(node, if_key)
1208 InterfaceUtil.add_eth_interface(
1209 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_rdma",
1213 return Topology.get_interface_by_sw_index(node, sw_if_index)
1216 def vpp_enslave_physical_interface(node, interface, bond_if):
1217 """Enslave physical interface to bond interface on VPP node.
1219 :param node: DUT node from topology.
1220 :param interface: Physical interface key from topology file.
1221 :param bond_if: Load balance
1223 :type interface: str
1225 :raises RuntimeError: If it is not possible to enslave physical
1226 interface to bond interface on the node.
1228 cmd = u"bond_enslave"
1230 sw_if_index=Topology.get_interface_sw_index(node, interface),
1231 bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
1233 is_long_timeout=False
1235 err_msg = f"Failed to enslave physical interface {interface} to bond " \
1236 f"interface {bond_if} on host {node[u'host']}"
1237 with PapiSocketExecutor(node) as papi_exec:
1238 papi_exec.add(cmd, **args).get_reply(err_msg)
1241 def vpp_show_bond_data_on_node(node, verbose=False):
1242 """Show (detailed) bond information on VPP node.
1244 :param node: DUT node from topology.
1245 :param verbose: If detailed information is required or not.
1249 cmd = u"sw_interface_bond_dump"
1250 err_msg = f"Failed to get bond interface dump on host {node[u'host']}"
1252 data = f"Bond data on node {node[u'host']}:\n"
1253 with PapiSocketExecutor(node) as papi_exec:
1254 details = papi_exec.add(cmd).get_details(err_msg)
1256 for bond in details:
1257 data += f"{bond[u'interface_name']}\n"
1258 data += u" mode: {m}\n".format(
1259 m=bond[u"mode"].name.replace(u"BOND_API_MODE_", u"").lower()
1261 data += u" load balance: {lb}\n".format(
1262 lb=bond[u"lb"].name.replace(u"BOND_API_LB_ALGO_", u"").lower()
1264 data += f" number of active slaves: {bond[u'active_slaves']}\n"
1266 slave_data = InterfaceUtil.vpp_bond_slave_dump(
1267 node, Topology.get_interface_by_sw_index(
1268 node, bond[u"sw_if_index"]
1271 for slave in slave_data:
1272 if not slave[u"is_passive"]:
1273 data += f" {slave[u'interface_name']}\n"
1274 data += f" number of slaves: {bond[u'slaves']}\n"
1276 for slave in slave_data:
1277 data += f" {slave[u'interface_name']}\n"
1278 data += f" interface id: {bond[u'id']}\n"
1279 data += f" sw_if_index: {bond[u'sw_if_index']}\n"
1283 def vpp_bond_slave_dump(node, interface):
1284 """Get bond interface slave(s) data on VPP node.
1286 :param node: DUT node from topology.
1287 :param interface: Physical interface key from topology file.
1289 :type interface: str
1290 :returns: Bond slave interface data.
1293 cmd = u"sw_interface_slave_dump"
1295 sw_if_index=Topology.get_interface_sw_index(node, interface)
1297 err_msg = f"Failed to get slave dump on host {node[u'host']}"
1299 with PapiSocketExecutor(node) as papi_exec:
1300 details = papi_exec.add(cmd, **args).get_details(err_msg)
1302 logger.debug(f"Slave data:\n{details}")
1306 def vpp_show_bond_data_on_all_nodes(nodes, verbose=False):
1307 """Show (detailed) bond information on all VPP nodes in DICT__nodes.
1309 :param nodes: Nodes in the topology.
1310 :param verbose: If detailed information is required or not.
1314 for node_data in nodes.values():
1315 if node_data[u"type"] == NodeType.DUT:
1316 InterfaceUtil.vpp_show_bond_data_on_node(node_data, verbose)
1319 def vpp_enable_input_acl_interface(
1320 node, interface, ip_version, table_index):
1321 """Enable input acl on interface.
1323 :param node: VPP node to setup interface for input acl.
1324 :param interface: Interface to setup input acl.
1325 :param ip_version: Version of IP protocol.
1326 :param table_index: Classify table index.
1328 :type interface: str or int
1329 :type ip_version: str
1330 :type table_index: int
1332 cmd = u"input_acl_set_interface"
1334 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1335 ip4_table_index=table_index if ip_version == u"ip4"
1336 else Constants.BITWISE_NON_ZERO,
1337 ip6_table_index=table_index if ip_version == u"ip6"
1338 else Constants.BITWISE_NON_ZERO,
1339 l2_table_index=table_index if ip_version == u"l2"
1340 else Constants.BITWISE_NON_ZERO,
1342 err_msg = f"Failed to enable input acl on interface {interface}"
1343 with PapiSocketExecutor(node) as papi_exec:
1344 papi_exec.add(cmd, **args).get_reply(err_msg)
1347 def get_interface_classify_table(node, interface):
1348 """Get name of classify table for the given interface.
1350 TODO: Move to Classify.py.
1352 :param node: VPP node to get data from.
1353 :param interface: Name or sw_if_index of a specific interface.
1355 :type interface: str or int
1356 :returns: Classify table name.
1359 if isinstance(interface, str):
1360 sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
1362 sw_if_index = interface
1364 cmd = u"classify_table_by_interface"
1366 sw_if_index=sw_if_index
1368 err_msg = f"Failed to get classify table name by interface {interface}"
1369 with PapiSocketExecutor(node) as papi_exec:
1370 reply = papi_exec.add(cmd, **args).get_reply(err_msg)
1375 def get_sw_if_index(node, interface_name):
1376 """Get sw_if_index for the given interface from actual interface dump.
1378 FIXME: Delete and redirect callers to vpp_get_interface_sw_index.
1380 :param node: VPP node to get interface data from.
1381 :param interface_name: Name of the specific interface.
1383 :type interface_name: str
1384 :returns: sw_if_index of the given interface.
1387 interface_data = InterfaceUtil.vpp_get_interface_data(
1388 node, interface=interface_name
1390 return interface_data.get(u"sw_if_index")
1393 def vxlan_gpe_dump(node, interface_name=None):
1394 """Get VxLAN GPE data for the given interface.
1396 :param node: VPP node to get interface data from.
1397 :param interface_name: Name of the specific interface. If None,
1398 information about all VxLAN GPE interfaces is returned.
1400 :type interface_name: str
1401 :returns: Dictionary containing data for the given VxLAN GPE interface
1402 or if interface=None, the list of dictionaries with all VxLAN GPE
1404 :rtype: dict or list
1406 def process_vxlan_gpe_dump(vxlan_dump):
1407 """Process vxlan_gpe dump.
1409 :param vxlan_dump: Vxlan_gpe nterface dump.
1410 :type vxlan_dump: dict
1411 :returns: Processed vxlan_gpe interface dump.
1414 if vxlan_dump[u"is_ipv6"]:
1415 vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"])
1416 vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"])
1418 vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"][0:4])
1419 vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"][0:4])
1422 if interface_name is not None:
1423 sw_if_index = InterfaceUtil.get_interface_index(
1424 node, interface_name
1427 sw_if_index = int(Constants.BITWISE_NON_ZERO)
1429 cmd = u"vxlan_gpe_tunnel_dump"
1431 sw_if_index=sw_if_index
1433 err_msg = f"Failed to get VXLAN-GPE dump on host {node[u'host']}"
1434 with PapiSocketExecutor(node) as papi_exec:
1435 details = papi_exec.add(cmd, **args).get_details(err_msg)
1437 data = list() if interface_name is None else dict()
1438 for dump in details:
1439 if interface_name is None:
1440 data.append(process_vxlan_gpe_dump(dump))
1441 elif dump[u"sw_if_index"] == sw_if_index:
1442 data = process_vxlan_gpe_dump(dump)
1445 logger.debug(f"VXLAN-GPE data:\n{data}")
1449 def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
1450 """Assign VPP interface to specific VRF/FIB table.
1452 :param node: VPP node where the FIB and interface are located.
1453 :param interface: Interface to be assigned to FIB.
1454 :param table_id: VRF table ID.
1455 :param ipv6: Assign to IPv6 table. Default False.
1457 :type interface: str or int
1461 cmd = u"sw_interface_set_table"
1463 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1465 vrf_id=int(table_id)
1467 err_msg = f"Failed to assign interface {interface} to FIB table"
1468 with PapiSocketExecutor(node) as papi_exec:
1469 papi_exec.add(cmd, **args).get_reply(err_msg)
1472 def set_linux_interface_mac(
1473 node, interface, mac, namespace=None, vf_id=None):
1474 """Set MAC address for interface in linux.
1476 :param node: Node where to execute command.
1477 :param interface: Interface in namespace.
1478 :param mac: MAC to be assigned to interface.
1479 :param namespace: Execute command in namespace. Optional
1480 :param vf_id: Virtual Function id. Optional
1482 :type interface: str
1484 :type namespace: str
1487 mac_str = f"vf {vf_id} mac {mac}" if vf_id is not None \
1488 else f"address {mac}"
1489 ns_str = f"ip netns exec {namespace}" if namespace else u""
1491 cmd = f"{ns_str} ip link set {interface} {mac_str}"
1492 exec_cmd_no_error(node, cmd, sudo=True)
1495 def set_linux_interface_trust_on(
1496 node, interface, namespace=None, vf_id=None):
1497 """Set trust on (promisc) for interface in linux.
1499 :param node: Node where to execute command.
1500 :param interface: Interface in namespace.
1501 :param namespace: Execute command in namespace. Optional
1502 :param vf_id: Virtual Function id. Optional
1504 :type interface: str
1505 :type namespace: str
1508 trust_str = f"vf {vf_id} trust on" if vf_id is not None else u"trust on"
1509 ns_str = f"ip netns exec {namespace}" if namespace else u""
1511 cmd = f"{ns_str} ip link set dev {interface} {trust_str}"
1512 exec_cmd_no_error(node, cmd, sudo=True)
1515 def set_linux_interface_spoof_off(
1516 node, interface, namespace=None, vf_id=None):
1517 """Set spoof off for interface in linux.
1519 :param node: Node where to execute command.
1520 :param interface: Interface in namespace.
1521 :param namespace: Execute command in namespace. Optional
1522 :param vf_id: Virtual Function id. Optional
1524 :type interface: str
1525 :type namespace: str
1528 spoof_str = f"vf {vf_id} spoof off" if vf_id is not None \
1530 ns_str = f"ip netns exec {namespace}" if namespace else u""
1532 cmd = f"{ns_str} ip link set dev {interface} {spoof_str}"
1533 exec_cmd_no_error(node, cmd, sudo=True)
1536 def init_avf_interface(node, ifc_key, numvfs=1, osi_layer=u"L2"):
1537 """Init PCI device by creating VIFs and bind them to vfio-pci for AVF
1538 driver testing on DUT.
1540 :param node: DUT node.
1541 :param ifc_key: Interface key from topology file.
1542 :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
1543 :param osi_layer: OSI Layer type to initialize TG with.
1544 Default value "L2" sets linux interface spoof off.
1548 :type osi_layer: str
1549 :returns: Virtual Function topology interface keys.
1551 :raises RuntimeError: If a reason preventing initialization is found.
1553 # Read PCI address and driver.
1554 pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
1555 pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
1556 uio_driver = Topology.get_uio_driver(node)
1557 kernel_driver = Topology.get_interface_driver(node, ifc_key)
1558 if kernel_driver not in (u"i40e", u"i40evf"):
1560 f"AVF needs i40e-compatible driver, not {kernel_driver} "
1561 f"at node {node[u'host']} ifc {ifc_key}"
1563 current_driver = DUTSetup.get_pci_dev_driver(
1564 node, pf_pci_addr.replace(u":", r"\:"))
1566 VPPUtil.stop_vpp_service(node)
1567 if current_driver != kernel_driver:
1568 # PCI device must be re-bound to kernel driver before creating VFs.
1569 DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
1570 # Stop VPP to prevent deadlock.
1571 # Unbind from current driver.
1572 DUTSetup.pci_driver_unbind(node, pf_pci_addr)
1573 # Bind to kernel driver.
1574 DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
1576 # Initialize PCI VFs.
1577 DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
1580 # Set MAC address and bind each virtual function to uio driver.
1581 for vf_id in range(numvfs):
1582 vf_mac_addr = u":".join(
1583 [pf_mac_addr[0], pf_mac_addr[2], pf_mac_addr[3], pf_mac_addr[4],
1584 pf_mac_addr[5], f"{vf_id:02x}"
1588 pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"
1589 InterfaceUtil.set_linux_interface_trust_on(
1590 node, pf_dev, vf_id=vf_id
1592 if osi_layer == u"L2":
1593 InterfaceUtil.set_linux_interface_spoof_off(
1594 node, pf_dev, vf_id=vf_id
1596 InterfaceUtil.set_linux_interface_mac(
1597 node, pf_dev, vf_mac_addr, vf_id=vf_id
1600 DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
1601 DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
1603 # Add newly created ports into topology file
1604 vf_ifc_name = f"{ifc_key}_vif"
1605 vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
1606 vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
1607 Topology.update_interface_name(
1608 node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
1610 Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
1611 Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
1612 Topology.set_interface_numa_node(
1613 node, vf_ifc_key, Topology.get_interface_numa_node(
1617 vf_ifc_keys.append(vf_ifc_key)
1622 def vpp_sw_interface_rx_placement_dump(node):
1623 """Dump VPP interface RX placement on node.
1625 :param node: Node to run command on.
1627 :returns: Thread mapping information as a list of dictionaries.
1630 cmd = u"sw_interface_rx_placement_dump"
1631 err_msg = f"Failed to run '{cmd}' PAPI command on host {node[u'host']}!"
1632 with PapiSocketExecutor(node) as papi_exec:
1633 for ifc in node[u"interfaces"].values():
1634 if ifc[u"vpp_sw_index"] is not None:
1635 papi_exec.add(cmd, sw_if_index=ifc[u"vpp_sw_index"])
1636 details = papi_exec.get_details(err_msg)
1637 return sorted(details, key=lambda k: k[u"sw_if_index"])
1640 def vpp_sw_interface_set_rx_placement(
1641 node, sw_if_index, queue_id, worker_id):
1642 """Set interface RX placement to worker on node.
1644 :param node: Node to run command on.
1645 :param sw_if_index: VPP SW interface index.
1646 :param queue_id: VPP interface queue ID.
1647 :param worker_id: VPP worker ID (indexing from 0).
1649 :type sw_if_index: int
1651 :type worker_id: int
1652 :raises RuntimeError: If failed to run command on host or if no API
1655 cmd = u"sw_interface_set_rx_placement"
1656 err_msg = f"Failed to set interface RX placement to worker " \
1657 f"on host {node[u'host']}!"
1659 sw_if_index=sw_if_index,
1661 worker_id=worker_id,
1664 with PapiSocketExecutor(node) as papi_exec:
1665 papi_exec.add(cmd, **args).get_reply(err_msg)
1668 def vpp_round_robin_rx_placement(node, prefix):
1669 """Set Round Robin interface RX placement on all worker threads
1672 :param node: Topology nodes.
1673 :param prefix: Interface name prefix.
1678 worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
1681 for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
1682 for interface in node[u"interfaces"].values():
1683 if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
1684 and prefix in interface[u"name"]:
1685 InterfaceUtil.vpp_sw_interface_set_rx_placement(
1686 node, placement[u"sw_if_index"], placement[u"queue_id"],
1687 worker_id % worker_cnt
1692 def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix):
1693 """Set Round Robin interface RX placement on all worker threads
1696 :param nodes: Topology nodes.
1697 :param prefix: Interface name prefix.
1701 for node in nodes.values():
1702 if node[u"type"] == NodeType.DUT:
1703 InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)