1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Interface util library."""
16 from time import sleep
17 from enum import IntEnum
19 from ipaddress import ip_address
20 from robot.api import logger
22 from resources.libraries.python.Constants import Constants
23 from resources.libraries.python.DUTSetup import DUTSetup
24 from resources.libraries.python.IPAddress import IPAddress
25 from resources.libraries.python.L2Util import L2Util
26 from resources.libraries.python.PapiExecutor import PapiSocketExecutor
27 from resources.libraries.python.parsers.JsonParser import JsonParser
28 from resources.libraries.python.ssh import SSH, exec_cmd_no_error
29 from resources.libraries.python.topology import NodeType, Topology
30 from resources.libraries.python.VPPUtil import VPPUtil
class InterfaceStatusFlags(IntEnum):
    """Interface status flags.

    Bit flags: admin state and link state may be set independently
    and combined in a single flags value.
    """
    # Interface is administratively up.
    IF_STATUS_API_FLAG_ADMIN_UP = 1
    # Physical link is up.
    IF_STATUS_API_FLAG_LINK_UP = 2
class MtuProto(IntEnum):
    """MTU protocol."""
    # NOTE(review): other MTU_PROTO_API_* members appear to be elided
    # from this view of the file.
    MTU_PROTO_API_MPLS = 3
class LinkDuplex(IntEnum):
    """Link duplex mode."""
    LINK_DUPLEX_API_UNKNOWN = 0
    LINK_DUPLEX_API_HALF = 1
    LINK_DUPLEX_API_FULL = 2
class SubInterfaceFlags(IntEnum):
    """Sub-interface flags.

    Values are powers of two so they can be OR-ed together into a
    bitmask (see create_subinterface below).
    """
    SUB_IF_API_FLAG_NO_TAGS = 1
    SUB_IF_API_FLAG_ONE_TAG = 2
    SUB_IF_API_FLAG_TWO_TAGS = 4
    SUB_IF_API_FLAG_DOT1AD = 8
    SUB_IF_API_FLAG_EXACT_MATCH = 16
    SUB_IF_API_FLAG_DEFAULT = 32
    SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY = 64
    SUB_IF_API_FLAG_INNER_VLAN_ID_ANY = 128
    SUB_IF_API_FLAG_DOT1AH = 256
class RxMode(IntEnum):
    """RX mode."""
    RX_MODE_API_UNKNOWN = 0
    RX_MODE_API_POLLING = 1
    RX_MODE_API_INTERRUPT = 2
    RX_MODE_API_ADAPTIVE = 3
    RX_MODE_API_DEFAULT = 4
class IfType(IntEnum):
    """Interface type."""
    # NOTE(review): other IF_API_TYPE_* members appear to be elided
    # from this view of the file.
    IF_API_TYPE_HARDWARE = 0
class LinkBondLoadBalanceAlgo(IntEnum):
    """Link bonding load balance algorithm."""
    # L2 / L34 / L23: hash on the corresponding header layers.
    BOND_API_LB_ALGO_L2 = 0
    BOND_API_LB_ALGO_L34 = 1
    BOND_API_LB_ALGO_L23 = 2
    # RR / BC / AB: round-robin, broadcast, active-backup
    # (cf. the LinkBondMode names below).
    BOND_API_LB_ALGO_RR = 3
    BOND_API_LB_ALGO_BC = 4
    BOND_API_LB_ALGO_AB = 5
class LinkBondMode(IntEnum):
    """Link bonding mode."""
    BOND_API_MODE_ROUND_ROBIN = 1
    BOND_API_MODE_ACTIVE_BACKUP = 2
    BOND_API_MODE_XOR = 3
    BOND_API_MODE_BROADCAST = 4
    BOND_API_MODE_LACP = 5
class RdmaMode(IntEnum):
    """RDMA interface mode."""
    RDMA_API_MODE_AUTO = 0
    RDMA_API_MODE_IBV = 1
114 """General utilities for managing interfaces"""
def pci_to_int(pci_str):
    """Convert PCI address from string format (0000:18:0a.0) to
    integer representation (169345024).

    :param pci_str: PCI address in string representation.
    :type pci_str: str
    :returns: Integer representation of PCI address.
    :rtype: int
    """
    # A PCI address is domain:bus:slot.function; unpack the pieces.
    domain, bus, slot_fn = pci_str.split(u":")
    slot, function = slot_fn.split(u".")
    # Pack the hex fields into a single integer, low to high:
    # domain | bus<<16 | slot<<24 | function<<29.
    return int(domain, 16) \
        | int(bus, 16) << 16 \
        | int(slot, 16) << 24 \
        | int(function, 16) << 29
def pci_to_eth(node, pci_str):
    """Convert PCI address on DUT to Linux ethernet name.

    :param node: DUT node.
    :param pci_str: PCI address.
    :type node: dict
    :type pci_str: str
    :returns: Ethernet name.
    :rtype: str
    :raises RuntimeError: If the PCI address cannot be mapped to an
        ethernet device name.
    """
    # sysfs exposes the netdev name as the single entry under
    # .../devices/<pci>/net/; basename resolves the glob remotely.
    cmd = f"basename /sys/bus/pci/devices/{pci_str}/net/*"
    try:
        stdout, _ = exec_cmd_no_error(node, cmd)
    except RuntimeError as err:
        # Fix: the raise must only happen when the command fails;
        # re-raise with a message identifying the offending address.
        raise RuntimeError(
            f"Cannot convert {pci_str} to ethernet name!"
        ) from err
    return stdout.strip()
def get_interface_index(node, interface):
    """Get interface sw_if_index from topology file.

    :param node: Node where the interface is.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: str or int
    :returns: SW interface index.
    :rtype: int
    :raises TypeError: If the interface is not described by int or str.
    """
    try:
        # The interface may already be a numeric sw_if_index.
        sw_if_index = int(interface)
    except ValueError:
        # Otherwise resolve via topology: first by interface key,
        # then by interface name.
        sw_if_index = Topology.get_interface_sw_index(node, interface)
        if sw_if_index is None:
            try:
                # Fix: the lookup result must be captured, otherwise the
                # function silently returns None for by-name lookups.
                sw_if_index = \
                    Topology.get_interface_sw_index_by_name(node, interface)
            except TypeError as err:
                raise TypeError(
                    f"Wrong interface format {interface}"
                ) from err
    return sw_if_index
175 def set_interface_state(node, interface, state, if_type=u"key"):
176 """Set interface state on a node.
178 Function can be used for DUTs as well as for TGs.
180 :param node: Node where the interface is.
181 :param interface: Interface key or sw_if_index or name.
182 :param state: One of 'up' or 'down'.
183 :param if_type: Interface type
185 :type interface: str or int
189 :raises ValueError: If the interface type is unknown.
190 :raises ValueError: If the state of interface is unexpected.
191 :raises ValueError: If the node has an unknown node type.
193 if if_type == u"key":
194 if isinstance(interface, str):
195 sw_if_index = Topology.get_interface_sw_index(node, interface)
196 iface_name = Topology.get_interface_name(node, interface)
198 sw_if_index = interface
199 elif if_type == u"name":
200 iface_key = Topology.get_interface_by_name(node, interface)
201 if iface_key is not None:
202 sw_if_index = Topology.get_interface_sw_index(node, iface_key)
203 iface_name = interface
205 raise ValueError(f"Unknown if_type: {if_type}")
207 if node[u"type"] == NodeType.DUT:
209 flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
210 elif state == u"down":
213 raise ValueError(f"Unexpected interface state: {state}")
214 cmd = u"sw_interface_set_flags"
215 err_msg = f"Failed to set interface state on host {node[u'host']}"
217 sw_if_index=int(sw_if_index),
220 with PapiSocketExecutor(node) as papi_exec:
221 papi_exec.add(cmd, **args).get_reply(err_msg)
222 elif node[u"type"] == NodeType.TG or node[u"type"] == NodeType.VM:
223 cmd = f"ip link set {iface_name} {state}"
224 exec_cmd_no_error(node, cmd, sudo=True)
227 f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
def set_interface_mtu(node, pf_pcis, mtu=9200):
    """Set Ethernet MTU for specified interface.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param mtu: MTU to set. Default: 9200.
    :type node: dict
    :type pf_pcis: list
    :type mtu: int
    """
    # Resolve each PCI address to its Linux netdev and set the MTU there.
    for pci_address in pf_pcis:
        eth_name = InterfaceUtil.pci_to_eth(node, pci_address)
        exec_cmd_no_error(
            node, f"ip link set {eth_name} mtu {mtu}", sudo=True
        )
247 def vpp_set_interface_mtu(node, interface, mtu=9200):
248 """Set Ethernet MTU on interface.
250 :param node: VPP node.
251 :param interface: Interface to setup MTU. Default: 9200.
252 :param mtu: Ethernet MTU size in Bytes.
254 :type interface: str or int
257 if isinstance(interface, str):
258 sw_if_index = Topology.get_interface_sw_index(node, interface)
260 sw_if_index = interface
262 cmd = u"hw_interface_set_mtu"
263 err_msg = f"Failed to set interface MTU on host {node[u'host']}"
265 sw_if_index=sw_if_index,
269 with PapiSocketExecutor(node) as papi_exec:
270 papi_exec.add(cmd, **args).get_reply(err_msg)
271 except AssertionError as err:
272 # TODO: Make failure tolerance optional.
273 logger.debug(f"Setting MTU failed. Expected?\n{err}")
def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
    """Set Ethernet MTU on all interfaces.

    :param node: VPP node.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type mtu: int
    """
    # Apply the same MTU to every interface key known in the topology.
    for if_key in node[u"interfaces"]:
        InterfaceUtil.vpp_set_interface_mtu(node, if_key, mtu)
def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
    """Set Ethernet MTU on all interfaces on all DUTs.

    :param nodes: VPP nodes.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type nodes: dict
    :type mtu: int
    """
    # Only DUT nodes run VPP; skip every other node type.
    duts = (n for n in nodes.values() if n[u"type"] == NodeType.DUT)
    for dut in duts:
        InterfaceUtil.vpp_set_interfaces_mtu_on_node(dut, mtu)
301 def vpp_node_interfaces_ready_wait(node, retries=15):
302 """Wait until all interfaces with admin-up are in link-up state.
304 :param node: Node to wait on.
305 :param retries: Number of retries to check interface status (optional,
310 :raises RuntimeError: If any interface is not in link-up state after
311 defined number of retries.
313 for _ in range(0, retries):
315 out = InterfaceUtil.vpp_get_interface_data(node)
316 for interface in out:
317 if interface.get(u"flags") == 1:
318 not_ready.append(interface.get(u"interface_name"))
321 f"Interfaces still not in link-up state:\n{not_ready}"
327 err = f"Timeout, interfaces not up:\n{not_ready}" \
328 if u"not_ready" in locals() else u"No check executed!"
329 raise RuntimeError(err)
def all_vpp_interfaces_ready_wait(nodes, retries=15):
    """Wait until all interfaces with admin-up are in link-up state for all
    nodes in the topology.

    :param nodes: Nodes in the topology.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type nodes: dict
    :type retries: int
    """
    # Delegate the per-node wait to every DUT in the topology.
    duts = (n for n in nodes.values() if n[u"type"] == NodeType.DUT)
    for dut in duts:
        InterfaceUtil.vpp_node_interfaces_ready_wait(dut, retries)
348 def vpp_get_interface_data(node, interface=None):
349 """Get all interface data from a VPP node. If a name or
350 sw_interface_index is provided, return only data for the matching
353 :param node: VPP node to get interface data from.
354 :param interface: Numeric index or name string of a specific interface.
356 :type interface: int or str
357 :returns: List of dictionaries containing data for each interface, or a
358 single dictionary for the specified interface.
360 :raises TypeError: if the data type of interface is neither basestring
363 def process_if_dump(if_dump):
364 """Process interface dump.
366 :param if_dump: Interface dump.
368 :returns: Processed interface dump.
371 if_dump[u"l2_address"] = str(if_dump[u"l2_address"])
372 if_dump[u"b_dmac"] = str(if_dump[u"b_dmac"])
373 if_dump[u"b_smac"] = str(if_dump[u"b_smac"])
374 if_dump[u"flags"] = if_dump[u"flags"].value
375 if_dump[u"type"] = if_dump[u"type"].value
376 if_dump[u"link_duplex"] = if_dump[u"link_duplex"].value
377 if_dump[u"sub_if_flags"] = if_dump[u"sub_if_flags"].value \
378 if hasattr(if_dump[u"sub_if_flags"], u"value") \
379 else int(if_dump[u"sub_if_flags"])
383 if interface is not None:
384 if isinstance(interface, str):
385 param = u"interface_name"
386 elif isinstance(interface, int):
387 param = u"sw_if_index"
389 raise TypeError(f"Wrong interface format {interface}")
393 cmd = u"sw_interface_dump"
395 name_filter_valid=False,
398 err_msg = f"Failed to get interface dump on host {node[u'host']}"
400 with PapiSocketExecutor(node) as papi_exec:
401 details = papi_exec.add(cmd, **args).get_details(err_msg)
402 logger.debug(f"Received data:\n{details!r}")
404 data = list() if interface is None else dict()
406 if interface is None:
407 data.append(process_if_dump(dump))
408 elif str(dump.get(param)).rstrip(u"\x00") == str(interface):
409 data = process_if_dump(dump)
412 logger.debug(f"Interface data:\n{data}")
def vpp_get_interface_name(node, sw_if_index):
    """Get interface name for the given SW interface index from actual
    interface dump.

    :param node: VPP node to get interface data from.
    :param sw_if_index: SW interface index of the specific interface.
    :type node: dict
    :type sw_if_index: int
    :returns: Name of the given interface.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
    if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
        # Sub-interface: re-dump using the parent (sup) interface index.
        # Fix: the call expression was left unterminated in this view.
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data[u"sup_sw_if_index"]
        )
    return if_data.get(u"interface_name")
def vpp_get_interface_sw_index(node, interface_name):
    """Get SW interface index for the given interface name from actual
    interface dump.

    (Docstring fixed: it previously described the inverse lookup,
    name-by-index, copied from vpp_get_interface_name.)

    :param node: VPP node to get interface data from.
    :param interface_name: Interface name.
    :type node: dict
    :type interface_name: str
    :returns: SW interface index of the given interface.
    :rtype: int
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)

    return if_data.get(u"sw_if_index")
def vpp_get_interface_mac(node, interface):
    """Get MAC address for the given interface from actual interface dump.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: MAC address.
    :rtype: str
    """
    data = InterfaceUtil.vpp_get_interface_data(node, interface)
    is_sub_interface = data[u"sup_sw_if_index"] != data[u"sw_if_index"]
    if is_sub_interface:
        # A sub-interface reports the parent (sup) interface data;
        # re-dump using the parent index to read its l2_address.
        data = InterfaceUtil.vpp_get_interface_data(
            node, data[u"sup_sw_if_index"])
    return data.get(u"l2_address")
def vpp_set_interface_mac(node, interface, mac):
    """Set MAC address for the given interface.

    :param node: VPP node to set interface MAC.
    :param interface: Numeric index or name string of a specific interface.
    :param mac: Required MAC address.
    :type node: dict
    :type interface: int or str
    :type mac: str
    """
    cmd = u"sw_interface_set_mac_address"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        mac_address=L2Util.mac_to_bin(mac)
    )
    # Fix: missing separator previously rendered as
    # "...interface eth0on host...".
    err_msg = f"Failed to set MAC address of interface {interface} " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
491 def tg_set_interface_driver(node, pci_addr, driver):
492 """Set interface driver on the TG node.
494 :param node: Node to set interface driver on (must be TG node).
495 :param pci_addr: PCI address of the interface.
496 :param driver: Driver name.
500 :raises RuntimeError: If unbinding from the current driver fails.
501 :raises RuntimeError: If binding to the new driver fails.
503 old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
504 if old_driver == driver:
510 # Unbind from current driver
511 if old_driver is not None:
512 cmd = f"sh -c \"echo {pci_addr} > " \
513 f"/sys/bus/pci/drivers/{old_driver}/unbind\""
514 ret_code, _, _ = ssh.exec_command_sudo(cmd)
515 if int(ret_code) != 0:
516 raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
518 # Bind to the new driver
519 cmd = f"sh -c \"echo {pci_addr} > /sys/bus/pci/drivers/{driver}/bind\""
520 ret_code, _, _ = ssh.exec_command_sudo(cmd)
521 if int(ret_code) != 0:
522 raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
def tg_get_interface_driver(node, pci_addr):
    """Get interface driver from the TG node.

    :param node: Node to get interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :type node: dict
    :type pci_addr: str
    :returns: Interface driver or None if not found.
    :rtype: str
    :raises RuntimeError: If PCI rescan or lspci command execution failed.
    """
    # Thin wrapper: the actual driver detection lives in DUTSetup.
    return DUTSetup.get_pci_dev_driver(node, pci_addr)
def tg_set_interfaces_default_driver(node):
    """Set interfaces default driver specified in topology yaml file.

    :param node: Node to setup interfaces driver on (must be TG node).
    :type node: dict
    """
    # Fix: the call expression was left unterminated in this view.
    for interface in node[u"interfaces"].values():
        InterfaceUtil.tg_set_interface_driver(
            node, interface[u"pci_address"], interface[u"driver"]
        )
551 def update_vpp_interface_data_on_node(node):
552 """Update vpp generated interface data for a given node in DICT__nodes.
554 Updates interface names, software if index numbers and any other details
555 generated specifically by vpp that are unknown before testcase run.
556 It does this by dumping interface list from all devices using python
557 api, and pairing known information from topology (mac address) to state
560 :param node: Node selected from DICT__nodes.
563 interface_list = InterfaceUtil.vpp_get_interface_data(node)
564 interface_dict = dict()
565 for ifc in interface_list:
566 interface_dict[ifc[u"l2_address"]] = ifc
568 for if_name, if_data in node[u"interfaces"].items():
569 ifc_dict = interface_dict.get(if_data[u"mac_address"])
570 if ifc_dict is not None:
571 if_data[u"name"] = ifc_dict[u"interface_name"]
572 if_data[u"vpp_sw_index"] = ifc_dict[u"sw_if_index"]
573 if_data[u"mtu"] = ifc_dict[u"mtu"][0]
575 f"Interface {if_name} found by MAC "
576 f"{if_data[u'mac_address']}"
580 f"Interface {if_name} not found by MAC "
581 f"{if_data[u'mac_address']}"
583 if_data[u"vpp_sw_index"] = None
def update_nic_interface_names(node):
    """Update interface names based on nic type and PCI address.

    This method updates interface names in the same format as VPP does.

    :param node: Node dictionary.
    :type node: dict
    """
    # Model -> VPP interface name prefix. Replaces the repetitive
    # if/elif chain; unknown models fall back to "UnknownEthernet".
    prefixes = {
        u"Intel-XL710": u"FortyGigabitEthernet",
        u"Intel-X710": u"TenGigabitEthernet",
        u"Intel-X520-DA2": u"TenGigabitEthernet",
        u"Cisco-VIC-1385": u"FortyGigabitEthernet",
        u"Cisco-VIC-1227": u"TenGigabitEthernet",
    }
    for ifc in node[u"interfaces"].values():
        # bus/slot/function rendered as lowercase hex, VPP style.
        if_pci = ifc[u"pci_address"].replace(u".", u":").split(u":")
        loc = f"{int(if_pci[1], 16):x}/{int(if_pci[2], 16):x}/" \
            f"{int(if_pci[3], 16):x}"
        prefix = prefixes.get(ifc[u"model"], u"UnknownEthernet")
        ifc[u"name"] = f"{prefix}{loc}"
def update_nic_interface_names_on_all_duts(nodes):
    """Update interface names based on nic type and PCI address on all DUTs.

    This method updates interface names in the same format as VPP does.

    :param nodes: Topology nodes.
    :type nodes: dict
    """
    # Only DUT nodes carry VPP-style interface names.
    duts = (n for n in nodes.values() if n[u"type"] == NodeType.DUT)
    for dut in duts:
        InterfaceUtil.update_nic_interface_names(dut)
625 def update_tg_interface_data_on_node(node):
626 """Update interface name for TG/linux node in DICT__nodes.
629 # for dev in `ls /sys/class/net/`;
630 > do echo "\"`cat /sys/class/net/$dev/address`\": \"$dev\""; done
631 "52:54:00:9f:82:63": "eth0"
632 "52:54:00:77:ae:a9": "eth1"
633 "52:54:00:e1:8a:0f": "eth2"
634 "00:00:00:00:00:00": "lo"
636 :param node: Node selected from DICT__nodes.
638 :raises RuntimeError: If getting of interface name and MAC fails.
640 # First setup interface driver specified in yaml file
641 InterfaceUtil.tg_set_interfaces_default_driver(node)
643 # Get interface names
647 cmd = u'for dev in `ls /sys/class/net/`; do echo "\\"`cat ' \
648 u'/sys/class/net/$dev/address`\\": \\"$dev\\""; done;'
650 ret_code, stdout, _ = ssh.exec_command(cmd)
651 if int(ret_code) != 0:
652 raise RuntimeError(u"Get interface name and MAC failed")
653 tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"
655 interfaces = JsonParser().parse_data(tmp)
656 for interface in node[u"interfaces"].values():
657 name = interfaces.get(interface[u"mac_address"])
660 interface[u"name"] = name
663 def iface_update_numa_node(node):
664 """For all interfaces from topology file update numa node based on
665 information from the node.
667 :param node: Node from topology.
670 :raises ValueError: If numa node ia less than 0.
671 :raises RuntimeError: If update of numa node failed.
674 for if_key in Topology.get_node_interfaces(node):
675 if_pci = Topology.get_interface_pci_addr(node, if_key)
677 cmd = f"cat /sys/bus/pci/devices/{if_pci}/numa_node"
679 ret, out, _ = ssh.exec_command(cmd)
682 numa_node = 0 if int(out) < 0 else int(out)
685 f"Reading numa location failed for: {if_pci}"
688 Topology.set_interface_numa_node(
689 node, if_key, numa_node
693 raise RuntimeError(f"Update numa node failed for: {if_pci}")
def update_all_interface_data_on_all_nodes(
        nodes, skip_tg=False, skip_vpp=False):
    """Update interface names on all nodes in DICT__nodes.

    This method updates the topology dictionary by querying interface lists
    of all nodes mentioned in the topology dictionary.

    :param nodes: Nodes in the topology.
    :param skip_tg: Skip TG node.
    :param skip_vpp: Skip VPP node.
    :type nodes: dict
    :type skip_tg: bool
    :type skip_vpp: bool
    """
    for node in nodes.values():
        node_type = node[u"type"]
        if node_type == NodeType.DUT and not skip_vpp:
            InterfaceUtil.update_vpp_interface_data_on_node(node)
        elif node_type == NodeType.TG and not skip_tg:
            InterfaceUtil.update_tg_interface_data_on_node(node)
            # TG interfaces also get their NUMA placement refreshed.
            InterfaceUtil.iface_update_numa_node(node)
718 def create_vlan_subinterface(node, interface, vlan):
719 """Create VLAN sub-interface on node.
721 :param node: Node to add VLAN subinterface on.
722 :param interface: Interface name or index on which create VLAN
724 :param vlan: VLAN ID of the subinterface to be created.
726 :type interface: str on int
728 :returns: Name and index of created subinterface.
730 :raises RuntimeError: if it is unable to create VLAN subinterface on the
731 node or interface cannot be converted.
733 sw_if_index = InterfaceUtil.get_interface_index(node, interface)
735 cmd = u"create_vlan_subif"
737 sw_if_index=sw_if_index,
740 err_msg = f"Failed to create VLAN sub-interface on host {node[u'host']}"
742 with PapiSocketExecutor(node) as papi_exec:
743 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
745 if_key = Topology.add_new_port(node, u"vlan_subif")
746 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
747 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
748 Topology.update_interface_name(node, if_key, ifc_name)
750 return f"{interface}.{vlan}", sw_if_index
753 def create_vxlan_interface(node, vni, source_ip, destination_ip):
754 """Create VXLAN interface and return sw if index of created interface.
756 :param node: Node where to create VXLAN interface.
757 :param vni: VXLAN Network Identifier.
758 :param source_ip: Source IP of a VXLAN Tunnel End Point.
759 :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
763 :type destination_ip: str
764 :returns: SW IF INDEX of created interface.
766 :raises RuntimeError: if it is unable to create VxLAN interface on the
769 cmd = u"vxlan_add_del_tunnel"
772 instance=Constants.BITWISE_NON_ZERO,
773 src_address=IPAddress.create_ip_address_object(
774 ip_address(source_ip)
776 dst_address=IPAddress.create_ip_address_object(
777 ip_address(destination_ip)
779 mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
781 decap_next_index=Constants.BITWISE_NON_ZERO,
784 err_msg = f"Failed to create VXLAN tunnel interface " \
785 f"on host {node[u'host']}"
786 with PapiSocketExecutor(node) as papi_exec:
787 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
789 if_key = Topology.add_new_port(node, u"vxlan_tunnel")
790 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
791 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
792 Topology.update_interface_name(node, if_key, ifc_name)
797 def set_vxlan_bypass(node, interface=None):
798 """Add the 'ip4-vxlan-bypass' graph node for a given interface.
800 By adding the IPv4 vxlan-bypass graph node to an interface, the node
801 checks for and validate input vxlan packet and bypass ip4-lookup,
802 ip4-local, ip4-udp-lookup nodes to speedup vxlan packet forwarding.
803 This node will cause extra overhead to for non-vxlan packets which is
806 :param node: Node where to set VXLAN bypass.
807 :param interface: Numeric index or name string of a specific interface.
809 :type interface: int or str
810 :raises RuntimeError: if it failed to set VXLAN bypass on interface.
812 sw_if_index = InterfaceUtil.get_interface_index(node, interface)
814 cmd = u"sw_interface_set_vxlan_bypass"
817 sw_if_index=sw_if_index,
820 err_msg = f"Failed to set VXLAN bypass on interface " \
821 f"on host {node[u'host']}"
822 with PapiSocketExecutor(node) as papi_exec:
823 papi_exec.add(cmd, **args).get_replies(err_msg)
826 def vxlan_dump(node, interface=None):
827 """Get VxLAN data for the given interface.
829 :param node: VPP node to get interface data from.
830 :param interface: Numeric index or name string of a specific interface.
831 If None, information about all VxLAN interfaces is returned.
833 :type interface: int or str
834 :returns: Dictionary containing data for the given VxLAN interface or if
835 interface=None, the list of dictionaries with all VxLAN interfaces.
837 :raises TypeError: if the data type of interface is neither basestring
840 def process_vxlan_dump(vxlan_dump):
841 """Process vxlan dump.
843 :param vxlan_dump: Vxlan interface dump.
844 :type vxlan_dump: dict
845 :returns: Processed vxlan interface dump.
848 vxlan_dump[u"src_address"] = str(vxlan_dump[u"src_address"])
849 vxlan_dump[u"dst_address"] = str(vxlan_dump[u"dst_address"])
852 if interface is not None:
853 sw_if_index = InterfaceUtil.get_interface_index(node, interface)
855 sw_if_index = int(Constants.BITWISE_NON_ZERO)
857 cmd = u"vxlan_tunnel_dump"
859 sw_if_index=sw_if_index
861 err_msg = f"Failed to get VXLAN dump on host {node[u'host']}"
863 with PapiSocketExecutor(node) as papi_exec:
864 details = papi_exec.add(cmd, **args).get_details(err_msg)
866 data = list() if interface is None else dict()
868 if interface is None:
869 data.append(process_vxlan_dump(dump))
870 elif dump[u"sw_if_index"] == sw_if_index:
871 data = process_vxlan_dump(dump)
874 logger.debug(f"VXLAN data:\n{data}")
878 def create_subinterface(
879 node, interface, sub_id, outer_vlan_id=None, inner_vlan_id=None,
881 """Create sub-interface on node. It is possible to set required
882 sub-interface type and VLAN tag(s).
884 :param node: Node to add sub-interface.
885 :param interface: Interface name on which create sub-interface.
886 :param sub_id: ID of the sub-interface to be created.
887 :param outer_vlan_id: Optional outer VLAN ID.
888 :param inner_vlan_id: Optional inner VLAN ID.
889 :param type_subif: Optional type of sub-interface. Values supported by
890 VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
893 :type interface: str or int
895 :type outer_vlan_id: int
896 :type inner_vlan_id: int
897 :type type_subif: str
898 :returns: Name and index of created sub-interface.
900 :raises RuntimeError: If it is not possible to create sub-interface.
902 subif_types = type_subif.split()
905 if u"no_tags" in subif_types:
906 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_NO_TAGS
907 if u"one_tag" in subif_types:
908 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_ONE_TAG
909 if u"two_tags" in subif_types:
910 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_TWO_TAGS
911 if u"dot1ad" in subif_types:
912 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DOT1AD
913 if u"exact_match" in subif_types:
914 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_EXACT_MATCH
915 if u"default_sub" in subif_types:
916 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DEFAULT
917 if type_subif == u"default_sub":
918 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_INNER_VLAN_ID_ANY\
919 | SubInterfaceFlags.SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY
921 cmd = u"create_subif"
923 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
925 sub_if_flags=flags.value if hasattr(flags, u"value")
927 outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
928 inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0
930 err_msg = f"Failed to create sub-interface on host {node[u'host']}"
931 with PapiSocketExecutor(node) as papi_exec:
932 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
934 if_key = Topology.add_new_port(node, u"subinterface")
935 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
936 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
937 Topology.update_interface_name(node, if_key, ifc_name)
939 return f"{interface}.{sub_id}", sw_if_index
942 def create_gre_tunnel_interface(node, source_ip, destination_ip):
943 """Create GRE tunnel interface on node.
945 :param node: VPP node to add tunnel interface.
946 :param source_ip: Source of the GRE tunnel.
947 :param destination_ip: Destination of the GRE tunnel.
950 :type destination_ip: str
951 :returns: Name and index of created GRE tunnel interface.
953 :raises RuntimeError: If unable to create GRE tunnel interface.
955 cmd = u"gre_tunnel_add_del"
958 instance=Constants.BITWISE_NON_ZERO,
960 dst=str(destination_ip),
968 err_msg = f"Failed to create GRE tunnel interface " \
969 f"on host {node[u'host']}"
970 with PapiSocketExecutor(node) as papi_exec:
971 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
973 if_key = Topology.add_new_port(node, u"gre_tunnel")
974 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
975 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
976 Topology.update_interface_name(node, if_key, ifc_name)
978 return ifc_name, sw_if_index
981 def vpp_create_loopback(node, mac=None):
982 """Create loopback interface on VPP node.
984 :param node: Node to create loopback interface on.
985 :param mac: Optional MAC address for loopback interface.
988 :returns: SW interface index.
990 :raises RuntimeError: If it is not possible to create loopback on the
993 cmd = u"create_loopback"
995 mac_address=L2Util.mac_to_bin(mac) if mac else 0
997 err_msg = f"Failed to create loopback interface on host {node[u'host']}"
998 with PapiSocketExecutor(node) as papi_exec:
999 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1001 if_key = Topology.add_new_port(node, u"loopback")
1002 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
1003 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
1004 Topology.update_interface_name(node, if_key, ifc_name)
1006 mac = InterfaceUtil.vpp_get_interface_mac(node, ifc_name)
1007 Topology.update_interface_mac_address(node, if_key, mac)
1012 def vpp_create_bond_interface(node, mode, load_balance=None, mac=None):
1013 """Create bond interface on VPP node.
1015 :param node: DUT node from topology.
1016 :param mode: Link bonding mode.
1017 :param load_balance: Load balance (optional, valid for xor and lacp
1018 modes, otherwise ignored).
1019 :param mac: MAC address to assign to the bond interface (optional).
1022 :type load_balance: str
1024 :returns: Interface key (name) in topology.
1026 :raises RuntimeError: If it is not possible to create bond interface on
1029 cmd = u"bond_create"
1031 id=int(Constants.BITWISE_NON_ZERO),
1032 use_custom_mac=bool(mac is not None),
1033 mac_address=L2Util.mac_to_bin(mac) if mac else None,
1036 f"BOND_API_MODE_{mode.replace(u'-', u'_').upper()}"
1038 lb=0 if load_balance is None else getattr(
1039 LinkBondLoadBalanceAlgo,
1040 f"BOND_API_LB_ALGO_{load_balance.upper()}"
1044 err_msg = f"Failed to create bond interface on host {node[u'host']}"
1045 with PapiSocketExecutor(node) as papi_exec:
1046 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1048 InterfaceUtil.add_eth_interface(
1049 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_bond"
1051 if_key = Topology.get_interface_by_sw_index(node, sw_if_index)
1056 def add_eth_interface(
1057 node, ifc_name=None, sw_if_index=None, ifc_pfx=None,
1059 """Add ethernet interface to current topology.
1061 :param node: DUT node from topology.
1062 :param ifc_name: Name of the interface.
1063 :param sw_if_index: SW interface index.
1064 :param ifc_pfx: Interface key prefix.
1065 :param host_if_key: Host interface key from topology file.
1068 :type sw_if_index: int
1070 :type host_if_key: str
1072 if_key = Topology.add_new_port(node, ifc_pfx)
1074 if ifc_name and sw_if_index is None:
1075 sw_if_index = InterfaceUtil.vpp_get_interface_sw_index(
1077 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
1078 if sw_if_index and ifc_name is None:
1079 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
1080 Topology.update_interface_name(node, if_key, ifc_name)
1081 ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_index)
1082 Topology.update_interface_mac_address(node, if_key, ifc_mac)
1083 if host_if_key is not None:
1084 Topology.set_interface_numa_node(
1085 node, if_key, Topology.get_interface_numa_node(
1089 Topology.update_interface_pci_address(
1090 node, if_key, Topology.get_interface_pci_addr(node, host_if_key)
1094 def vpp_create_avf_interface(
1095 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0):
1096 """Create AVF interface on VPP node.
1098 :param node: DUT node from topology.
1099 :param if_key: Interface key from topology file of interface
1100 to be bound to i40evf driver.
1101 :param num_rx_queues: Number of RX queues.
1102 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1103 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1106 :type num_rx_queues: int
1109 :returns: AVF interface key (name) in topology.
1111 :raises RuntimeError: If it is not possible to create AVF interface on
1114 PapiSocketExecutor.run_cli_cmd(
1115 node, u"set logging class avf level debug"
1119 vf_pci_addr = Topology.get_interface_pci_addr(node, if_key)
1121 pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
1123 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1127 err_msg = f"Failed to create AVF interface on host {node[u'host']}"
1128 with PapiSocketExecutor(node) as papi_exec:
1129 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1131 InterfaceUtil.add_eth_interface(
1132 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
1136 return Topology.get_interface_by_sw_index(node, sw_if_index)
1139 def vpp_create_rdma_interface(
1140 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
1142 """Create RDMA interface on VPP node.
1144 :param node: DUT node from topology.
1145 :param if_key: Physical interface key from topology file of interface
1146 to be bound to rdma-core driver.
1147 :param num_rx_queues: Number of RX queues.
1148 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1149 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1150 :param mode: RDMA interface mode - auto/ibv/dv.
1153 :type num_rx_queues: int
1157 :returns: Interface key (name) in topology file.
1159 :raises RuntimeError: If it is not possible to create RDMA interface on
1162 PapiSocketExecutor.run_cli_cmd(
1163 node, u"set logging class rdma level debug"
1166 cmd = u"rdma_create"
1167 pci_addr = Topology.get_interface_pci_addr(node, if_key)
1169 name=InterfaceUtil.pci_to_eth(node, pci_addr),
1170 host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
1171 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1174 mode=getattr(RdmaMode, f"RDMA_API_MODE_{mode.upper()}").value,
1176 err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
1177 with PapiSocketExecutor(node) as papi_exec:
1178 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1180 InterfaceUtil.vpp_set_interface_mac(
1181 node, sw_if_index, Topology.get_interface_mac(node, if_key)
1183 InterfaceUtil.add_eth_interface(
1184 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_rdma",
1188 return Topology.get_interface_by_sw_index(node, sw_if_index)
1191 def vpp_enslave_physical_interface(node, interface, bond_if):
1192 """Enslave physical interface to bond interface on VPP node.
1194 :param node: DUT node from topology.
1195 :param interface: Physical interface key from topology file.
1196 :param bond_if: Load balance
1198 :type interface: str
1200 :raises RuntimeError: If it is not possible to enslave physical
1201 interface to bond interface on the node.
1203 cmd = u"bond_enslave"
1205 sw_if_index=Topology.get_interface_sw_index(node, interface),
1206 bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
1208 is_long_timeout=False
1210 err_msg = f"Failed to enslave physical interface {interface} to bond " \
1211 f"interface {bond_if} on host {node[u'host']}"
1212 with PapiSocketExecutor(node) as papi_exec:
1213 papi_exec.add(cmd, **args).get_reply(err_msg)
1216 def vpp_show_bond_data_on_node(node, verbose=False):
1217 """Show (detailed) bond information on VPP node.
1219 :param node: DUT node from topology.
1220 :param verbose: If detailed information is required or not.
1224 cmd = u"sw_interface_bond_dump"
1225 err_msg = f"Failed to get bond interface dump on host {node[u'host']}"
1227 data = f"Bond data on node {node[u'host']}:\n"
1228 with PapiSocketExecutor(node) as papi_exec:
1229 details = papi_exec.add(cmd).get_details(err_msg)
1231 for bond in details:
1232 data += f"{bond[u'interface_name']}\n"
1233 data += u" mode: {m}\n".format(
1234 m=bond[u"mode"].name.replace(u"BOND_API_MODE_", u"").lower()
1236 data += u" load balance: {lb}\n".format(
1237 lb=bond[u"lb"].name.replace(u"BOND_API_LB_ALGO_", u"").lower()
1239 data += f" number of active slaves: {bond[u'active_slaves']}\n"
1241 slave_data = InterfaceUtil.vpp_bond_slave_dump(
1242 node, Topology.get_interface_by_sw_index(
1243 node, bond[u"sw_if_index"]
1246 for slave in slave_data:
1247 if not slave[u"is_passive"]:
1248 data += f" {slave[u'interface_name']}\n"
1249 data += f" number of slaves: {bond[u'slaves']}\n"
1251 for slave in slave_data:
1252 data += f" {slave[u'interface_name']}\n"
1253 data += f" interface id: {bond[u'id']}\n"
1254 data += f" sw_if_index: {bond[u'sw_if_index']}\n"
1258 def vpp_bond_slave_dump(node, interface):
1259 """Get bond interface slave(s) data on VPP node.
1261 :param node: DUT node from topology.
1262 :param interface: Physical interface key from topology file.
1264 :type interface: str
1265 :returns: Bond slave interface data.
1268 cmd = u"sw_interface_slave_dump"
1270 sw_if_index=Topology.get_interface_sw_index(node, interface)
1272 err_msg = f"Failed to get slave dump on host {node[u'host']}"
1274 with PapiSocketExecutor(node) as papi_exec:
1275 details = papi_exec.add(cmd, **args).get_details(err_msg)
1277 logger.debug(f"Slave data:\n{details}")
1281 def vpp_show_bond_data_on_all_nodes(nodes, verbose=False):
1282 """Show (detailed) bond information on all VPP nodes in DICT__nodes.
1284 :param nodes: Nodes in the topology.
1285 :param verbose: If detailed information is required or not.
1289 for node_data in nodes.values():
1290 if node_data[u"type"] == NodeType.DUT:
1291 InterfaceUtil.vpp_show_bond_data_on_node(node_data, verbose)
1294 def vpp_enable_input_acl_interface(
1295 node, interface, ip_version, table_index):
1296 """Enable input acl on interface.
1298 :param node: VPP node to setup interface for input acl.
1299 :param interface: Interface to setup input acl.
1300 :param ip_version: Version of IP protocol.
1301 :param table_index: Classify table index.
1303 :type interface: str or int
1304 :type ip_version: str
1305 :type table_index: int
1307 cmd = u"input_acl_set_interface"
1309 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1310 ip4_table_index=table_index if ip_version == u"ip4"
1311 else Constants.BITWISE_NON_ZERO,
1312 ip6_table_index=table_index if ip_version == u"ip6"
1313 else Constants.BITWISE_NON_ZERO,
1314 l2_table_index=table_index if ip_version == u"l2"
1315 else Constants.BITWISE_NON_ZERO,
1317 err_msg = f"Failed to enable input acl on interface {interface}"
1318 with PapiSocketExecutor(node) as papi_exec:
1319 papi_exec.add(cmd, **args).get_reply(err_msg)
1322 def get_interface_classify_table(node, interface):
1323 """Get name of classify table for the given interface.
1325 TODO: Move to Classify.py.
1327 :param node: VPP node to get data from.
1328 :param interface: Name or sw_if_index of a specific interface.
1330 :type interface: str or int
1331 :returns: Classify table name.
1334 if isinstance(interface, str):
1335 sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
1337 sw_if_index = interface
1339 cmd = u"classify_table_by_interface"
1341 sw_if_index=sw_if_index
1343 err_msg = f"Failed to get classify table name by interface {interface}"
1344 with PapiSocketExecutor(node) as papi_exec:
1345 reply = papi_exec.add(cmd, **args).get_reply(err_msg)
1350 def get_sw_if_index(node, interface_name):
1351 """Get sw_if_index for the given interface from actual interface dump.
1353 FIXME: Delete and redirect callers to vpp_get_interface_sw_index.
1355 :param node: VPP node to get interface data from.
1356 :param interface_name: Name of the specific interface.
1358 :type interface_name: str
1359 :returns: sw_if_index of the given interface.
1362 interface_data = InterfaceUtil.vpp_get_interface_data(
1363 node, interface=interface_name
1365 return interface_data.get(u"sw_if_index")
1368 def vxlan_gpe_dump(node, interface_name=None):
1369 """Get VxLAN GPE data for the given interface.
1371 :param node: VPP node to get interface data from.
1372 :param interface_name: Name of the specific interface. If None,
1373 information about all VxLAN GPE interfaces is returned.
1375 :type interface_name: str
1376 :returns: Dictionary containing data for the given VxLAN GPE interface
1377 or if interface=None, the list of dictionaries with all VxLAN GPE
1379 :rtype: dict or list
1381 def process_vxlan_gpe_dump(vxlan_dump):
1382 """Process vxlan_gpe dump.
1384 :param vxlan_dump: Vxlan_gpe nterface dump.
1385 :type vxlan_dump: dict
1386 :returns: Processed vxlan_gpe interface dump.
1389 if vxlan_dump[u"is_ipv6"]:
1390 vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"])
1391 vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"])
1393 vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"][0:4])
1394 vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"][0:4])
1397 if interface_name is not None:
1398 sw_if_index = InterfaceUtil.get_interface_index(
1399 node, interface_name
1402 sw_if_index = int(Constants.BITWISE_NON_ZERO)
1404 cmd = u"vxlan_gpe_tunnel_dump"
1406 sw_if_index=sw_if_index
1408 err_msg = f"Failed to get VXLAN-GPE dump on host {node[u'host']}"
1409 with PapiSocketExecutor(node) as papi_exec:
1410 details = papi_exec.add(cmd, **args).get_details(err_msg)
1412 data = list() if interface_name is None else dict()
1413 for dump in details:
1414 if interface_name is None:
1415 data.append(process_vxlan_gpe_dump(dump))
1416 elif dump[u"sw_if_index"] == sw_if_index:
1417 data = process_vxlan_gpe_dump(dump)
1420 logger.debug(f"VXLAN-GPE data:\n{data}")
1424 def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
1425 """Assign VPP interface to specific VRF/FIB table.
1427 :param node: VPP node where the FIB and interface are located.
1428 :param interface: Interface to be assigned to FIB.
1429 :param table_id: VRF table ID.
1430 :param ipv6: Assign to IPv6 table. Default False.
1432 :type interface: str or int
1436 cmd = u"sw_interface_set_table"
1438 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1440 vrf_id=int(table_id)
1442 err_msg = f"Failed to assign interface {interface} to FIB table"
1443 with PapiSocketExecutor(node) as papi_exec:
1444 papi_exec.add(cmd, **args).get_reply(err_msg)
1447 def set_linux_interface_mac(
1448 node, interface, mac, namespace=None, vf_id=None):
1449 """Set MAC address for interface in linux.
1451 :param node: Node where to execute command.
1452 :param interface: Interface in namespace.
1453 :param mac: MAC to be assigned to interface.
1454 :param namespace: Execute command in namespace. Optional
1455 :param vf_id: Virtual Function id. Optional
1457 :type interface: str
1459 :type namespace: str
1462 mac_str = f"vf {vf_id} mac {mac}" if vf_id is not None \
1463 else f"address {mac}"
1464 ns_str = f"ip netns exec {namespace}" if namespace else u""
1466 cmd = f"{ns_str} ip link set {interface} {mac_str}"
1467 exec_cmd_no_error(node, cmd, sudo=True)
1470 def set_linux_interface_trust_on(
1471 node, interface, namespace=None, vf_id=None):
1472 """Set trust on (promisc) for interface in linux.
1474 :param node: Node where to execute command.
1475 :param interface: Interface in namespace.
1476 :param namespace: Execute command in namespace. Optional
1477 :param vf_id: Virtual Function id. Optional
1479 :type interface: str
1480 :type namespace: str
1483 trust_str = f"vf {vf_id} trust on" if vf_id is not None else u"trust on"
1484 ns_str = f"ip netns exec {namespace}" if namespace else u""
1486 cmd = f"{ns_str} ip link set dev {interface} {trust_str}"
1487 exec_cmd_no_error(node, cmd, sudo=True)
1490 def set_linux_interface_spoof_off(
1491 node, interface, namespace=None, vf_id=None):
1492 """Set spoof off for interface in linux.
1494 :param node: Node where to execute command.
1495 :param interface: Interface in namespace.
1496 :param namespace: Execute command in namespace. Optional
1497 :param vf_id: Virtual Function id. Optional
1499 :type interface: str
1500 :type namespace: str
1503 spoof_str = f"vf {vf_id} spoof off" if vf_id is not None \
1505 ns_str = f"ip netns exec {namespace}" if namespace else u""
1507 cmd = f"{ns_str} ip link set dev {interface} {spoof_str}"
1508 exec_cmd_no_error(node, cmd, sudo=True)
1511 def init_avf_interface(node, ifc_key, numvfs=1, osi_layer=u"L2"):
1512 """Init PCI device by creating VIFs and bind them to vfio-pci for AVF
1513 driver testing on DUT.
1515 :param node: DUT node.
1516 :param ifc_key: Interface key from topology file.
1517 :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
1518 :param osi_layer: OSI Layer type to initialize TG with.
1519 Default value "L2" sets linux interface spoof off.
1523 :type osi_layer: str
1524 :returns: Virtual Function topology interface keys.
1526 :raises RuntimeError: If a reason preventing initialization is found.
1528 # Read PCI address and driver.
1529 pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
1530 pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
1531 uio_driver = Topology.get_uio_driver(node)
1532 kernel_driver = Topology.get_interface_driver(node, ifc_key)
1533 if kernel_driver not in (u"i40e", u"i40evf"):
1535 f"AVF needs i40e-compatible driver, not {kernel_driver} "
1536 f"at node {node[u'host']} ifc {ifc_key}"
1538 current_driver = DUTSetup.get_pci_dev_driver(
1539 node, pf_pci_addr.replace(u":", r"\:"))
1541 VPPUtil.stop_vpp_service(node)
1542 if current_driver != kernel_driver:
1543 # PCI device must be re-bound to kernel driver before creating VFs.
1544 DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
1545 # Stop VPP to prevent deadlock.
1546 # Unbind from current driver.
1547 DUTSetup.pci_driver_unbind(node, pf_pci_addr)
1548 # Bind to kernel driver.
1549 DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
1551 # Initialize PCI VFs.
1552 DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
1555 # Set MAC address and bind each virtual function to uio driver.
1556 for vf_id in range(numvfs):
1557 vf_mac_addr = u":".join(
1558 [pf_mac_addr[0], pf_mac_addr[2], pf_mac_addr[3], pf_mac_addr[4],
1559 pf_mac_addr[5], f"{vf_id:02x}"
1563 pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"
1564 InterfaceUtil.set_linux_interface_trust_on(
1565 node, pf_dev, vf_id=vf_id
1567 if osi_layer == u"L2":
1568 InterfaceUtil.set_linux_interface_spoof_off(
1569 node, pf_dev, vf_id=vf_id
1571 InterfaceUtil.set_linux_interface_mac(
1572 node, pf_dev, vf_mac_addr, vf_id=vf_id
1575 DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
1576 DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
1578 # Add newly created ports into topology file
1579 vf_ifc_name = f"{ifc_key}_vif"
1580 vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
1581 vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
1582 Topology.update_interface_name(
1583 node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
1585 Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
1586 Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
1587 Topology.set_interface_numa_node(
1588 node, vf_ifc_key, Topology.get_interface_numa_node(
1592 vf_ifc_keys.append(vf_ifc_key)
1597 def vpp_sw_interface_rx_placement_dump(node):
1598 """Dump VPP interface RX placement on node.
1600 :param node: Node to run command on.
1602 :returns: Thread mapping information as a list of dictionaries.
1605 cmd = u"sw_interface_rx_placement_dump"
1606 err_msg = f"Failed to run '{cmd}' PAPI command on host {node[u'host']}!"
1607 with PapiSocketExecutor(node) as papi_exec:
1608 for ifc in node[u"interfaces"].values():
1609 if ifc[u"vpp_sw_index"] is not None:
1610 papi_exec.add(cmd, sw_if_index=ifc[u"vpp_sw_index"])
1611 details = papi_exec.get_details(err_msg)
1612 return sorted(details, key=lambda k: k[u"sw_if_index"])
1615 def vpp_sw_interface_set_rx_placement(
1616 node, sw_if_index, queue_id, worker_id):
1617 """Set interface RX placement to worker on node.
1619 :param node: Node to run command on.
1620 :param sw_if_index: VPP SW interface index.
1621 :param queue_id: VPP interface queue ID.
1622 :param worker_id: VPP worker ID (indexing from 0).
1624 :type sw_if_index: int
1626 :type worker_id: int
1627 :raises RuntimeError: If failed to run command on host or if no API
1630 cmd = u"sw_interface_set_rx_placement"
1631 err_msg = f"Failed to set interface RX placement to worker " \
1632 f"on host {node[u'host']}!"
1634 sw_if_index=sw_if_index,
1636 worker_id=worker_id,
1639 with PapiSocketExecutor(node) as papi_exec:
1640 papi_exec.add(cmd, **args).get_reply(err_msg)
1643 def vpp_round_robin_rx_placement(node, prefix):
1644 """Set Round Robin interface RX placement on all worker threads
1647 :param node: Topology nodes.
1648 :param prefix: Interface name prefix.
1653 worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
1656 for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
1657 for interface in node[u"interfaces"].values():
1658 if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
1659 and prefix in interface[u"name"]:
1660 InterfaceUtil.vpp_sw_interface_set_rx_placement(
1661 node, placement[u"sw_if_index"], placement[u"queue_id"],
1662 worker_id % worker_cnt
1667 def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix):
1668 """Set Round Robin interface RX placement on all worker threads
1671 :param nodes: Topology nodes.
1672 :param prefix: Interface name prefix.
1676 for node in nodes.values():
1677 if node[u"type"] == NodeType.DUT:
1678 InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)