# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14 """Interface util library."""
from enum import IntEnum
from ipaddress import ip_address
from time import sleep

from robot.api import logger

from resources.libraries.python.Constants import Constants
from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.DUTSetup import DUTSetup
from resources.libraries.python.IPAddress import IPAddress
from resources.libraries.python.L2Util import L2Util
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.parsers.JsonParser import JsonParser
from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
from resources.libraries.python.topology import NodeType, Topology
from resources.libraries.python.VPPUtil import VPPUtil
class InterfaceStatusFlags(IntEnum):
    """Interface status flags, mirroring the VPP API enum."""
    IF_STATUS_API_FLAG_ADMIN_UP = 1
    IF_STATUS_API_FLAG_LINK_UP = 2
class MtuProto(IntEnum):
    """MTU protocol, mirroring the VPP API enum.

    NOTE(review): members 0-2 were missing from the damaged source and are
    reconstructed from the VPP interface API definition — confirm upstream.
    """
    MTU_PROTO_API_L3 = 0
    MTU_PROTO_API_IP4 = 1
    MTU_PROTO_API_IP6 = 2
    MTU_PROTO_API_MPLS = 3
class LinkDuplex(IntEnum):
    """Link duplex, mirroring the VPP API enum."""
    LINK_DUPLEX_API_UNKNOWN = 0
    LINK_DUPLEX_API_HALF = 1
    LINK_DUPLEX_API_FULL = 2
class SubInterfaceFlags(IntEnum):
    """Sub-interface flags, mirroring the VPP API bitmask enum."""
    SUB_IF_API_FLAG_NO_TAGS = 1
    SUB_IF_API_FLAG_ONE_TAG = 2
    SUB_IF_API_FLAG_TWO_TAGS = 4
    SUB_IF_API_FLAG_DOT1AD = 8
    SUB_IF_API_FLAG_EXACT_MATCH = 16
    SUB_IF_API_FLAG_DEFAULT = 32
    SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY = 64
    SUB_IF_API_FLAG_INNER_VLAN_ID_ANY = 128
    SUB_IF_API_FLAG_DOT1AH = 256
class RxMode(IntEnum):
    """RX mode, mirroring the VPP API enum."""
    RX_MODE_API_UNKNOWN = 0
    RX_MODE_API_POLLING = 1
    RX_MODE_API_INTERRUPT = 2
    RX_MODE_API_ADAPTIVE = 3
    RX_MODE_API_DEFAULT = 4
class IfType(IntEnum):
    """Interface type, mirroring the VPP API enum.

    NOTE(review): members 1-3 were missing from the damaged source and are
    reconstructed from the VPP interface API definition — confirm upstream.
    """
    # A physical NIC.
    IF_API_TYPE_HARDWARE = 0
    # A sub-interface of a hardware interface.
    IF_API_TYPE_SUB = 1
    IF_API_TYPE_P2P = 2
    IF_API_TYPE_PIPE = 3
class LinkBondLoadBalanceAlgo(IntEnum):
    """Link bonding load balance algorithm, mirroring the VPP API enum."""
    BOND_API_LB_ALGO_L2 = 0
    BOND_API_LB_ALGO_L34 = 1
    BOND_API_LB_ALGO_L23 = 2
    BOND_API_LB_ALGO_RR = 3
    BOND_API_LB_ALGO_BC = 4
    BOND_API_LB_ALGO_AB = 5
class LinkBondMode(IntEnum):
    """Link bonding mode, mirroring the VPP API enum."""
    BOND_API_MODE_ROUND_ROBIN = 1
    BOND_API_MODE_ACTIVE_BACKUP = 2
    BOND_API_MODE_XOR = 3
    BOND_API_MODE_BROADCAST = 4
    BOND_API_MODE_LACP = 5
class RdmaMode(IntEnum):
    """RDMA interface mode, mirroring the VPP API enum."""
    RDMA_API_MODE_AUTO = 0
    RDMA_API_MODE_IBV = 1
115 """General utilities for managing interfaces"""
def pci_to_int(pci_str):
    """Convert PCI address from string format (0000:18:0a.0) to
    integer representation (169345024).

    :param pci_str: PCI address in string representation.
    :type pci_str: str
    :returns: Integer representation of PCI address.
    :rtype: int
    """
    # Split "dddd:bb:ss.f" into [domain, bus, slot, function].
    pci = list(pci_str.split(u":")[0:2])
    pci.extend(pci_str.split(u":")[2].split(u"."))

    # Pack fields into one integer: domain | bus<<16 | slot<<24 | func<<29.
    return (int(pci[0], 16) | int(pci[1], 16) << 16 |
            int(pci[2], 16) << 24 | int(pci[3], 16) << 29)
def pci_to_eth(node, pci_str):
    """Convert PCI address on DUT to Linux ethernet name.

    :param node: DUT node.
    :param pci_str: PCI address.
    :type node: dict
    :type pci_str: str
    :returns: Ethernet name.
    :rtype: str
    :raises RuntimeError: If the PCI address has no associated net device.
    """
    # The kernel exposes the netdev name as the single entry under
    # /sys/bus/pci/devices/<pci>/net/.
    cmd = f"basename /sys/bus/pci/devices/{pci_str}/net/*"
    try:
        stdout, _ = exec_cmd_no_error(node, cmd)
    except RuntimeError:
        raise RuntimeError(f"Cannot convert {pci_str} to ethernet name!")

    return stdout.strip()
def get_interface_index(node, interface):
    """Get interface sw_if_index from topology file.

    :param node: Node where the interface is.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: str or int
    :returns: SW interface index.
    :rtype: int
    :raises TypeError: If the interface is in an unsupported format.
    """
    try:
        # Numeric input (or numeric string) is already the index.
        sw_if_index = int(interface)
    except ValueError:
        # Fall back to topology lookup by interface key, then by name.
        sw_if_index = Topology.get_interface_sw_index(node, interface)
        if sw_if_index is None:
            sw_if_index = \
                Topology.get_interface_sw_index_by_name(node, interface)
    except TypeError as err:
        raise TypeError(f"Wrong interface format {interface}") from err

    return sw_if_index
def set_interface_state(node, interface, state, if_type=u"key"):
    """Set interface state on a node.

    Function can be used for DUTs as well as for TGs.

    :param node: Node where the interface is.
    :param interface: Interface key or sw_if_index or name.
    :param state: One of 'up' or 'down'.
    :param if_type: Interface type, one of 'key' or 'name'.
    :type node: dict
    :type interface: str or int
    :type state: str
    :type if_type: str
    :raises ValueError: If the interface type is unknown.
    :raises ValueError: If the state of interface is unexpected.
    :raises ValueError: If the node has an unknown node type.
    """
    if if_type == u"key":
        if isinstance(interface, str):
            sw_if_index = Topology.get_interface_sw_index(node, interface)
            iface_name = Topology.get_interface_name(node, interface)
        else:
            sw_if_index = interface
    elif if_type == u"name":
        iface_key = Topology.get_interface_by_name(node, interface)
        if iface_key is not None:
            sw_if_index = Topology.get_interface_sw_index(node, iface_key)
        iface_name = interface
    else:
        raise ValueError(f"Unknown if_type: {if_type}")

    if node[u"type"] == NodeType.DUT:
        # DUTs are driven through the VPP API.
        if state == u"up":
            flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
        elif state == u"down":
            flags = 0
        else:
            raise ValueError(f"Unexpected interface state: {state}")
        cmd = u"sw_interface_set_flags"
        err_msg = f"Failed to set interface state on host {node[u'host']}"
        args = dict(
            sw_if_index=int(sw_if_index),
            flags=flags
        )
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
    elif node[u"type"] == NodeType.TG or node[u"type"] == NodeType.VM:
        # TGs and VMs use plain Linux tooling.
        cmd = f"ip link set {iface_name} {state}"
        exec_cmd_no_error(node, cmd, sudo=True)
    else:
        raise ValueError(
            f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
        )
def set_interface_mtu(node, pf_pcis, mtu=9200):
    """Set Ethernet MTU for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param mtu: MTU to set. Default: 9200.
    :type node: dict
    :type pf_pcis: list
    :type mtu: int
    :raises RuntimeError: If failed to set MTU on interface.
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ip link set {pf_eth} mtu {mtu}"
        exec_cmd_no_error(node, cmd, sudo=True)
def set_interface_flow_control(node, pf_pcis, rx=u"off", tx=u"off"):
    """Set Ethernet flow control for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param rx: RX flow. Default: off.
    :param tx: TX flow. Default: off.
    :type node: dict
    :type pf_pcis: list
    :type rx: str
    :type tx: str
    :raises RuntimeError: If failed to set flow control on interface.
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        # Bug fix: honor the rx/tx arguments instead of hard-coded "off".
        cmd = f"ethtool -A {pf_eth} rx {rx} tx {tx}"
        ret_code, _, _ = exec_cmd(node, cmd, sudo=True)
        # ethtool exits with 78 when the setting is already as requested.
        if int(ret_code) not in (0, 78):
            # Bug fix: was a non-f-string mentioning MTU.
            raise RuntimeError(f"Failed to set flow control on {pf_eth}!")
def set_pci_parameter(node, pf_pcis, key, value):
    """Set PCI parameter for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param key: Key to set.
    :param value: Value to set.
    :type node: dict
    :type pf_pcis: list
    :type key: str
    :type value: str
    :raises RuntimeError: If failed to set PCI parameter.
    """
    for pf_pci in pf_pcis:
        cmd = f"setpci -s {pf_pci} {key}={value}"
        exec_cmd_no_error(node, cmd, sudo=True)
def vpp_set_interface_mtu(node, interface, mtu=9200):
    """Set Ethernet MTU on interface.

    :param node: VPP node.
    :param interface: Interface to setup MTU.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type interface: str or int
    :type mtu: int
    """
    if isinstance(interface, str):
        sw_if_index = Topology.get_interface_sw_index(node, interface)
    else:
        sw_if_index = interface

    cmd = u"hw_interface_set_mtu"
    err_msg = f"Failed to set interface MTU on host {node[u'host']}"
    args = dict(
        sw_if_index=sw_if_index,
        mtu=int(mtu)
    )
    try:
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
    except AssertionError as err:
        # Some drivers refuse MTU changes; tolerate and log.
        # TODO: Make failure tolerance optional.
        logger.debug(f"Setting MTU failed. Expected?\n{err}")
def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
    """Set Ethernet MTU on all interfaces.

    :param node: VPP node.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type mtu: int
    """
    for interface in node[u"interfaces"]:
        InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
    """Set Ethernet MTU on all interfaces on all DUTs.

    :param nodes: VPP nodes.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type nodes: dict
    :type mtu: int
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
def vpp_node_interfaces_ready_wait(node, retries=15):
    """Wait until all interfaces with admin-up are in link-up state.

    :param node: Node to wait on.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type node: dict
    :type retries: int
    :raises RuntimeError: If any interface is not in link-up state after
        defined number of retries.
    """
    for _ in range(0, retries):
        not_ready = list()
        out = InterfaceUtil.vpp_get_interface_data(node)
        for interface in out:
            # flags == 1 means admin-up set but link-up not yet reported.
            if interface.get(u"flags") == 1:
                not_ready.append(interface.get(u"interface_name"))
        if not_ready:
            logger.debug(
                f"Interfaces still not in link-up state:\n{not_ready}"
            )
            sleep(1)
        else:
            break
    else:
        err = f"Timeout, interfaces not up:\n{not_ready}" \
            if u"not_ready" in locals() else u"No check executed!"
        raise RuntimeError(err)
def all_vpp_interfaces_ready_wait(nodes, retries=15):
    """Wait until all interfaces with admin-up are in link-up state for all
    nodes in the topology.

    :param nodes: Nodes in the topology.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type nodes: dict
    :type retries: int
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)
def vpp_get_interface_data(node, interface=None):
    """Get all interface data from a VPP node. If a name or
    sw_interface_index is provided, return only data for the matching
    interface(s).

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: List of dictionaries containing data for each interface, or a
        single dictionary for the specified interface.
    :rtype: list or dict
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    def process_if_dump(if_dump):
        """Process interface dump.

        Converts API binary/enum fields to plain Python values.

        :param if_dump: Interface dump.
        :type if_dump: dict
        :returns: Processed interface dump.
        :rtype: dict
        """
        if_dump[u"l2_address"] = str(if_dump[u"l2_address"])
        if_dump[u"b_dmac"] = str(if_dump[u"b_dmac"])
        if_dump[u"b_smac"] = str(if_dump[u"b_smac"])
        if_dump[u"flags"] = if_dump[u"flags"].value
        if_dump[u"type"] = if_dump[u"type"].value
        if_dump[u"link_duplex"] = if_dump[u"link_duplex"].value
        if_dump[u"sub_if_flags"] = if_dump[u"sub_if_flags"].value \
            if hasattr(if_dump[u"sub_if_flags"], u"value") \
            else int(if_dump[u"sub_if_flags"])
        return if_dump

    if interface is not None:
        # Choose the dump field used to match the requested interface.
        if isinstance(interface, str):
            param = u"interface_name"
        elif isinstance(interface, int):
            param = u"sw_if_index"
        else:
            raise TypeError(f"Wrong interface format {interface}")
    else:
        param = u""

    cmd = u"sw_interface_dump"
    args = dict(
        name_filter_valid=False,
        name_filter=u""
    )
    err_msg = f"Failed to get interface dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)
    logger.debug(f"Received data:\n{details!r}")

    data = list() if interface is None else dict()
    for dump in details:
        if interface is None:
            data.append(process_if_dump(dump))
        elif str(dump.get(param)).rstrip(u"\x00") == str(interface):
            data = process_if_dump(dump)
            break

    logger.debug(f"Interface data:\n{data}")
    return data
def vpp_get_interface_name(node, sw_if_index):
    """Get interface name for the given SW interface index from actual
    interface dump.

    :param node: VPP node to get interface data from.
    :param sw_if_index: SW interface index of the specific interface.
    :type node: dict
    :type sw_if_index: int
    :returns: Name of the given interface.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
    if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
        # Sub-interface: resolve the supporting (parent) interface data.
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data[u"sup_sw_if_index"]
        )

    return if_data.get(u"interface_name")
def vpp_get_interface_sw_index(node, interface_name):
    """Get SW interface index for the given interface name from actual
    interface dump.

    :param node: VPP node to get interface data from.
    :param interface_name: Interface name.
    :type node: dict
    :type interface_name: str
    :returns: SW interface index of the given interface.
    :rtype: int
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)

    return if_data.get(u"sw_if_index")
def vpp_get_interface_mac(node, interface):
    """Get MAC address for the given interface from actual interface dump.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: MAC address.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
    if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
        # Sub-interface: the MAC lives on the supporting interface.
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data[u"sup_sw_if_index"])

    return if_data.get(u"l2_address")
def vpp_set_interface_mac(node, interface, mac):
    """Set MAC address for the given interface.

    :param node: VPP node to set interface MAC.
    :param interface: Numeric index or name string of a specific interface.
    :param mac: Required MAC address.
    :type node: dict
    :type interface: int or str
    :type mac: str
    """
    cmd = u"sw_interface_set_mac_address"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        mac_address=L2Util.mac_to_bin(mac)
    )
    # Bug fix: original message lacked a space before "on host".
    err_msg = f"Failed to set MAC address of interface {interface} " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def tg_set_interface_driver(node, pci_addr, driver):
    """Set interface driver on the TG node.

    :param node: Node to set interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :param driver: Driver name.
    :type node: dict
    :type pci_addr: str
    :type driver: str
    :raises RuntimeError: If unbinding from the current driver fails.
    :raises RuntimeError: If binding to the new driver fails.
    """
    old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
    if old_driver == driver:
        # Already bound to the requested driver; nothing to do.
        return

    ssh = SSH()
    ssh.connect(node)

    # Unbind from current driver
    if old_driver is not None:
        cmd = f"sh -c \"echo {pci_addr} > " \
            f"/sys/bus/pci/drivers/{old_driver}/unbind\""
        ret_code, _, _ = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")

    # Bind to the new driver
    cmd = f"sh -c \"echo {pci_addr} > /sys/bus/pci/drivers/{driver}/bind\""
    ret_code, _, _ = ssh.exec_command_sudo(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
def tg_get_interface_driver(node, pci_addr):
    """Get interface driver from the TG node.

    :param node: Node to get interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :type node: dict
    :type pci_addr: str
    :returns: Interface driver or None if not found.
    :rtype: str
    :raises RuntimeError: If PCI rescan or lspci command execution failed.
    """
    return DUTSetup.get_pci_dev_driver(node, pci_addr)
def tg_set_interfaces_default_driver(node):
    """Set interfaces default driver specified in topology yaml file.

    :param node: Node to setup interfaces driver on (must be TG node).
    :type node: dict
    """
    for interface in node[u"interfaces"].values():
        InterfaceUtil.tg_set_interface_driver(
            node, interface[u"pci_address"], interface[u"driver"]
        )
def update_vpp_interface_data_on_node(node):
    """Update vpp generated interface data for a given node in DICT__nodes.

    Updates interface names, software if index numbers and any other details
    generated specifically by vpp that are unknown before testcase run.
    It does this by dumping interface list from all devices using python
    api, and pairing known information from topology (mac address) to state
    of the node.

    :param node: Node selected from DICT__nodes.
    :type node: dict
    """
    interface_list = InterfaceUtil.vpp_get_interface_data(node)
    # Index the dump by MAC so topology interfaces can be matched.
    interface_dict = dict()
    for ifc in interface_list:
        interface_dict[ifc[u"l2_address"]] = ifc

    for if_name, if_data in node[u"interfaces"].items():
        ifc_dict = interface_dict.get(if_data[u"mac_address"])
        if ifc_dict is not None:
            if_data[u"name"] = ifc_dict[u"interface_name"]
            if_data[u"vpp_sw_index"] = ifc_dict[u"sw_if_index"]
            if_data[u"mtu"] = ifc_dict[u"mtu"][0]
            logger.trace(
                f"Interface {if_name} found by MAC "
                f"{if_data[u'mac_address']}"
            )
        else:
            logger.trace(
                f"Interface {if_name} not found by MAC "
                f"{if_data[u'mac_address']}"
            )
            if_data[u"vpp_sw_index"] = None
def update_nic_interface_names(node):
    """Update interface names based on nic type and PCI address.

    This method updates interface names in the same format as VPP does.

    :param node: Node dictionary.
    :type node: dict
    """
    # Interface name prefix per NIC model, matching VPP naming.
    model_prefix = {
        u"Intel-XL710": u"FortyGigabitEthernet",
        u"Intel-X710": u"TenGigabitEthernet",
        u"Intel-X520-DA2": u"TenGigabitEthernet",
        u"Cisco-VIC-1385": u"FortyGigabitEthernet",
        u"Cisco-VIC-1227": u"TenGigabitEthernet",
    }
    for ifc in node[u"interfaces"].values():
        if_pci = ifc[u"pci_address"].replace(u".", u":").split(u":")
        # VPP encodes bus/slot/function as unpadded lowercase hex.
        loc = f"{int(if_pci[1], 16):x}/{int(if_pci[2], 16):x}/" \
            f"{int(if_pci[3], 16):x}"
        prefix = model_prefix.get(ifc[u"model"], u"UnknownEthernet")
        ifc[u"name"] = f"{prefix}{loc}"
def update_nic_interface_names_on_all_duts(nodes):
    """Update interface names based on nic type and PCI address on all DUTs.

    This method updates interface names in the same format as VPP does.

    :param nodes: Topology nodes.
    :type nodes: dict
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.update_nic_interface_names(node)
def update_tg_interface_data_on_node(node):
    """Update interface name for TG/linux node in DICT__nodes.

    .. note::
        # for dev in `ls /sys/class/net/`;
        > do echo "\\"`cat /sys/class/net/$dev/address`\\": \\"$dev\\""; done
        "52:54:00:9f:82:63": "eth0"
        "52:54:00:77:ae:a9": "eth1"
        "52:54:00:e1:8a:0f": "eth2"
        "00:00:00:00:00:00": "lo"

    :param node: Node selected from DICT__nodes.
    :type node: dict
    :raises RuntimeError: If getting of interface name and MAC fails.
    """
    # First setup interface driver specified in yaml file
    InterfaceUtil.tg_set_interfaces_default_driver(node)

    # Get interface names
    ssh = SSH()
    ssh.connect(node)

    # Emit one '"mac": "dev"' line per netdev; parsed below as JSON.
    cmd = u'for dev in `ls /sys/class/net/`; do echo "\\"`cat ' \
        u'/sys/class/net/$dev/address`\\": \\"$dev\\""; done;'

    ret_code, stdout, _ = ssh.exec_command(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(u"Get interface name and MAC failed")
    tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"

    interfaces = JsonParser().parse_data(tmp)
    for interface in node[u"interfaces"].values():
        name = interfaces.get(interface[u"mac_address"])
        if name is None:
            continue
        interface[u"name"] = name
def iface_update_numa_node(node):
    """For all interfaces from topology file update numa node based on
    information from the node.

    :param node: Node from topology.
    :type node: dict
    :returns: Nothing.
    :raises ValueError: If numa node ia less than 0.
    :raises RuntimeError: If update of numa node failed.
    """
    ssh = SSH()
    for if_key in Topology.get_node_interfaces(node):
        if_pci = Topology.get_interface_pci_addr(node, if_key)
        ssh.connect(node)
        cmd = f"cat /sys/bus/pci/devices/{if_pci}/numa_node"
        # Retry a few times; sysfs read can transiently fail.
        for _ in range(3):
            ret, out, _ = ssh.exec_command(cmd)
            if ret == 0:
                try:
                    # Kernel reports -1 when numa is unknown; treat as 0.
                    numa_node = 0 if int(out) < 0 else int(out)
                except ValueError:
                    logger.trace(
                        f"Reading numa location failed for: {if_pci}"
                    )
                else:
                    Topology.set_interface_numa_node(
                        node, if_key, numa_node
                    )
                    break
        else:
            raise RuntimeError(f"Update numa node failed for: {if_pci}")
def update_all_interface_data_on_all_nodes(
        nodes, skip_tg=False, skip_vpp=False):
    """Update interface names on all nodes in DICT__nodes.

    This method updates the topology dictionary by querying interface lists
    of all nodes mentioned in the topology dictionary.

    :param nodes: Nodes in the topology.
    :param skip_tg: Skip TG node.
    :param skip_vpp: Skip VPP node.
    :type nodes: dict
    :type skip_tg: bool
    :type skip_vpp: bool
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT and not skip_vpp:
            InterfaceUtil.update_vpp_interface_data_on_node(node)
        elif node[u"type"] == NodeType.TG and not skip_tg:
            InterfaceUtil.update_tg_interface_data_on_node(node)
            InterfaceUtil.iface_update_numa_node(node)
def create_vlan_subinterface(node, interface, vlan):
    """Create VLAN sub-interface on node.

    :param node: Node to add VLAN subinterface on.
    :param interface: Interface name or index on which create VLAN
        subinterface.
    :param vlan: VLAN ID of the subinterface to be created.
    :type node: dict
    :type interface: str or int
    :type vlan: int
    :returns: Name and index of created subinterface.
    :rtype: tuple
    :raises RuntimeError: if it is unable to create VLAN subinterface on the
        node or interface cannot be converted.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"create_vlan_subif"
    args = dict(
        sw_if_index=sw_if_index,
        vlan_id=int(vlan)
    )
    err_msg = f"Failed to create VLAN sub-interface on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new sub-interface in the topology.
    if_key = Topology.add_new_port(node, u"vlan_subif")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{vlan}", sw_if_index
def create_vxlan_interface(node, vni, source_ip, destination_ip):
    """Create VXLAN interface and return sw if index of created interface.

    :param node: Node where to create VXLAN interface.
    :param vni: VXLAN Network Identifier.
    :param source_ip: Source IP of a VXLAN Tunnel End Point.
    :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
    :type node: dict
    :type vni: int
    :type source_ip: str
    :type destination_ip: str
    :returns: SW IF INDEX of created interface.
    :rtype: int
    :raises RuntimeError: if it is unable to create VxLAN interface on the
        node.
    """
    cmd = u"vxlan_add_del_tunnel"
    args = dict(
        is_add=True,
        instance=Constants.BITWISE_NON_ZERO,
        src_address=IPAddress.create_ip_address_object(
            ip_address(source_ip)
        ),
        dst_address=IPAddress.create_ip_address_object(
            ip_address(destination_ip)
        ),
        mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
        encap_vrf_id=0,
        decap_next_index=Constants.BITWISE_NON_ZERO,
        vni=int(vni)
    )
    err_msg = f"Failed to create VXLAN tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new tunnel interface in the topology.
    if_key = Topology.add_new_port(node, u"vxlan_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return sw_if_index
def set_vxlan_bypass(node, interface=None):
    """Add the 'ip4-vxlan-bypass' graph node for a given interface.

    By adding the IPv4 vxlan-bypass graph node to an interface, the node
    checks for and validate input vxlan packet and bypass ip4-lookup,
    ip4-local, ip4-udp-lookup nodes to speedup vxlan packet forwarding.
    This node will cause extra overhead to for non-vxlan packets which is
    kept at a minimum.

    :param node: Node where to set VXLAN bypass.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :raises RuntimeError: if it failed to set VXLAN bypass on interface.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"sw_interface_set_vxlan_bypass"
    args = dict(
        is_ipv6=False,
        sw_if_index=sw_if_index,
        enable=True
    )
    err_msg = f"Failed to set VXLAN bypass on interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_replies(err_msg)
def vxlan_dump(node, interface=None):
    """Get VxLAN data for the given interface.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
        If None, information about all VxLAN interfaces is returned.
    :type node: dict
    :type interface: int or str
    :returns: Dictionary containing data for the given VxLAN interface or if
        interface=None, the list of dictionaries with all VxLAN interfaces.
    :rtype: dict or list
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    def process_vxlan_dump(vxlan_dump):
        """Process vxlan dump.

        Converts API address objects to plain strings.

        :param vxlan_dump: Vxlan interface dump.
        :type vxlan_dump: dict
        :returns: Processed vxlan interface dump.
        :rtype: dict
        """
        vxlan_dump[u"src_address"] = str(vxlan_dump[u"src_address"])
        vxlan_dump[u"dst_address"] = str(vxlan_dump[u"dst_address"])
        return vxlan_dump

    if interface is not None:
        sw_if_index = InterfaceUtil.get_interface_index(node, interface)
    else:
        # Dump all tunnels when no interface is given.
        sw_if_index = int(Constants.BITWISE_NON_ZERO)

    cmd = u"vxlan_tunnel_dump"
    args = dict(
        sw_if_index=sw_if_index
    )
    err_msg = f"Failed to get VXLAN dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)

    data = list() if interface is None else dict()
    for dump in details:
        if interface is None:
            data.append(process_vxlan_dump(dump))
        elif dump[u"sw_if_index"] == sw_if_index:
            data = process_vxlan_dump(dump)
            break

    logger.debug(f"VXLAN data:\n{data}")
    return data
def create_subinterface(
        node, interface, sub_id, outer_vlan_id=None, inner_vlan_id=None,
        type_subif=None):
    """Create sub-interface on node. It is possible to set required
    sub-interface type and VLAN tag(s).

    :param node: Node to add sub-interface.
    :param interface: Interface name on which create sub-interface.
    :param sub_id: ID of the sub-interface to be created.
    :param outer_vlan_id: Optional outer VLAN ID.
    :param inner_vlan_id: Optional inner VLAN ID.
    :param type_subif: Optional type of sub-interface. Values supported by
        VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
        [default_sub]
    :type node: dict
    :type interface: str or int
    :type sub_id: int
    :type outer_vlan_id: int
    :type inner_vlan_id: int
    :type type_subif: str
    :returns: Name and index of created sub-interface.
    :rtype: tuple
    :raises RuntimeError: If it is not possible to create sub-interface.
    """
    subif_types = type_subif.split()

    # Translate the space-separated type string into API flag bits.
    flags = 0
    if u"no_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_NO_TAGS
    if u"one_tag" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_ONE_TAG
    if u"two_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_TWO_TAGS
    if u"dot1ad" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DOT1AD
    if u"exact_match" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_EXACT_MATCH
    if u"default_sub" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DEFAULT
    if type_subif == u"default_sub":
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_INNER_VLAN_ID_ANY\
                | SubInterfaceFlags.SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY

    cmd = u"create_subif"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        sub_id=int(sub_id),
        sub_if_flags=flags.value if hasattr(flags, u"value")
        else int(flags),
        outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
        inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0
    )
    err_msg = f"Failed to create sub-interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new sub-interface in the topology.
    if_key = Topology.add_new_port(node, u"subinterface")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{sub_id}", sw_if_index
def create_gre_tunnel_interface(node, source_ip, destination_ip):
    """Create GRE tunnel interface on node.

    :param node: VPP node to add tunnel interface.
    :param source_ip: Source of the GRE tunnel.
    :param destination_ip: Destination of the GRE tunnel.
    :type node: dict
    :type source_ip: str
    :type destination_ip: str
    :returns: Name and index of created GRE tunnel interface.
    :rtype: tuple
    :raises RuntimeError: If unable to create GRE tunnel interface.
    """
    cmd = u"gre_tunnel_add_del"
    tunnel = dict(
        type=0,
        instance=Constants.BITWISE_NON_ZERO,
        src=str(source_ip),
        dst=str(destination_ip),
        outer_fib_id=0,
        session_id=0
    )
    args = dict(
        is_add=1,
        tunnel=tunnel
    )
    err_msg = f"Failed to create GRE tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new tunnel interface in the topology.
    if_key = Topology.add_new_port(node, u"gre_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return ifc_name, sw_if_index
def vpp_create_loopback(node, mac=None):
    """Create loopback interface on VPP node.

    :param node: Node to create loopback interface on.
    :param mac: Optional MAC address for loopback interface.
    :type node: dict
    :type mac: str
    :returns: SW interface index.
    :rtype: int
    :raises RuntimeError: If it is not possible to create loopback on the
        node.
    """
    cmd = u"create_loopback"
    args = dict(
        mac_address=L2Util.mac_to_bin(mac) if mac else 0
    )
    err_msg = f"Failed to create loopback interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new loopback in the topology.
    if_key = Topology.add_new_port(node, u"loopback")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
    if mac:
        # Read back the MAC actually applied by VPP.
        mac = InterfaceUtil.vpp_get_interface_mac(node, ifc_name)
        Topology.update_interface_mac_address(node, if_key, mac)

    return sw_if_index
def vpp_create_bond_interface(
        node, mode, load_balance=None, mac=None, gso=False):
    """Create bond interface on VPP node.

    :param node: DUT node from topology.
    :param mode: Link bonding mode.
    :param load_balance: Load balance (optional, valid for xor and lacp
        modes, otherwise ignored). Default: None.
    :param mac: MAC address to assign to the bond interface (optional).
    :param gso: Enable GSO support (optional). Default: False.
    :type node: dict
    :type mode: str
    :type load_balance: str
    :type mac: str
    :type gso: bool
    :returns: Interface key (name) in topology.
    :rtype: str
    :raises RuntimeError: If it is not possible to create bond interface on
        the node.
    """
    cmd = u"bond_create2"
    args = dict(
        id=int(Constants.BITWISE_NON_ZERO),
        use_custom_mac=bool(mac is not None),
        mac_address=L2Util.mac_to_bin(mac) if mac else None,
        # Map human-readable mode/lb strings onto the API enums.
        mode=getattr(
            LinkBondMode,
            f"BOND_API_MODE_{mode.replace(u'-', u'_').upper()}"
        ).value,
        lb=0 if load_balance is None else getattr(
            LinkBondLoadBalanceAlgo,
            f"BOND_API_LB_ALGO_{load_balance.upper()}"
        ).value,
        numa_only=False,
        enable_gso=gso
    )
    err_msg = f"Failed to create bond interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx=u"eth_bond"
    )
    if_key = Topology.get_interface_by_sw_index(node, sw_if_index)

    return if_key
def add_eth_interface(
        node, ifc_name=None, sw_if_index=None, ifc_pfx=None,
        host_if_key=None):
    """Add ethernet interface to current topology.

    :param node: DUT node from topology.
    :param ifc_name: Name of the interface.
    :param sw_if_index: SW interface index.
    :param ifc_pfx: Interface key prefix.
    :param host_if_key: Host interface key from topology file.
    :type node: dict
    :type ifc_name: str
    :type sw_if_index: int
    :type ifc_pfx: str
    :type host_if_key: str
    """
    if_key = Topology.add_new_port(node, ifc_pfx)

    # Resolve whichever of name/index was not supplied via a VPP dump.
    if ifc_name and sw_if_index is None:
        sw_if_index = InterfaceUtil.vpp_get_interface_sw_index(
            node, ifc_name
        )
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    if sw_if_index and ifc_name is None:
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
    ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_index)
    Topology.update_interface_mac_address(node, if_key, ifc_mac)
    if host_if_key is not None:
        # Inherit numa node and PCI address from the host interface.
        Topology.set_interface_numa_node(
            node, if_key, Topology.get_interface_numa_node(
                node, host_if_key
            )
        )
        Topology.update_interface_pci_address(
            node, if_key, Topology.get_interface_pci_addr(node, host_if_key)
        )
# Creates an AVF (Intel Adaptive Virtual Function) interface on a VF PCI
# address taken from the topology, then registers it as an "eth_avf" port.
1139 def vpp_create_avf_interface(
1140 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0):
1141 """Create AVF interface on VPP node.
1143 :param node: DUT node from topology.
1144 :param if_key: Interface key from topology file of interface
1145 to be bound to i40evf driver.
1146 :param num_rx_queues: Number of RX queues.
1147 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1148 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1151 :type num_rx_queues: int
1154 :returns: AVF interface key (name) in topology.
1156 :raises RuntimeError: If it is not possible to create AVF interface on
# Verbose AVF plugin logging is enabled first to aid debugging failures.
1159 PapiSocketExecutor.run_cli_cmd(
1160 node, u"set logging class avf level debug"
1164 vf_pci_addr = Topology.get_interface_pci_addr(node, if_key)
1166 pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
# rxq_num 0 means "API default" number of RX queues.
1168 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1172 err_msg = f"Failed to create AVF interface on host {node[u'host']}"
1173 with PapiSocketExecutor(node) as papi_exec:
1174 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1176 InterfaceUtil.add_eth_interface(
1177 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
1181 return Topology.get_interface_by_sw_index(node, sw_if_index)
# Creates an RDMA (rdma-core / Mellanox) interface bound to the kernel
# netdev resolved from the PCI address, copies the physical MAC onto it,
# and registers it in the topology as an "eth_rdma" port.
1184 def vpp_create_rdma_interface(
1185 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
1187 """Create RDMA interface on VPP node.
1189 :param node: DUT node from topology.
1190 :param if_key: Physical interface key from topology file of interface
1191 to be bound to rdma-core driver.
1192 :param num_rx_queues: Number of RX queues.
1193 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1194 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1195 :param mode: RDMA interface mode - auto/ibv/dv.
1198 :type num_rx_queues: int
1202 :returns: Interface key (name) in topology file.
1204 :raises RuntimeError: If it is not possible to create RDMA interface on
# Verbose RDMA plugin logging is enabled first to aid debugging failures.
1207 PapiSocketExecutor.run_cli_cmd(
1208 node, u"set logging class rdma level debug"
1211 cmd = u"rdma_create"
1212 pci_addr = Topology.get_interface_pci_addr(node, if_key)
# Both the VPP interface name and the host netdev are derived from PCI.
1214 name=InterfaceUtil.pci_to_eth(node, pci_addr),
1215 host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
1216 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1219 mode=getattr(RdmaMode, f"RDMA_API_MODE_{mode.upper()}").value,
1221 err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
1222 with PapiSocketExecutor(node) as papi_exec:
1223 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
# The new interface inherits the physical interface's MAC address.
1225 InterfaceUtil.vpp_set_interface_mac(
1226 node, sw_if_index, Topology.get_interface_mac(node, if_key)
1228 InterfaceUtil.add_eth_interface(
1229 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_rdma",
1233 return Topology.get_interface_by_sw_index(node, sw_if_index)
# Attaches a physical interface as a member of an existing bond interface
# via the "bond_add_member" PAPI call.
1236 def vpp_add_bond_member(node, interface, bond_if):
1237 """Add member interface to bond interface on VPP node.
1239 :param node: DUT node from topology.
1240 :param interface: Physical interface key from topology file.
1241 :param bond_if: Bond interface key from topology file.
1243 :type interface: str
1245 :raises RuntimeError: If it is not possible to add member to bond
1246 interface on the node.
1248 cmd = u"bond_add_member"
1250 sw_if_index=Topology.get_interface_sw_index(node, interface),
1251 bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
1253 is_long_timeout=False
1255 err_msg = f"Failed to add member {interface} to bond interface " \
1256 f"{bond_if} on host {node[u'host']}"
1257 with PapiSocketExecutor(node) as papi_exec:
1258 papi_exec.add(cmd, **args).get_reply(err_msg)
# Dumps all bond interfaces on the node and logs a human-readable summary;
# the verbose flag adds the per-member interface listing.
1261 def vpp_show_bond_data_on_node(node, verbose=False):
1262 """Show (detailed) bond information on VPP node.
1264 :param node: DUT node from topology.
1265 :param verbose: If detailed information is required or not.
1269 cmd = u"sw_bond_interface_dump"
1270 err_msg = f"Failed to get bond interface dump on host {node[u'host']}"
1272 data = f"Bond data on node {node[u'host']}:\n"
1273 with PapiSocketExecutor(node) as papi_exec:
1274 details = papi_exec.add(cmd).get_details(err_msg)
1276 for bond in details:
1277 data += f"{bond[u'interface_name']}\n"
# API enum names are stripped of their prefixes for readability.
1278 data += u" mode: {m}\n".format(
1279 m=bond[u"mode"].name.replace(u"BOND_API_MODE_", u"").lower()
1281 data += u" load balance: {lb}\n".format(
1282 lb=bond[u"lb"].name.replace(u"BOND_API_LB_ALGO_", u"").lower()
1284 data += f" number of active members: {bond[u'active_members']}\n"
# Members are resolved per bond via a second dump keyed by sw_if_index.
1286 member_data = InterfaceUtil.vpp_bond_member_dump(
1287 node, Topology.get_interface_by_sw_index(
1288 node, bond[u"sw_if_index"]
# Active (non-passive) members are listed first, then the full count.
1291 for member in member_data:
1292 if not member[u"is_passive"]:
1293 data += f" {member[u'interface_name']}\n"
1294 data += f" number of members: {bond[u'members']}\n"
1296 for member in member_data:
1297 data += f" {member[u'interface_name']}\n"
1298 data += f" interface id: {bond[u'id']}\n"
1299 data += f" sw_if_index: {bond[u'sw_if_index']}\n"
# Dumps member (formerly "slave") interface details of a bond interface
# via the "sw_member_interface_dump" PAPI call.
1303 def vpp_bond_member_dump(node, interface):
1304 """Get bond interface member data on VPP node.
1306 :param node: DUT node from topology.
1307 :param interface: Physical interface key from topology file.
1309 :type interface: str
1310 :returns: Bond member interface data.
1313 cmd = u"sw_member_interface_dump"
1315 sw_if_index=Topology.get_interface_sw_index(node, interface)
# NOTE(review): error message still uses the legacy "slave" wording while
# the API uses "member" -- left unchanged as it is a runtime string.
1317 err_msg = f"Failed to get slave dump on host {node[u'host']}"
1319 with PapiSocketExecutor(node) as papi_exec:
1320 details = papi_exec.add(cmd, **args).get_details(err_msg)
1322 logger.debug(f"Member data:\n{details}")
def vpp_show_bond_data_on_all_nodes(nodes, verbose=False):
    """Show (detailed) bond information on every DUT node in the topology.

    Non-DUT nodes (e.g. traffic generators) are silently skipped.

    :param nodes: Nodes in the topology.
    :param verbose: If detailed information is required or not.
    :type nodes: dict
    :type verbose: bool
    """
    duts = (
        node for node in nodes.values() if node[u"type"] == NodeType.DUT
    )
    for dut in duts:
        InterfaceUtil.vpp_show_bond_data_on_node(dut, verbose)
# Enables input ACL classification on an interface: exactly one of the
# ip4/ip6/l2 table indexes is set to table_index (matching ip_version),
# the other two are masked out with BITWISE_NON_ZERO (~0 = "no table").
1339 def vpp_enable_input_acl_interface(
1340 node, interface, ip_version, table_index):
1341 """Enable input acl on interface.
1343 :param node: VPP node to setup interface for input acl.
1344 :param interface: Interface to setup input acl.
1345 :param ip_version: Version of IP protocol.
1346 :param table_index: Classify table index.
1348 :type interface: str or int
1349 :type ip_version: str
1350 :type table_index: int
1352 cmd = u"input_acl_set_interface"
1354 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1355 ip4_table_index=table_index if ip_version == u"ip4"
1356 else Constants.BITWISE_NON_ZERO,
1357 ip6_table_index=table_index if ip_version == u"ip6"
1358 else Constants.BITWISE_NON_ZERO,
1359 l2_table_index=table_index if ip_version == u"l2"
1360 else Constants.BITWISE_NON_ZERO,
1362 err_msg = f"Failed to enable input acl on interface {interface}"
1363 with PapiSocketExecutor(node) as papi_exec:
1364 papi_exec.add(cmd, **args).get_reply(err_msg)
# Looks up the classify table attached to an interface via the
# "classify_table_by_interface" PAPI call.
1367 def get_interface_classify_table(node, interface):
1368 """Get name of classify table for the given interface.
1370 TODO: Move to Classify.py.
1372 :param node: VPP node to get data from.
1373 :param interface: Name or sw_if_index of a specific interface.
1375 :type interface: str or int
1376 :returns: Classify table name.
# A string interface is resolved to its sw_if_index; anything else is
# assumed to already be an sw_if_index.
1379 if isinstance(interface, str):
1380 sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
1382 sw_if_index = interface
1384 cmd = u"classify_table_by_interface"
1386 sw_if_index=sw_if_index
1388 err_msg = f"Failed to get classify table name by interface {interface}"
1389 with PapiSocketExecutor(node) as papi_exec:
1390 reply = papi_exec.add(cmd, **args).get_reply(err_msg)
# Thin lookup wrapper: resolves an interface name to its sw_if_index by
# asking VPP for the interface data and reading the "sw_if_index" field.
1395 def get_sw_if_index(node, interface_name):
1396 """Get sw_if_index for the given interface from actual interface dump.
1398 FIXME: Delete and redirect callers to vpp_get_interface_sw_index.
1400 :param node: VPP node to get interface data from.
1401 :param interface_name: Name of the specific interface.
1403 :type interface_name: str
1404 :returns: sw_if_index of the given interface.
1407 interface_data = InterfaceUtil.vpp_get_interface_data(
1408 node, interface=interface_name
# .get() returns None if the dump did not contain "sw_if_index".
1410 return interface_data.get(u"sw_if_index")
# Dumps VxLAN-GPE tunnel data; with a name it returns one processed dict,
# without a name it returns a list of processed dicts for all tunnels.
1413 def vxlan_gpe_dump(node, interface_name=None):
1414 """Get VxLAN GPE data for the given interface.
1416 :param node: VPP node to get interface data from.
1417 :param interface_name: Name of the specific interface. If None,
1418 information about all VxLAN GPE interfaces is returned.
1420 :type interface_name: str
1421 :returns: Dictionary containing data for the given VxLAN GPE interface
1422 or if interface=None, the list of dictionaries with all VxLAN GPE
1424 :rtype: dict or list
1426 def process_vxlan_gpe_dump(vxlan_dump):
1427 """Process vxlan_gpe dump.
1429 :param vxlan_dump: Vxlan_gpe interface dump.
1430 :type vxlan_dump: dict
1431 :returns: Processed vxlan_gpe interface dump.
# Raw local/remote addresses are converted to ip_address objects; for
# IPv4 only the first 4 bytes of the (16-byte) field are used.
1434 if vxlan_dump[u"is_ipv6"]:
1435 vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"])
1436 vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"])
1438 vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"][0:4])
1439 vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"][0:4])
# BITWISE_NON_ZERO (~0) requests a dump of all VxLAN-GPE tunnels.
1442 if interface_name is not None:
1443 sw_if_index = InterfaceUtil.get_interface_index(
1444 node, interface_name
1447 sw_if_index = int(Constants.BITWISE_NON_ZERO)
1449 cmd = u"vxlan_gpe_tunnel_dump"
1451 sw_if_index=sw_if_index
1453 err_msg = f"Failed to get VXLAN-GPE dump on host {node[u'host']}"
1454 with PapiSocketExecutor(node) as papi_exec:
1455 details = papi_exec.add(cmd, **args).get_details(err_msg)
# Result shape depends on whether a single interface was requested.
1457 data = list() if interface_name is None else dict()
1458 for dump in details:
1459 if interface_name is None:
1460 data.append(process_vxlan_gpe_dump(dump))
1461 elif dump[u"sw_if_index"] == sw_if_index:
1462 data = process_vxlan_gpe_dump(dump)
1465 logger.debug(f"VXLAN-GPE data:\n{data}")
# Puts an interface into a VRF/FIB table via "sw_interface_set_table".
1469 def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
1470 """Assign VPP interface to specific VRF/FIB table.
1472 :param node: VPP node where the FIB and interface are located.
1473 :param interface: Interface to be assigned to FIB.
1474 :param table_id: VRF table ID.
1475 :param ipv6: Assign to IPv6 table. Default False.
1477 :type interface: str or int
1481 cmd = u"sw_interface_set_table"
1483 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1485 vrf_id=int(table_id)
1487 err_msg = f"Failed to assign interface {interface} to FIB table"
1488 with PapiSocketExecutor(node) as papi_exec:
1489 papi_exec.add(cmd, **args).get_reply(err_msg)
def set_linux_interface_mac(
        node, interface, mac, namespace=None, vf_id=None):
    """Assign a MAC address to a linux interface, or to one of its VFs.

    Runs ``ip link set`` (optionally inside a network namespace) via sudo.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param mac: MAC to be assigned to interface.
    :param namespace: Execute command in namespace. Optional
    :param vf_id: Virtual Function id. Optional
    :type node: dict
    :type interface: str
    :type mac: str
    :type namespace: str
    :type vf_id: int
    """
    # Target either the whole interface or a single virtual function.
    if vf_id is None:
        mac_str = f"address {mac}"
    else:
        mac_str = f"vf {vf_id} mac {mac}"
    ns_str = f"ip netns exec {namespace}" if namespace else u""
    cmd = f"{ns_str} ip link set {interface} {mac_str}"
    exec_cmd_no_error(node, cmd, sudo=True)
def set_linux_interface_trust_on(
        node, interface, namespace=None, vf_id=None):
    """Enable "trust" mode for a linux interface or one of its VFs.

    Runs ``ip link set dev ... trust on`` (optionally inside a network
    namespace) via sudo.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param namespace: Execute command in namespace. Optional
    :param vf_id: Virtual Function id. Optional
    :type node: dict
    :type interface: str
    :type namespace: str
    :type vf_id: int
    """
    # Target either the whole interface or a single virtual function.
    if vf_id is None:
        trust_str = u"trust on"
    else:
        trust_str = f"vf {vf_id} trust on"
    ns_str = f"ip netns exec {namespace}" if namespace else u""
    cmd = f"{ns_str} ip link set dev {interface} {trust_str}"
    exec_cmd_no_error(node, cmd, sudo=True)
# Disables spoof checking for a linux interface (or one of its VFs) by
# running "ip link set dev ... spoof off" via sudo.
1535 def set_linux_interface_spoof_off(
1536 node, interface, namespace=None, vf_id=None):
1537 """Set spoof off for interface in linux.
1539 :param node: Node where to execute command.
1540 :param interface: Interface in namespace.
1541 :param namespace: Execute command in namespace. Optional
1542 :param vf_id: Virtual Function id. Optional
1544 :type interface: str
1545 :type namespace: str
# When vf_id is given the command targets that VF only; presumably the
# else branch is plain u"spoof off" -- TODO confirm in the full file.
1548 spoof_str = f"vf {vf_id} spoof off" if vf_id is not None \
1550 ns_str = f"ip netns exec {namespace}" if namespace else u""
1552 cmd = f"{ns_str} ip link set dev {interface} {spoof_str}"
1553 exec_cmd_no_error(node, cmd, sudo=True)
def set_linux_interface_state(
        node, interface, namespace=None, state=u"up"):
    """Bring a linux interface up or down.

    Runs ``ip link set dev ...`` (optionally inside a network namespace)
    via sudo.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param namespace: Execute command in namespace. Optional
    :param state: Up/Down.
    :type node: dict
    :type interface: str
    :type namespace: str
    :type state: str
    """
    if namespace:
        ns_str = f"ip netns exec {namespace}"
    else:
        ns_str = u""
    cmd = f"{ns_str} ip link set dev {interface} {state}"
    exec_cmd_no_error(node, cmd, sudo=True)
# Prepares a physical NIC for AVF testing: rebinds the PF to its kernel
# driver if needed, creates SR-IOV VFs, configures each VF (trust, MAC,
# optionally spoof off), binds VFs to the uio driver, and records the new
# VF ports in the topology.
1575 def init_avf_interface(node, ifc_key, numvfs=1, osi_layer=u"L2"):
1576 """Init PCI device by creating VIFs and bind them to vfio-pci for AVF
1577 driver testing on DUT.
1579 :param node: DUT node.
1580 :param ifc_key: Interface key from topology file.
1581 :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
1582 :param osi_layer: OSI Layer type to initialize TG with.
1583 Default value "L2" sets linux interface spoof off.
1587 :type osi_layer: str
1588 :returns: Virtual Function topology interface keys.
1590 :raises RuntimeError: If a reason preventing initialization is found.
1592 # Read PCI address and driver.
1593 pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
1594 pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
1595 uio_driver = Topology.get_uio_driver(node)
1596 kernel_driver = Topology.get_interface_driver(node, ifc_key)
1597 if kernel_driver not in (u"ice", u"iavf", u"i40e", u"i40evf"):
# NOTE(review): message below is missing a space before "at" when the
# f-strings concatenate; runtime string left unchanged here.
1599 f"AVF needs ice or i40e compatible driver, not {kernel_driver}"
1600 f"at node {node[u'host']} ifc {ifc_key}"
# PCI addresses use ":" which must be escaped for the sysfs lookup.
1602 current_driver = DUTSetup.get_pci_dev_driver(
1603 node, pf_pci_addr.replace(u":", r"\:"))
1605 VPPUtil.stop_vpp_service(node)
1606 if current_driver != kernel_driver:
1607 # PCI device must be re-bound to kernel driver before creating VFs.
1608 DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
1609 # Stop VPP to prevent deadlock.
1610 # Unbind from current driver.
1611 DUTSetup.pci_driver_unbind(node, pf_pci_addr)
1612 # Bind to kernel driver.
1613 DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
1615 # Initialize PCI VFs.
1616 DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
1619 # Set MAC address and bind each virtual function to uio driver.
1620 for vf_id in range(numvfs):
# VF MAC is derived from the PF MAC with the last octet set to vf_id.
1621 vf_mac_addr = u":".join(
1622 [pf_mac_addr[0], pf_mac_addr[2], pf_mac_addr[3], pf_mac_addr[4],
1623 pf_mac_addr[5], f"{vf_id:02x}"
# The PF netdev name is resolved at shell-execution time via sysfs glob.
1627 pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"
1628 InterfaceUtil.set_linux_interface_trust_on(
1629 node, pf_dev, vf_id=vf_id
# Spoof checking is only disabled for L2 scenarios.
1631 if osi_layer == u"L2":
1632 InterfaceUtil.set_linux_interface_spoof_off(
1633 node, pf_dev, vf_id=vf_id
1635 InterfaceUtil.set_linux_interface_mac(
1636 node, pf_dev, vf_mac_addr, vf_id=vf_id
1638 InterfaceUtil.set_linux_interface_state(
1639 node, pf_dev, state=u"up"
1642 DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
1643 DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
1645 # Add newly created ports into topology file
1646 vf_ifc_name = f"{ifc_key}_vif"
1647 vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
1648 vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
1649 Topology.update_interface_name(
1650 node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
1652 Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
1653 Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
1654 Topology.set_interface_numa_node(
1655 node, vf_ifc_key, Topology.get_interface_numa_node(
1659 vf_ifc_keys.append(vf_ifc_key)
# Collects RX-queue placement for every topology interface that has a
# vpp_sw_index, batching all dump calls into one PAPI transaction.
1664 def vpp_sw_interface_rx_placement_dump(node):
1665 """Dump VPP interface RX placement on node.
1667 :param node: Node to run command on.
1669 :returns: Thread mapping information as a list of dictionaries.
1672 cmd = u"sw_interface_rx_placement_dump"
1673 err_msg = f"Failed to run '{cmd}' PAPI command on host {node[u'host']}!"
1674 with PapiSocketExecutor(node) as papi_exec:
1675 for ifc in node[u"interfaces"].values():
1676 if ifc[u"vpp_sw_index"] is not None:
1677 papi_exec.add(cmd, sw_if_index=ifc[u"vpp_sw_index"])
1678 details = papi_exec.get_details(err_msg)
# Deterministic ordering for callers that iterate placements.
1679 return sorted(details, key=lambda k: k[u"sw_if_index"])
# Pins one RX queue of an interface to a specific worker thread via the
# "sw_interface_set_rx_placement" PAPI call.
1682 def vpp_sw_interface_set_rx_placement(
1683 node, sw_if_index, queue_id, worker_id):
1684 """Set interface RX placement to worker on node.
1686 :param node: Node to run command on.
1687 :param sw_if_index: VPP SW interface index.
1688 :param queue_id: VPP interface queue ID.
1689 :param worker_id: VPP worker ID (indexing from 0).
1691 :type sw_if_index: int
1693 :type worker_id: int
1694 :raises RuntimeError: If failed to run command on host or if no API
1697 cmd = u"sw_interface_set_rx_placement"
1698 err_msg = f"Failed to set interface RX placement to worker " \
1699 f"on host {node[u'host']}!"
1701 sw_if_index=sw_if_index,
1703 worker_id=worker_id,
1706 with PapiSocketExecutor(node) as papi_exec:
1707 papi_exec.add(cmd, **args).get_reply(err_msg)
# Spreads RX queues of all interfaces whose name contains `prefix` over
# the node's worker threads in round-robin order, optionally capped at
# dp_worker_limit workers.
1710 def vpp_round_robin_rx_placement(
1711 node, prefix, dp_worker_limit=None
1713 """Set Round Robin interface RX placement on all worker threads
1716 If specified, dp_core_limit limits the number of physical cores used
1717 for data plane I/O work. Other cores are presumed to do something else,
1718 e.g. asynchronous crypto processing.
1719 None means all workers are used for data plane work.
1720 Note this keyword specifies workers, not cores.
1722 :param node: Topology nodes.
1723 :param prefix: Interface name prefix.
1724 :param dp_worker_limit: How many cores for data plane work.
1727 :type dp_worker_limit: Optional[int]
# Worker count excludes the main thread (hence the -1).
1730 worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
1731 if dp_worker_limit is not None:
1732 worker_cnt = min(worker_cnt, dp_worker_limit)
# NOTE(review): if worker_cnt ends up 0 (no worker threads), the modulo
# below would raise ZeroDivisionError -- confirm an early return exists.
1735 for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
1736 for interface in node[u"interfaces"].values():
1737 if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
1738 and prefix in interface[u"name"]:
1739 InterfaceUtil.vpp_sw_interface_set_rx_placement(
1740 node, placement[u"sw_if_index"], placement[u"queue_id"],
1741 worker_id % worker_cnt
1746 def vpp_round_robin_rx_placement_on_all_duts(
1747 nodes, prefix, dp_core_limit=None
1749 """Set Round Robin interface RX placement on all worker threads
1752 If specified, dp_core_limit limits the number of physical cores used
1753 for data plane I/O work. Other cores are presumed to do something else,
1754 e.g. asynchronous crypto processing.
1755 None means all cores are used for data plane work.
1756 Note this keyword specifies cores, not workers.
1758 :param nodes: Topology nodes.
1759 :param prefix: Interface name prefix.
1760 :param dp_worker_limit: How many cores for data plane work.
1763 :type dp_worker_limit: Optional[int]
1765 for node in nodes.values():
1766 if node[u"type"] == NodeType.DUT:
1767 dp_worker_limit = CpuUtils.worker_count_from_cores_and_smt(
1768 phy_cores=dp_core_limit,
1769 smt_used=CpuUtils.is_smt_enabled(node[u"cpuinfo"]),
1771 InterfaceUtil.vpp_round_robin_rx_placement(
1772 node, prefix, dp_worker_limit