1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Interface util library."""
16 from time import sleep
17 from enum import IntEnum
19 from ipaddress import ip_address
20 from robot.api import logger
22 from resources.libraries.python.Constants import Constants
23 from resources.libraries.python.DUTSetup import DUTSetup
24 from resources.libraries.python.IPAddress import IPAddress
25 from resources.libraries.python.L2Util import L2Util
26 from resources.libraries.python.PapiExecutor import PapiSocketExecutor
27 from resources.libraries.python.parsers.JsonParser import JsonParser
28 from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
29 from resources.libraries.python.topology import NodeType, Topology
30 from resources.libraries.python.VPPUtil import VPPUtil
class InterfaceStatusFlags(IntEnum):
    """Interface status flags."""
    # Administrative state is up.
    IF_STATUS_API_FLAG_ADMIN_UP = 1
    # Physical link is up.
    IF_STATUS_API_FLAG_LINK_UP = 2
class MtuProto(IntEnum):
    """MTU protocol, mirrors VPP API vl_api_mtu_proto_t.

    The visible block lost the first three members to truncation;
    restored from the VPP interface API definition.
    """
    MTU_PROTO_API_L3 = 0
    MTU_PROTO_API_IP4 = 1
    MTU_PROTO_API_IP6 = 2
    MTU_PROTO_API_MPLS = 3
class LinkDuplex(IntEnum):
    """Link duplex mode."""
    LINK_DUPLEX_API_UNKNOWN = 0
    LINK_DUPLEX_API_HALF = 1
    LINK_DUPLEX_API_FULL = 2
class SubInterfaceFlags(IntEnum):
    """Sub-interface flags."""
    # Values are bit flags; callers OR them together (see create_subinterface).
    SUB_IF_API_FLAG_NO_TAGS = 1
    SUB_IF_API_FLAG_ONE_TAG = 2
    SUB_IF_API_FLAG_TWO_TAGS = 4
    SUB_IF_API_FLAG_DOT1AD = 8
    SUB_IF_API_FLAG_EXACT_MATCH = 16
    SUB_IF_API_FLAG_DEFAULT = 32
    SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY = 64
    SUB_IF_API_FLAG_INNER_VLAN_ID_ANY = 128
    SUB_IF_API_FLAG_DOT1AH = 256
class RxMode(IntEnum):
    """RX mode."""
    RX_MODE_API_UNKNOWN = 0
    RX_MODE_API_POLLING = 1
    RX_MODE_API_INTERRUPT = 2
    RX_MODE_API_ADAPTIVE = 3
    RX_MODE_API_DEFAULT = 4
class IfType(IntEnum):
    """Interface type, mirrors VPP API vl_api_if_type_t.

    The visible block lost the last three members to truncation;
    restored from the VPP interface API definition.
    """
    # A hardware interface.
    IF_API_TYPE_HARDWARE = 0
    # A sub-interface.
    IF_API_TYPE_SUB = 1
    IF_API_TYPE_P2P = 2
    IF_API_TYPE_PIPE = 3
class LinkBondLoadBalanceAlgo(IntEnum):
    """Link bonding load balance algorithm."""
    # Layer 2 (MAC) hashing.
    BOND_API_LB_ALGO_L2 = 0
    # Layer 3+4 (IP + port) hashing.
    BOND_API_LB_ALGO_L34 = 1
    # Layer 2+3 hashing.
    BOND_API_LB_ALGO_L23 = 2
    # Round robin.
    BOND_API_LB_ALGO_RR = 3
    # Broadcast.
    BOND_API_LB_ALGO_BC = 4
    # Active-backup.
    BOND_API_LB_ALGO_AB = 5
class LinkBondMode(IntEnum):
    """Link bonding mode."""
    BOND_API_MODE_ROUND_ROBIN = 1
    BOND_API_MODE_ACTIVE_BACKUP = 2
    BOND_API_MODE_XOR = 3
    BOND_API_MODE_BROADCAST = 4
    BOND_API_MODE_LACP = 5
class RdmaMode(IntEnum):
    """RDMA interface mode, mirrors VPP API vl_api_rdma_mode_t.

    The DV member appears truncated in the visible block; restored
    from the VPP rdma plugin API definition.
    """
    RDMA_API_MODE_AUTO = 0
    RDMA_API_MODE_IBV = 1
    RDMA_API_MODE_DV = 2
class AfXdpMode(IntEnum):
    """AF_XDP interface mode."""
    AF_XDP_API_MODE_AUTO = 0
    AF_XDP_API_MODE_COPY = 1
    AF_XDP_API_MODE_ZERO_COPY = 2
121 """General utilities for managing interfaces"""
def pci_to_int(pci_str):
    """Convert PCI address from string format (0000:18:0a.0) to
    integer representation (169345024).

    :param pci_str: PCI address in string representation.
    :type pci_str: str
    :returns: Integer representation of PCI address.
    :rtype: int
    """
    domain, bus, rest = pci_str.split(u":")[0:3]
    slot, function = rest.split(u".")[0:2]
    # Pack the four fields into one integer, matching VPP's layout:
    # domain in the low bits, bus at 16, slot at 24, function at 29.
    return int(domain, 16) \
        | int(bus, 16) << 16 \
        | int(slot, 16) << 24 \
        | int(function, 16) << 29
def pci_to_eth(node, pci_str):
    """Convert PCI address on DUT to Linux ethernet name.

    :param node: DUT node
    :param pci_str: PCI address.
    :type node: dict
    :type pci_str: str
    :returns: Ethernet name.
    :rtype: str
    :raises RuntimeError: If the PCI address has no netdev entry in sysfs.
    """
    # The interface name is the single entry under the device's net/ dir.
    cmd = f"basename /sys/bus/pci/devices/{pci_str}/net/*"
    try:
        stdout, _ = exec_cmd_no_error(node, cmd)
    except RuntimeError:
        raise RuntimeError(f"Cannot convert {pci_str} to ethernet name!")

    return stdout.strip()
def get_interface_index(node, interface):
    """Get interface sw_if_index from topology file.

    :param node: Node where the interface is.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: str or int
    :returns: SW interface index.
    :rtype: int
    :raises TypeError: If interface is neither an index nor a name string.
    """
    try:
        # Fast path: interface already is (or parses as) a numeric index.
        sw_if_index = int(interface)
    except ValueError:
        # Not numeric: resolve as a topology interface key, then by name.
        sw_if_index = Topology.get_interface_sw_index(node, interface)
        if sw_if_index is None:
            sw_if_index = \
                Topology.get_interface_sw_index_by_name(node, interface)
    except TypeError as err:
        raise TypeError(f"Wrong interface format {interface}") from err

    return sw_if_index
def set_interface_state(node, interface, state, if_type=u"key"):
    """Set interface state on a node.

    Function can be used for DUTs as well as for TGs.

    :param node: Node where the interface is.
    :param interface: Interface key or sw_if_index or name.
    :param state: One of 'up' or 'down'.
    :param if_type: Interface type
    :type node: dict
    :type interface: str or int
    :type state: str
    :type if_type: str
    :returns: Nothing.
    :raises ValueError: If the interface type is unknown.
    :raises ValueError: If the state of interface is unexpected.
    :raises ValueError: If the node has an unknown node type.
    """
    if if_type == u"key":
        if isinstance(interface, str):
            sw_if_index = Topology.get_interface_sw_index(node, interface)
            iface_name = Topology.get_interface_name(node, interface)
        else:
            sw_if_index = interface
    elif if_type == u"name":
        iface_key = Topology.get_interface_by_name(node, interface)
        if iface_key is not None:
            sw_if_index = Topology.get_interface_sw_index(node, iface_key)
        iface_name = interface
    else:
        raise ValueError(f"Unknown if_type: {if_type}")

    if node[u"type"] == NodeType.DUT:
        if sw_if_index is None:
            raise ValueError(
                f"Interface index for {interface} not assigned by VPP."
            )
        if state == u"up":
            flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
        elif state == u"down":
            flags = 0
        else:
            raise ValueError(f"Unexpected interface state: {state}")
        cmd = u"sw_interface_set_flags"
        err_msg = f"Failed to set interface state on host {node[u'host']}"
        args = dict(
            sw_if_index=int(sw_if_index),
            flags=flags
        )
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
    elif node[u"type"] == NodeType.TG or node[u"type"] == NodeType.VM:
        cmd = f"ip link set {iface_name} {state}"
        exec_cmd_no_error(node, cmd, sudo=True)
    else:
        raise ValueError(
            f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
        )
def set_interface_state_pci(
        node, pf_pcis, namespace=None, state=u"up"):
    """Set operational state for interface specified by PCI address.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param namespace: Exec command in namespace. (Optional, Default: none)
    :param state: Up/Down. (Optional, default: up)
    :type node: dict
    :type pf_pcis: list
    :type namespace: str
    :type state: str
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        InterfaceUtil.set_linux_interface_state(
            node, pf_eth, namespace=namespace, state=state
        )
def set_interface_mtu(node, pf_pcis, mtu=9200):
    """Set Ethernet MTU for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param mtu: MTU to set. Default: 9200.
    :type node: dict
    :type pf_pcis: list
    :type mtu: int
    :raises RuntimeError: If failed to set MTU on interface.
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ip link set {pf_eth} mtu {mtu}"
        exec_cmd_no_error(node, cmd, sudo=True)
def set_interface_channels(
        node, pf_pcis, num_queues=1, channel=u"combined"):
    """Set interface channels for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param num_queues: Number of channels. (Optional, Default: 1)
    :param channel: Channel type. (Optional, Default: combined)
    :type node: dict
    :type pf_pcis: list
    :type num_queues: int
    :type channel: str
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ethtool --set-channels {pf_eth} {channel} {num_queues}"
        exec_cmd_no_error(node, cmd, sudo=True)
def set_interface_flow_control(node, pf_pcis, rxf=u"off", txf=u"off"):
    """Set Ethernet flow control for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param rxf: RX flow. (Optional, Default: off).
    :param txf: TX flow. (Optional, Default: off).
    :type node: dict
    :type pf_pcis: list
    :type rxf: str
    :type txf: str
    :raises RuntimeError: If failed to set flow control on interface.
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ethtool -A {pf_eth} rx {rxf} tx {txf}"
        ret_code, _, _ = exec_cmd(node, cmd, sudo=True)
        # Exit code 78 is tolerated in addition to 0 — presumably
        # "requested setting already active"; confirm against ethtool docs.
        if int(ret_code) not in (0, 78):
            # Bug fix: original message lacked the f-prefix, so the
            # placeholder {pf_eth} was printed literally.
            raise RuntimeError(f"Failed to set flow control on {pf_eth}!")
def set_pci_parameter(node, pf_pcis, key, value):
    """Set PCI parameter for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param key: Key to set.
    :param value: Value to set.
    :type node: dict
    :type pf_pcis: list
    :type key: str
    :type value: str
    """
    for pf_pci in pf_pcis:
        cmd = f"setpci -s {pf_pci} {key}={value}"
        exec_cmd_no_error(node, cmd, sudo=True)
def vpp_set_interface_mtu(node, interface, mtu=9200):
    """Set Ethernet MTU on interface.

    Doc fix: parameter descriptions for interface and mtu were swapped.

    :param node: VPP node.
    :param interface: Interface to setup MTU.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type interface: str or int
    :type mtu: int
    """
    if isinstance(interface, str):
        sw_if_index = Topology.get_interface_sw_index(node, interface)
    else:
        sw_if_index = interface

    cmd = u"hw_interface_set_mtu"
    err_msg = f"Failed to set interface MTU on host {node[u'host']}"
    args = dict(
        sw_if_index=sw_if_index,
        mtu=int(mtu)
    )
    try:
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
    except AssertionError as err:
        # Best-effort: some drivers reject MTU changes; log and continue.
        logger.debug(f"Setting MTU failed.\n{err}")
def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
    """Set Ethernet MTU on all interfaces.

    :param node: VPP node.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type mtu: int
    """
    for interface in node[u"interfaces"]:
        InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
    """Set Ethernet MTU on all interfaces on all DUTs.

    :param nodes: VPP nodes.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type nodes: dict
    :type mtu: int
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
def vpp_node_interfaces_ready_wait(node, retries=15):
    """Wait until all interfaces with admin-up are in link-up state.

    :param node: Node to wait on.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type node: dict
    :type retries: int
    :raises RuntimeError: If any interface is not in link-up state after
        defined number of retries.
    """
    for _ in range(0, retries):
        not_ready = list()
        out = InterfaceUtil.vpp_get_interface_data(node)
        for interface in out:
            # flags == 1 means only ADMIN_UP is set (LINK_UP flag is 2),
            # see InterfaceStatusFlags.
            if interface.get(u"flags") == 1:
                not_ready.append(interface.get(u"interface_name"))
        if not not_ready:
            break
        logger.debug(
            f"Interfaces still not in link-up state:\n{not_ready}"
        )
        sleep(1)
    else:
        err = f"Timeout, interfaces not up:\n{not_ready}" \
            if u"not_ready" in locals() else u"No check executed!"
        raise RuntimeError(err)
def all_vpp_interfaces_ready_wait(nodes, retries=15):
    """Wait until all interfaces with admin-up are in link-up state for all
    nodes in the topology.

    :param nodes: Nodes in the topology.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type nodes: dict
    :type retries: int
    :raises RuntimeError: If any interface is not in link-up state after
        defined number of retries.
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)
def vpp_get_interface_data(node, interface=None):
    """Get all interface data from a VPP node. If a name or
    sw_interface_index is provided, return only data for the matching
    interface(s).

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: List of dictionaries containing data for each interface, or a
        single dictionary for the specified interface.
    :rtype: list or dict
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    def process_if_dump(if_dump):
        """Process interface dump.

        Converts API enum/binary fields into plain strings and ints.

        :param if_dump: Interface dump.
        :type if_dump: dict
        :returns: Processed interface dump.
        :rtype: dict
        """
        if_dump[u"l2_address"] = str(if_dump[u"l2_address"])
        if_dump[u"b_dmac"] = str(if_dump[u"b_dmac"])
        if_dump[u"b_smac"] = str(if_dump[u"b_smac"])
        if_dump[u"flags"] = if_dump[u"flags"].value
        if_dump[u"type"] = if_dump[u"type"].value
        if_dump[u"link_duplex"] = if_dump[u"link_duplex"].value
        if_dump[u"sub_if_flags"] = if_dump[u"sub_if_flags"].value \
            if hasattr(if_dump[u"sub_if_flags"], u"value") \
            else int(if_dump[u"sub_if_flags"])

        return if_dump

    if interface is not None:
        if isinstance(interface, str):
            param = u"interface_name"
        elif isinstance(interface, int):
            param = u"sw_if_index"
        else:
            raise TypeError(f"Wrong interface format {interface}")
    else:
        param = u""

    cmd = u"sw_interface_dump"
    args = dict(
        name_filter_valid=False,
        name_filter=u""
    )
    err_msg = f"Failed to get interface dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)
    logger.debug(f"Received data:\n{details!r}")

    data = list() if interface is None else dict()
    for dump in details:
        if interface is None:
            data.append(process_if_dump(dump))
        # Names from the API may be NUL-padded; strip before comparing.
        elif str(dump.get(param)).rstrip(u"\x00") == str(interface):
            data = process_if_dump(dump)
            break

    logger.debug(f"Interface data:\n{data}")
    return data
def vpp_get_interface_name(node, sw_if_index):
    """Get interface name for the given SW interface index from actual
    interface dump.

    :param node: VPP node to get interface data from.
    :param sw_if_index: SW interface index of the specific interface.
    :type node: dict
    :type sw_if_index: int
    :returns: Name of the given interface.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
    # When the supporting (sup) index differs, re-dump using it so the
    # name of the supporting interface is returned.
    if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data[u"sup_sw_if_index"]
        )

    return if_data.get(u"interface_name")
def vpp_get_interface_sw_index(node, interface_name):
    """Get SW interface index for the given interface name from actual
    interface dump.

    (Doc fix: the previous docstring was a copy-paste of the name getter.)

    :param node: VPP node to get interface data from.
    :param interface_name: Interface name.
    :type node: dict
    :type interface_name: str
    :returns: SW interface index of the given interface.
    :rtype: int
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)

    return if_data.get(u"sw_if_index")
def vpp_get_interface_mac(node, interface):
    """Get MAC address for the given interface from actual interface dump.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: MAC address.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
    # When the supporting (sup) index differs, use the supporting
    # interface's data for the MAC.
    if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data[u"sup_sw_if_index"])

    return if_data.get(u"l2_address")
def vpp_set_interface_mac(node, interface, mac):
    """Set MAC address for the given interface.

    :param node: VPP node to set interface MAC.
    :param interface: Numeric index or name string of a specific interface.
    :param mac: Required MAC address.
    :type node: dict
    :type interface: int or str
    :type mac: str
    """
    cmd = u"sw_interface_set_mac_address"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        mac_address=L2Util.mac_to_bin(mac)
    )
    # Bug fix: added the missing space between "{interface}" and "on host".
    err_msg = f"Failed to set MAC address of interface {interface} " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def tg_set_interface_driver(node, pci_addr, driver):
    """Set interface driver on the TG node.

    :param node: Node to set interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :param driver: Driver name.
    :type node: dict
    :type pci_addr: str
    :type driver: str
    :raises RuntimeError: If unbinding from the current driver fails.
    :raises RuntimeError: If binding to the new driver fails.
    """
    old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
    if old_driver == driver:
        # Nothing to do, requested driver is already bound.
        return

    ssh = SSH()
    ssh.connect(node)

    # Unbind from current driver
    if old_driver is not None:
        cmd = f"sh -c \"echo {pci_addr} > " \
            f"/sys/bus/pci/drivers/{old_driver}/unbind\""
        ret_code, _, _ = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")

    # Bind to the new driver
    cmd = f"sh -c \"echo {pci_addr} > /sys/bus/pci/drivers/{driver}/bind\""
    ret_code, _, _ = ssh.exec_command_sudo(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
def tg_get_interface_driver(node, pci_addr):
    """Get interface driver from the TG node.

    :param node: Node to get interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :type node: dict
    :type pci_addr: str
    :returns: Interface driver or None if not found.
    :rtype: str
    :raises RuntimeError: If PCI rescan or lspci command execution failed.
    """
    return DUTSetup.get_pci_dev_driver(node, pci_addr)
def tg_set_interfaces_default_driver(node):
    """Set interfaces default driver specified in topology yaml file.

    :param node: Node to setup interfaces driver on (must be TG node).
    :type node: dict
    """
    for interface in node[u"interfaces"].values():
        InterfaceUtil.tg_set_interface_driver(
            node, interface[u"pci_address"], interface[u"driver"]
        )
def update_vpp_interface_data_on_node(node):
    """Update vpp generated interface data for a given node in DICT__nodes.

    Updates interface names, software if index numbers and any other details
    generated specifically by vpp that are unknown before testcase run.
    It does this by dumping interface list from all devices using python
    api, and pairing known information from topology (mac address) to state
    of the node.

    :param node: Node selected from DICT__nodes.
    :type node: dict
    """
    interface_list = InterfaceUtil.vpp_get_interface_data(node)
    # Index the dump by MAC so topology entries can be paired cheaply.
    interface_dict = dict()
    for ifc in interface_list:
        interface_dict[ifc[u"l2_address"]] = ifc

    for if_name, if_data in node[u"interfaces"].items():
        ifc_dict = interface_dict.get(if_data[u"mac_address"])
        if ifc_dict is not None:
            if_data[u"name"] = ifc_dict[u"interface_name"]
            if_data[u"vpp_sw_index"] = ifc_dict[u"sw_if_index"]
            if_data[u"mtu"] = ifc_dict[u"mtu"][0]
            logger.debug(
                f"Interface {if_name} found by MAC "
                f"{if_data[u'mac_address']}"
            )
        else:
            logger.debug(
                f"Interface {if_name} not found by MAC "
                f"{if_data[u'mac_address']}"
            )
            if_data[u"vpp_sw_index"] = None
def update_nic_interface_names(node):
    """Update interface names based on nic type and PCI address.

    This method updates interface names in the same format as VPP does.

    :param node: Node dictionary.
    :type node: dict
    """
    # NIC model -> VPP name prefix; unlisted models fall back to
    # "UnknownEthernet".
    prefixes = {
        u"Intel-XL710": u"FortyGigabitEthernet",
        u"Intel-X710": u"TenGigabitEthernet",
        u"Intel-X520-DA2": u"TenGigabitEthernet",
        u"Cisco-VIC-1385": u"FortyGigabitEthernet",
        u"Cisco-VIC-1227": u"TenGigabitEthernet",
    }
    for ifc in node[u"interfaces"].values():
        if_pci = ifc[u"pci_address"].replace(u".", u":").split(u":")
        # VPP encodes bus/device/function as lowercase hex without
        # leading zeros, e.g. 0000:18:0a.0 -> 18/a/0.
        loc = f"{int(if_pci[1], 16):x}/{int(if_pci[2], 16):x}/" \
            f"{int(if_pci[3], 16):x}"
        prefix = prefixes.get(ifc[u"model"], u"UnknownEthernet")
        ifc[u"name"] = f"{prefix}{loc}"
def update_nic_interface_names_on_all_duts(nodes):
    """Update interface names based on nic type and PCI address on all DUTs.

    This method updates interface names in the same format as VPP does.

    :param nodes: Topology nodes.
    :type nodes: dict
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.update_nic_interface_names(node)
def update_tg_interface_data_on_node(node):
    """Update interface name for TG/linux node in DICT__nodes.

    .. note::
        # for dev in `ls /sys/class/net/`;
        > do echo "\\"`cat /sys/class/net/$dev/address`\\": \\"$dev\\""; done
        "52:54:00:9f:82:63": "eth0"
        "52:54:00:77:ae:a9": "eth1"
        "52:54:00:e1:8a:0f": "eth2"
        "00:00:00:00:00:00": "lo"

    :param node: Node selected from DICT__nodes.
    :type node: dict
    :raises RuntimeError: If getting of interface name and MAC fails.
    """
    # First setup interface driver specified in yaml file
    InterfaceUtil.tg_set_interfaces_default_driver(node)

    # Get interface names
    ssh = SSH()
    ssh.connect(node)

    cmd = u'for dev in `ls /sys/class/net/`; do echo "\\"`cat ' \
        u'/sys/class/net/$dev/address`\\": \\"$dev\\""; done;'

    ret_code, stdout, _ = ssh.exec_command(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(u"Get interface name and MAC failed")
    # Turn the shell output into a JSON object of MAC -> name.
    tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"

    interfaces = JsonParser().parse_data(tmp)
    for interface in node[u"interfaces"].values():
        name = interfaces.get(interface[u"mac_address"])
        if name is None:
            # MAC not present on the node; leave topology entry untouched.
            continue
        interface[u"name"] = name
def iface_update_numa_node(node):
    """For all interfaces from topology file update numa node based on
    information from the node.

    :param node: Node from topology.
    :type node: dict
    :returns: Nothing.
    :raises ValueError: If numa node ia less than 0.
    :raises RuntimeError: If update of numa node failed.
    """
    ssh = SSH()
    for if_key in Topology.get_node_interfaces(node):
        if_pci = Topology.get_interface_pci_addr(node, if_key)
        ssh.connect(node)
        cmd = f"cat /sys/bus/pci/devices/{if_pci}/numa_node"
        # Retry the read a few times before giving up on the interface.
        for _ in range(3):
            ret, out, _ = ssh.exec_command(cmd)
            if ret == 0:
                try:
                    # sysfs reports -1 when numa is unknown; default to 0.
                    numa_node = 0 if int(out) < 0 else int(out)
                except ValueError:
                    logger.trace(
                        f"Reading numa location failed for: {if_pci}"
                    )
                else:
                    Topology.set_interface_numa_node(
                        node, if_key, numa_node
                    )
                    break
        else:
            raise RuntimeError(f"Update numa node failed for: {if_pci}")
def update_all_interface_data_on_all_nodes(
        nodes, skip_tg=False, skip_vpp=False):
    """Update interface names on all nodes in DICT__nodes.

    This method updates the topology dictionary by querying interface lists
    of all nodes mentioned in the topology dictionary.

    :param nodes: Nodes in the topology.
    :param skip_tg: Skip TG node.
    :param skip_vpp: Skip VPP node.
    :type nodes: dict
    :type skip_tg: bool
    :type skip_vpp: bool
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT and not skip_vpp:
            InterfaceUtil.update_vpp_interface_data_on_node(node)
        elif node[u"type"] == NodeType.TG and not skip_tg:
            InterfaceUtil.update_tg_interface_data_on_node(node)
            # NOTE(review): numa update appears to apply only to the TG
            # branch here — confirm intended scope against callers.
            InterfaceUtil.iface_update_numa_node(node)
def create_vlan_subinterface(node, interface, vlan):
    """Create VLAN sub-interface on node.

    :param node: Node to add VLAN subinterface on.
    :param interface: Interface name or index on which create VLAN
        subinterface.
    :param vlan: VLAN ID of the subinterface to be created.
    :type node: dict
    :type interface: str on int
    :type vlan: int
    :returns: Name and index of created subinterface.
    :rtype: tuple
    :raises RuntimeError: if it is unable to create VLAN subinterface on the
        node or interface cannot be converted.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"create_vlan_subif"
    args = dict(
        sw_if_index=sw_if_index,
        vlan_id=int(vlan)
    )
    err_msg = f"Failed to create VLAN sub-interface on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new sub-interface in the topology.
    if_key = Topology.add_new_port(node, u"vlan_subif")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{vlan}", sw_if_index
def create_vxlan_interface(node, vni, source_ip, destination_ip):
    """Create VXLAN interface and return sw if index of created interface.

    :param node: Node where to create VXLAN interface.
    :param vni: VXLAN Network Identifier.
    :param source_ip: Source IP of a VXLAN Tunnel End Point.
    :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
    :type node: dict
    :type vni: int
    :type source_ip: str
    :type destination_ip: str
    :returns: SW IF INDEX of created interface.
    :rtype: int
    :raises RuntimeError: if it is unable to create VxLAN interface on the
        node.
    """
    cmd = u"vxlan_add_del_tunnel_v3"
    args = dict(
        is_add=True,
        instance=Constants.BITWISE_NON_ZERO,
        src_address=IPAddress.create_ip_address_object(
            ip_address(source_ip)
        ),
        dst_address=IPAddress.create_ip_address_object(
            ip_address(destination_ip)
        ),
        mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
        encap_vrf_id=0,
        decap_next_index=Constants.BITWISE_NON_ZERO,
        vni=int(vni)
    )
    err_msg = f"Failed to create VXLAN tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new tunnel interface in the topology.
    if_key = Topology.add_new_port(node, u"vxlan_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return sw_if_index
def set_vxlan_bypass(node, interface=None):
    """Add the 'ip4-vxlan-bypass' graph node for a given interface.

    By adding the IPv4 vxlan-bypass graph node to an interface, the node
    checks for and validate input vxlan packet and bypass ip4-lookup,
    ip4-local, ip4-udp-lookup nodes to speedup vxlan packet forwarding.
    This node will cause extra overhead to for non-vxlan packets which is
    kept at a minimum.

    :param node: Node where to set VXLAN bypass.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :raises RuntimeError: if it failed to set VXLAN bypass on interface.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"sw_interface_set_vxlan_bypass"
    args = dict(
        is_ipv6=False,
        sw_if_index=sw_if_index,
        enable=True
    )
    err_msg = f"Failed to set VXLAN bypass on interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_replies(err_msg)
def vxlan_dump(node, interface=None):
    """Get VxLAN data for the given interface.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
        If None, information about all VxLAN interfaces is returned.
    :type node: dict
    :type interface: int or str
    :returns: Dictionary containing data for the given VxLAN interface or if
        interface=None, the list of dictionaries with all VxLAN interfaces.
    :rtype: dict or list
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    def process_vxlan_dump(vxlan_dump):
        """Process vxlan dump.

        :param vxlan_dump: Vxlan interface dump.
        :type vxlan_dump: dict
        :returns: Processed vxlan interface dump.
        :rtype: dict
        """
        vxlan_dump[u"src_address"] = str(vxlan_dump[u"src_address"])
        vxlan_dump[u"dst_address"] = str(vxlan_dump[u"dst_address"])

        return vxlan_dump

    if interface is not None:
        sw_if_index = InterfaceUtil.get_interface_index(node, interface)
    else:
        # All-ones index requests a dump of every VXLAN tunnel.
        sw_if_index = int(Constants.BITWISE_NON_ZERO)

    cmd = u"vxlan_tunnel_dump"
    args = dict(
        sw_if_index=sw_if_index
    )
    err_msg = f"Failed to get VXLAN dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)

    data = list() if interface is None else dict()
    for dump in details:
        if interface is None:
            data.append(process_vxlan_dump(dump))
        elif dump[u"sw_if_index"] == sw_if_index:
            data = process_vxlan_dump(dump)
            break

    logger.debug(f"VXLAN data:\n{data}")
    return data
def create_subinterface(
        node, interface, sub_id, outer_vlan_id=None, inner_vlan_id=None,
        type_subif=None):
    """Create sub-interface on node. It is possible to set required
    sub-interface type and VLAN tag(s).

    :param node: Node to add sub-interface.
    :param interface: Interface name on which create sub-interface.
    :param sub_id: ID of the sub-interface to be created.
    :param outer_vlan_id: Optional outer VLAN ID.
    :param inner_vlan_id: Optional inner VLAN ID.
    :param type_subif: Optional type of sub-interface. Values supported by
        VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
        [default_sub]
    :type node: dict
    :type interface: str or int
    :type sub_id: int
    :type outer_vlan_id: int
    :type inner_vlan_id: int
    :type type_subif: str
    :returns: Name and index of created sub-interface.
    :rtype: tuple
    :raises RuntimeError: If it is not possible to create sub-interface.
    """
    subif_types = type_subif.split()

    # Accumulate the requested SubInterfaceFlags bits.
    flags = 0
    if u"no_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_NO_TAGS
    if u"one_tag" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_ONE_TAG
    if u"two_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_TWO_TAGS
    if u"dot1ad" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DOT1AD
    if u"exact_match" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_EXACT_MATCH
    if u"default_sub" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DEFAULT
    if type_subif == u"default_sub":
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_INNER_VLAN_ID_ANY\
            | SubInterfaceFlags.SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY

    cmd = u"create_subif"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        sub_id=int(sub_id),
        # flags stays a plain int when no flag bit was set above.
        sub_if_flags=flags.value if hasattr(flags, u"value")
        else int(flags),
        outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
        inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0
    )
    err_msg = f"Failed to create sub-interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new sub-interface in the topology.
    if_key = Topology.add_new_port(node, u"subinterface")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{sub_id}", sw_if_index
def create_gre_tunnel_interface(node, source_ip, destination_ip):
    """Create GRE tunnel interface on node.

    :param node: VPP node to add tunnel interface.
    :param source_ip: Source of the GRE tunnel.
    :param destination_ip: Destination of the GRE tunnel.
    :type node: dict
    :type source_ip: str
    :type destination_ip: str
    :returns: Name and index of created GRE tunnel interface.
    :rtype: tuple
    :raises RuntimeError: If unable to create GRE tunnel interface.
    """
    cmd = u"gre_tunnel_add_del"
    tunnel = dict(
        type=0,
        instance=Constants.BITWISE_NON_ZERO,
        src=str(source_ip),
        dst=str(destination_ip),
        outer_fib_id=0,
        session_id=0
    )
    args = dict(
        is_add=1,
        tunnel=tunnel
    )
    err_msg = f"Failed to create GRE tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new tunnel interface in the topology.
    if_key = Topology.add_new_port(node, u"gre_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return ifc_name, sw_if_index
def create_gtpu_tunnel_interface(node, teid, source_ip, destination_ip):
    """Create GTPU interface and return sw if index of created interface.

    :param node: Node where to create GTPU interface.
    :param teid: GTPU Tunnel Endpoint Identifier.
    :param source_ip: Source IP of a GTPU Tunnel End Point.
    :param destination_ip: Destination IP of a GTPU Tunnel End Point.
    :type node: dict
    :type teid: int
    :type source_ip: str
    :type destination_ip: str
    :returns: SW IF INDEX of created interface.
    :rtype: int
    :raises RuntimeError: if it is unable to create GTPU interface on the
        node.
    """
    cmd = u"gtpu_add_del_tunnel"
    args = dict(
        is_add=True,
        src_address=IPAddress.create_ip_address_object(
            ip_address(source_ip)
        ),
        dst_address=IPAddress.create_ip_address_object(
            ip_address(destination_ip)
        ),
        mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
        encap_vrf_id=0,
        # NOTE(review): decap next node index reconstructed from the VPP
        # gtpu.api defaults — confirm against upstream.
        decap_next_index=2,
        teid=int(teid)
    )
    err_msg = f"Failed to create GTPU tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new tunnel interface in the topology.
    if_key = Topology.add_new_port(node, u"gtpu_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return sw_if_index
def vpp_create_loopback(node, mac=None):
    """Create loopback interface on VPP node.

    :param node: Node to create loopback interface on.
    :param mac: Optional MAC address for loopback interface.
    :type node: dict
    :type mac: str
    :returns: SW interface index.
    :rtype: int
    :raises RuntimeError: If it is not possible to create loopback on the
        node.
    """
    cmd = u"create_loopback_instance"
    args = dict(
        mac_address=L2Util.mac_to_bin(mac) if mac else 0,
        is_specified=False,
        user_instance=0
    )
    err_msg = f"Failed to create loopback interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new loopback in the topology.
    if_key = Topology.add_new_port(node, u"loopback")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
    if mac:
        # Read the MAC back from VPP so topology holds the applied value.
        mac = InterfaceUtil.vpp_get_interface_mac(node, ifc_name)
        Topology.update_interface_mac_address(node, if_key, mac)

    return sw_if_index
1144 def vpp_create_bond_interface(
1145 node, mode, load_balance=None, mac=None, gso=False):
1146 """Create bond interface on VPP node.
1148 :param node: DUT node from topology.
1149 :param mode: Link bonding mode.
1150 :param load_balance: Load balance (optional, valid for xor and lacp
1151 modes, otherwise ignored). Default: None.
1152 :param mac: MAC address to assign to the bond interface (optional).
1154 :param gso: Enable GSO support (optional). Default: False.
1157 :type load_balance: str
1160 :returns: Interface key (name) in topology.
1162 :raises RuntimeError: If it is not possible to create bond interface on
1165 cmd = u"bond_create2"
1167 id=int(Constants.BITWISE_NON_ZERO),
1168 use_custom_mac=bool(mac is not None),
1169 mac_address=L2Util.mac_to_bin(mac) if mac else None,
1172 f"BOND_API_MODE_{mode.replace(u'-', u'_').upper()}"
1174 lb=0 if load_balance is None else getattr(
1175 LinkBondLoadBalanceAlgo,
1176 f"BOND_API_LB_ALGO_{load_balance.upper()}"
1181 err_msg = f"Failed to create bond interface on host {node[u'host']}"
1182 with PapiSocketExecutor(node) as papi_exec:
1183 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1185 InterfaceUtil.add_eth_interface(
1186 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_bond"
1188 if_key = Topology.get_interface_by_sw_index(node, sw_if_index)
1193 def add_eth_interface(
1194 node, ifc_name=None, sw_if_index=None, ifc_pfx=None,
1196 """Add ethernet interface to current topology.
1198 :param node: DUT node from topology.
1199 :param ifc_name: Name of the interface.
1200 :param sw_if_index: SW interface index.
1201 :param ifc_pfx: Interface key prefix.
1202 :param host_if_key: Host interface key from topology file.
1205 :type sw_if_index: int
1207 :type host_if_key: str
1209 if_key = Topology.add_new_port(node, ifc_pfx)
1211 if ifc_name and sw_if_index is None:
1212 sw_if_index = InterfaceUtil.vpp_get_interface_sw_index(
1214 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
1215 if sw_if_index and ifc_name is None:
1216 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
1217 Topology.update_interface_name(node, if_key, ifc_name)
1218 ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_index)
1219 Topology.update_interface_mac_address(node, if_key, ifc_mac)
1220 if host_if_key is not None:
1221 Topology.set_interface_numa_node(
1222 node, if_key, Topology.get_interface_numa_node(
1226 Topology.update_interface_pci_address(
1227 node, if_key, Topology.get_interface_pci_addr(node, host_if_key)
1231 def vpp_create_avf_interface(
1232 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0):
1233 """Create AVF interface on VPP node.
1235 :param node: DUT node from topology.
1236 :param if_key: Interface key from topology file of interface
1237 to be bound to i40evf driver.
1238 :param num_rx_queues: Number of RX queues.
1239 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1240 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1243 :type num_rx_queues: int
1246 :returns: AVF interface key (name) in topology.
1248 :raises RuntimeError: If it is not possible to create AVF interface on
1251 PapiSocketExecutor.run_cli_cmd(
1252 node, u"set logging class avf level debug"
1256 vf_pci_addr = Topology.get_interface_pci_addr(node, if_key)
1258 pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
1260 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1264 err_msg = f"Failed to create AVF interface on host {node[u'host']}"
1266 # FIXME: Remove once the fw/driver is upgraded.
1268 with PapiSocketExecutor(node) as papi_exec:
1270 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(
1274 except AssertionError:
1275 logger.error(err_msg)
1277 raise AssertionError(err_msg)
1279 InterfaceUtil.add_eth_interface(
1280 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
1284 return Topology.get_interface_by_sw_index(node, sw_if_index)
1287 def vpp_create_af_xdp_interface(
1288 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
1290 """Create AF_XDP interface on VPP node.
1292 :param node: DUT node from topology.
1293 :param if_key: Physical interface key from topology file of interface
1294 to be bound to compatible driver.
1295 :param num_rx_queues: Number of RX queues. (Optional, Default: none)
1296 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1297 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1298 :param mode: AF_XDP interface mode. (Optional, Default: auto).
1301 :type num_rx_queues: int
1305 :returns: Interface key (name) in topology file.
1307 :raises RuntimeError: If it is not possible to create AF_XDP interface
1310 PapiSocketExecutor.run_cli_cmd(
1311 node, u"set logging class af_xdp level debug"
1314 cmd = u"af_xdp_create"
1315 pci_addr = Topology.get_interface_pci_addr(node, if_key)
1317 name=InterfaceUtil.pci_to_eth(node, pci_addr),
1318 host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
1319 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1322 mode=getattr(AfXdpMode, f"AF_XDP_API_MODE_{mode.upper()}").value
1324 err_msg = f"Failed to create AF_XDP interface on host {node[u'host']}"
1325 with PapiSocketExecutor(node) as papi_exec:
1326 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1328 InterfaceUtil.vpp_set_interface_mac(
1329 node, sw_if_index, Topology.get_interface_mac(node, if_key)
1331 InterfaceUtil.add_eth_interface(
1332 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_af_xdp",
1336 return Topology.get_interface_by_sw_index(node, sw_if_index)
1339 def vpp_create_rdma_interface(
1340 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
1342 """Create RDMA interface on VPP node.
1344 :param node: DUT node from topology.
1345 :param if_key: Physical interface key from topology file of interface
1346 to be bound to rdma-core driver.
1347 :param num_rx_queues: Number of RX queues.
1348 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1349 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1350 :param mode: RDMA interface mode - auto/ibv/dv.
1353 :type num_rx_queues: int
1357 :returns: Interface key (name) in topology file.
1359 :raises RuntimeError: If it is not possible to create RDMA interface on
1362 PapiSocketExecutor.run_cli_cmd(
1363 node, u"set logging class rdma level debug"
1366 cmd = u"rdma_create_v3"
1367 pci_addr = Topology.get_interface_pci_addr(node, if_key)
1369 name=InterfaceUtil.pci_to_eth(node, pci_addr),
1370 host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
1371 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1374 mode=getattr(RdmaMode, f"RDMA_API_MODE_{mode.upper()}").value,
1375 # Note: Set True for non-jumbo packets.
1378 # TODO: Apply desired RSS flags.
1380 err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
1381 with PapiSocketExecutor(node) as papi_exec:
1382 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1384 InterfaceUtil.vpp_set_interface_mac(
1385 node, sw_if_index, Topology.get_interface_mac(node, if_key)
1387 InterfaceUtil.add_eth_interface(
1388 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_rdma",
1392 return Topology.get_interface_by_sw_index(node, sw_if_index)
1395 def vpp_add_bond_member(node, interface, bond_if):
1396 """Add member interface to bond interface on VPP node.
1398 :param node: DUT node from topology.
1399 :param interface: Physical interface key from topology file.
1400 :param bond_if: Load balance
1402 :type interface: str
1404 :raises RuntimeError: If it is not possible to add member to bond
1405 interface on the node.
1407 cmd = u"bond_add_member"
1409 sw_if_index=Topology.get_interface_sw_index(node, interface),
1410 bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
1412 is_long_timeout=False
1414 err_msg = f"Failed to add member {interface} to bond interface " \
1415 f"{bond_if} on host {node[u'host']}"
1416 with PapiSocketExecutor(node) as papi_exec:
1417 papi_exec.add(cmd, **args).get_reply(err_msg)
1420 def vpp_show_bond_data_on_node(node, verbose=False):
1421 """Show (detailed) bond information on VPP node.
1423 :param node: DUT node from topology.
1424 :param verbose: If detailed information is required or not.
1428 cmd = u"sw_bond_interface_dump"
1429 err_msg = f"Failed to get bond interface dump on host {node[u'host']}"
1431 data = f"Bond data on node {node[u'host']}:\n"
1432 with PapiSocketExecutor(node) as papi_exec:
1433 details = papi_exec.add(cmd).get_details(err_msg)
1435 for bond in details:
1436 data += f"{bond[u'interface_name']}\n"
1437 data += u" mode: {m}\n".format(
1438 m=bond[u"mode"].name.replace(u"BOND_API_MODE_", u"").lower()
1440 data += u" load balance: {lb}\n".format(
1441 lb=bond[u"lb"].name.replace(u"BOND_API_LB_ALGO_", u"").lower()
1443 data += f" number of active members: {bond[u'active_members']}\n"
1445 member_data = InterfaceUtil.vpp_bond_member_dump(
1446 node, Topology.get_interface_by_sw_index(
1447 node, bond[u"sw_if_index"]
1450 for member in member_data:
1451 if not member[u"is_passive"]:
1452 data += f" {member[u'interface_name']}\n"
1453 data += f" number of members: {bond[u'members']}\n"
1455 for member in member_data:
1456 data += f" {member[u'interface_name']}\n"
1457 data += f" interface id: {bond[u'id']}\n"
1458 data += f" sw_if_index: {bond[u'sw_if_index']}\n"
1462 def vpp_bond_member_dump(node, interface):
1463 """Get bond interface slave(s) data on VPP node.
1465 :param node: DUT node from topology.
1466 :param interface: Physical interface key from topology file.
1468 :type interface: str
1469 :returns: Bond slave interface data.
1472 cmd = u"sw_member_interface_dump"
1474 sw_if_index=Topology.get_interface_sw_index(node, interface)
1476 err_msg = f"Failed to get slave dump on host {node[u'host']}"
1478 with PapiSocketExecutor(node) as papi_exec:
1479 details = papi_exec.add(cmd, **args).get_details(err_msg)
1481 logger.debug(f"Member data:\n{details}")
1485 def vpp_show_bond_data_on_all_nodes(nodes, verbose=False):
1486 """Show (detailed) bond information on all VPP nodes in DICT__nodes.
1488 :param nodes: Nodes in the topology.
1489 :param verbose: If detailed information is required or not.
1493 for node_data in nodes.values():
1494 if node_data[u"type"] == NodeType.DUT:
1495 InterfaceUtil.vpp_show_bond_data_on_node(node_data, verbose)
1498 def vpp_enable_input_acl_interface(
1499 node, interface, ip_version, table_index):
1500 """Enable input acl on interface.
1502 :param node: VPP node to setup interface for input acl.
1503 :param interface: Interface to setup input acl.
1504 :param ip_version: Version of IP protocol.
1505 :param table_index: Classify table index.
1507 :type interface: str or int
1508 :type ip_version: str
1509 :type table_index: int
1511 cmd = u"input_acl_set_interface"
1513 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1514 ip4_table_index=table_index if ip_version == u"ip4"
1515 else Constants.BITWISE_NON_ZERO,
1516 ip6_table_index=table_index if ip_version == u"ip6"
1517 else Constants.BITWISE_NON_ZERO,
1518 l2_table_index=table_index if ip_version == u"l2"
1519 else Constants.BITWISE_NON_ZERO,
1521 err_msg = f"Failed to enable input acl on interface {interface}"
1522 with PapiSocketExecutor(node) as papi_exec:
1523 papi_exec.add(cmd, **args).get_reply(err_msg)
1526 def get_interface_classify_table(node, interface):
1527 """Get name of classify table for the given interface.
1529 TODO: Move to Classify.py.
1531 :param node: VPP node to get data from.
1532 :param interface: Name or sw_if_index of a specific interface.
1534 :type interface: str or int
1535 :returns: Classify table name.
1538 if isinstance(interface, str):
1539 sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
1541 sw_if_index = interface
1543 cmd = u"classify_table_by_interface"
1545 sw_if_index=sw_if_index
1547 err_msg = f"Failed to get classify table name by interface {interface}"
1548 with PapiSocketExecutor(node) as papi_exec:
1549 reply = papi_exec.add(cmd, **args).get_reply(err_msg)
1554 def get_sw_if_index(node, interface_name):
1555 """Get sw_if_index for the given interface from actual interface dump.
1557 FIXME: Delete and redirect callers to vpp_get_interface_sw_index.
1559 :param node: VPP node to get interface data from.
1560 :param interface_name: Name of the specific interface.
1562 :type interface_name: str
1563 :returns: sw_if_index of the given interface.
1566 interface_data = InterfaceUtil.vpp_get_interface_data(
1567 node, interface=interface_name
1569 return interface_data.get(u"sw_if_index")
1572 def vxlan_gpe_dump(node, interface_name=None):
1573 """Get VxLAN GPE data for the given interface.
1575 :param node: VPP node to get interface data from.
1576 :param interface_name: Name of the specific interface. If None,
1577 information about all VxLAN GPE interfaces is returned.
1579 :type interface_name: str
1580 :returns: Dictionary containing data for the given VxLAN GPE interface
1581 or if interface=None, the list of dictionaries with all VxLAN GPE
1583 :rtype: dict or list
1585 def process_vxlan_gpe_dump(vxlan_dump):
1586 """Process vxlan_gpe dump.
1588 :param vxlan_dump: Vxlan_gpe nterface dump.
1589 :type vxlan_dump: dict
1590 :returns: Processed vxlan_gpe interface dump.
1593 if vxlan_dump[u"is_ipv6"]:
1594 vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"])
1595 vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"])
1597 vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"][0:4])
1598 vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"][0:4])
1601 if interface_name is not None:
1602 sw_if_index = InterfaceUtil.get_interface_index(
1603 node, interface_name
1606 sw_if_index = int(Constants.BITWISE_NON_ZERO)
1608 cmd = u"vxlan_gpe_tunnel_dump"
1610 sw_if_index=sw_if_index
1612 err_msg = f"Failed to get VXLAN-GPE dump on host {node[u'host']}"
1613 with PapiSocketExecutor(node) as papi_exec:
1614 details = papi_exec.add(cmd, **args).get_details(err_msg)
1616 data = list() if interface_name is None else dict()
1617 for dump in details:
1618 if interface_name is None:
1619 data.append(process_vxlan_gpe_dump(dump))
1620 elif dump[u"sw_if_index"] == sw_if_index:
1621 data = process_vxlan_gpe_dump(dump)
1624 logger.debug(f"VXLAN-GPE data:\n{data}")
1628 def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
1629 """Assign VPP interface to specific VRF/FIB table.
1631 :param node: VPP node where the FIB and interface are located.
1632 :param interface: Interface to be assigned to FIB.
1633 :param table_id: VRF table ID.
1634 :param ipv6: Assign to IPv6 table. Default False.
1636 :type interface: str or int
1640 cmd = u"sw_interface_set_table"
1642 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1644 vrf_id=int(table_id)
1646 err_msg = f"Failed to assign interface {interface} to FIB table"
1647 with PapiSocketExecutor(node) as papi_exec:
1648 papi_exec.add(cmd, **args).get_reply(err_msg)
1651 def set_linux_interface_mac(
1652 node, interface, mac, namespace=None, vf_id=None):
1653 """Set MAC address for interface in linux.
1655 :param node: Node where to execute command.
1656 :param interface: Interface in namespace.
1657 :param mac: MAC to be assigned to interface.
1658 :param namespace: Execute command in namespace. Optional
1659 :param vf_id: Virtual Function id. Optional
1661 :type interface: str
1663 :type namespace: str
1666 mac_str = f"vf {vf_id} mac {mac}" if vf_id is not None \
1667 else f"address {mac}"
1668 ns_str = f"ip netns exec {namespace}" if namespace else u""
1670 cmd = f"{ns_str} ip link set {interface} {mac_str}"
1671 exec_cmd_no_error(node, cmd, sudo=True)
1674 def set_linux_interface_promisc(
1675 node, interface, namespace=None, vf_id=None, state=u"on"):
1676 """Set promisc state for interface in linux.
1678 :param node: Node where to execute command.
1679 :param interface: Interface in namespace.
1680 :param namespace: Exec command in namespace. (Optional, Default: None)
1681 :param vf_id: Virtual Function id. (Optional, Default: None)
1682 :param state: State of feature. (Optional, Default: on)
1684 :type interface: str
1685 :type namespace: str
1689 promisc_str = f"vf {vf_id} promisc {state}" if vf_id is not None \
1690 else f"promisc {state}"
1691 ns_str = f"ip netns exec {namespace}" if namespace else u""
1693 cmd = f"{ns_str} ip link set dev {interface} {promisc_str}"
1694 exec_cmd_no_error(node, cmd, sudo=True)
1697 def set_linux_interface_trust_on(
1698 node, interface, namespace=None, vf_id=None):
1699 """Set trust on (promisc) for interface in linux.
1701 :param node: Node where to execute command.
1702 :param interface: Interface in namespace.
1703 :param namespace: Execute command in namespace. Optional
1704 :param vf_id: Virtual Function id. Optional
1706 :type interface: str
1707 :type namespace: str
1710 trust_str = f"vf {vf_id} trust on" if vf_id is not None else u"trust on"
1711 ns_str = f"ip netns exec {namespace}" if namespace else u""
1713 cmd = f"{ns_str} ip link set dev {interface} {trust_str}"
1714 exec_cmd_no_error(node, cmd, sudo=True)
1717 def set_linux_interface_spoof_off(
1718 node, interface, namespace=None, vf_id=None):
1719 """Set spoof off for interface in linux.
1721 :param node: Node where to execute command.
1722 :param interface: Interface in namespace.
1723 :param namespace: Execute command in namespace. Optional
1724 :param vf_id: Virtual Function id. Optional
1726 :type interface: str
1727 :type namespace: str
1730 spoof_str = f"vf {vf_id} spoof off" if vf_id is not None \
1732 ns_str = f"ip netns exec {namespace}" if namespace else u""
1734 cmd = f"{ns_str} ip link set dev {interface} {spoof_str}"
1735 exec_cmd_no_error(node, cmd, sudo=True)
1738 def set_linux_interface_state(
1739 node, interface, namespace=None, state=u"up"):
1740 """Set operational state for interface in linux.
1742 :param node: Node where to execute command.
1743 :param interface: Interface in namespace.
1744 :param namespace: Execute command in namespace. Optional
1745 :param state: Up/Down.
1747 :type interface: str
1748 :type namespace: str
1751 ns_str = f"ip netns exec {namespace}" if namespace else u""
1753 cmd = f"{ns_str} ip link set dev {interface} {state}"
1754 exec_cmd_no_error(node, cmd, sudo=True)
1757 def init_interface(node, ifc_key, driver, numvfs=0, osi_layer=u"L2"):
1758 """Init PCI device. Check driver compatibility and bind to proper
1759 drivers. Optionally create NIC VFs.
1761 :param node: DUT node.
1762 :param ifc_key: Interface key from topology file.
1763 :param driver: Base driver to use.
1764 :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
1765 :param osi_layer: OSI Layer type to initialize TG with.
1766 Default value "L2" sets linux interface spoof off.
1771 :type osi_layer: str
1772 :returns: Virtual Function topology interface keys.
1774 :raises RuntimeError: If a reason preventing initialization is found.
1776 kernel_driver = Topology.get_interface_driver(node, ifc_key)
1778 if driver == u"avf":
1779 if kernel_driver not in (
1780 u"ice", u"iavf", u"i40e", u"i40evf"):
1782 f"AVF needs ice or i40e compatible driver, not "
1783 f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
1785 vf_keys = InterfaceUtil.init_generic_interface(
1786 node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
1788 elif driver == u"af_xdp":
1789 if kernel_driver not in (
1790 u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core",
1793 f"AF_XDP needs ice/i40e/rdma/ixgbe compatible driver, not "
1794 f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
1796 vf_keys = InterfaceUtil.init_generic_interface(
1797 node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
1799 elif driver == u"rdma-core":
1800 vf_keys = InterfaceUtil.init_generic_interface(
1801 node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
1806 def init_generic_interface(node, ifc_key, numvfs=0, osi_layer=u"L2"):
1807 """Init PCI device. Bind to proper drivers. Optionally create NIC VFs.
1809 :param node: DUT node.
1810 :param ifc_key: Interface key from topology file.
1811 :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
1812 :param osi_layer: OSI Layer type to initialize TG with.
1813 Default value "L2" sets linux interface spoof off.
1817 :type osi_layer: str
1818 :returns: Virtual Function topology interface keys.
1820 :raises RuntimeError: If a reason preventing initialization is found.
1822 # Read PCI address and driver.
1823 pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
1824 pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
1825 uio_driver = Topology.get_uio_driver(node)
1826 kernel_driver = Topology.get_interface_driver(node, ifc_key)
1827 current_driver = DUTSetup.get_pci_dev_driver(
1828 node, pf_pci_addr.replace(u":", r"\:"))
1829 pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"
1831 VPPUtil.stop_vpp_service(node)
1832 if current_driver != kernel_driver:
1833 # PCI device must be re-bound to kernel driver before creating VFs.
1834 DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
1835 # Stop VPP to prevent deadlock.
1836 # Unbind from current driver if bound.
1838 DUTSetup.pci_driver_unbind(node, pf_pci_addr)
1839 # Bind to kernel driver.
1840 DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
1842 # Initialize PCI VFs.
1843 DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
1846 if osi_layer == u"L2":
1847 InterfaceUtil.set_linux_interface_promisc(node, pf_dev)
1850 # Set MAC address and bind each virtual function to uio driver.
1851 for vf_id in range(numvfs):
1852 vf_mac_addr = u":".join(
1853 [pf_mac_addr[0], pf_mac_addr[2], pf_mac_addr[3], pf_mac_addr[4],
1854 pf_mac_addr[5], f"{vf_id:02x}"
1858 InterfaceUtil.set_linux_interface_trust_on(
1859 node, pf_dev, vf_id=vf_id
1861 if osi_layer == u"L2":
1862 InterfaceUtil.set_linux_interface_spoof_off(
1863 node, pf_dev, vf_id=vf_id
1865 InterfaceUtil.set_linux_interface_mac(
1866 node, pf_dev, vf_mac_addr, vf_id=vf_id
1868 InterfaceUtil.set_linux_interface_state(
1869 node, pf_dev, state=u"up"
1872 DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
1873 DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
1875 # Add newly created ports into topology file
1876 vf_ifc_name = f"{ifc_key}_vif"
1877 vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
1878 vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
1879 Topology.update_interface_name(
1880 node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
1882 Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
1883 Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
1884 Topology.set_interface_numa_node(
1885 node, vf_ifc_key, Topology.get_interface_numa_node(
1889 vf_ifc_keys.append(vf_ifc_key)
1894 def vpp_sw_interface_rx_placement_dump(node):
1895 """Dump VPP interface RX placement on node.
1897 :param node: Node to run command on.
1899 :returns: Thread mapping information as a list of dictionaries.
1902 cmd = u"sw_interface_rx_placement_dump"
1903 err_msg = f"Failed to run '{cmd}' PAPI command on host {node[u'host']}!"
1904 with PapiSocketExecutor(node) as papi_exec:
1905 for ifc in node[u"interfaces"].values():
1906 if ifc[u"vpp_sw_index"] is not None:
1907 papi_exec.add(cmd, sw_if_index=ifc[u"vpp_sw_index"])
1908 details = papi_exec.get_details(err_msg)
1909 return sorted(details, key=lambda k: k[u"sw_if_index"])
1912 def vpp_sw_interface_rx_placement_dump_on_all_duts(nodes):
1913 """Dump VPP interface RX placement on all given nodes.
1915 :param nodes: Nodes to run command on.
1917 :returns: Thread mapping information as a list of dictionaries.
1920 for node in nodes.values():
1921 if node[u"type"] == NodeType.DUT:
1922 InterfaceUtil.vpp_sw_interface_rx_placement_dump(node)
1925 def vpp_sw_interface_set_rx_placement(
1926 node, sw_if_index, queue_id, worker_id):
1927 """Set interface RX placement to worker on node.
1929 :param node: Node to run command on.
1930 :param sw_if_index: VPP SW interface index.
1931 :param queue_id: VPP interface queue ID.
1932 :param worker_id: VPP worker ID (indexing from 0).
1934 :type sw_if_index: int
1936 :type worker_id: int
1937 :raises RuntimeError: If failed to run command on host or if no API
1940 cmd = u"sw_interface_set_rx_placement"
1941 err_msg = f"Failed to set interface RX placement to worker " \
1942 f"on host {node[u'host']}!"
1944 sw_if_index=sw_if_index,
1946 worker_id=worker_id,
1949 with PapiSocketExecutor(node) as papi_exec:
1950 papi_exec.add(cmd, **args).get_reply(err_msg)
1953 def vpp_round_robin_rx_placement(
1954 node, prefix, workers=None):
1955 """Set Round Robin interface RX placement on all worker threads
1958 If specified, workers limits the number of physical cores used
1959 for data plane I/O work. Other cores are presumed to do something else,
1960 e.g. asynchronous crypto processing.
1961 None means all workers are used for data plane work.
1963 :param node: Topology nodes.
1964 :param prefix: Interface name prefix.
1965 :param workers: Comma separated worker index numbers intended for
1971 thread_data = VPPUtil.vpp_show_threads(node)
1972 worker_cnt = len(thread_data) - 1
1977 for item in thread_data:
1978 if str(item.cpu_id) in workers.split(u","):
1979 worker_ids.append(item.id)
1981 for item in thread_data:
1982 if u"vpp_main" not in item.name:
1983 worker_ids.append(item.id)
1986 for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
1987 for interface in node[u"interfaces"].values():
1988 if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
1989 and prefix in interface[u"name"]:
1990 InterfaceUtil.vpp_sw_interface_set_rx_placement(
1991 node, placement[u"sw_if_index"], placement[u"queue_id"],
1992 worker_ids[worker_idx % len(worker_ids)] - 1
1997 def vpp_round_robin_rx_placement_on_all_duts(
1998 nodes, prefix, workers=None):
1999 """Set Round Robin interface RX placement on worker threads
2002 If specified, workers limits the number of physical cores used
2003 for data plane I/O work. Other cores are presumed to do something else,
2004 e.g. asynchronous crypto processing.
2005 None means all cores are used for data plane work.
2007 :param nodes: Topology nodes.
2008 :param prefix: Interface name prefix.
2009 :param workers: Comma separated worker index numbers intended for
2015 for node in nodes.values():
2016 if node[u"type"] == NodeType.DUT:
2017 InterfaceUtil.vpp_round_robin_rx_placement(
2018 node, prefix, workers