1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Interface util library."""
16 from time import sleep
17 from enum import IntEnum
19 from ipaddress import ip_address
20 from robot.api import logger
22 from resources.libraries.python.Constants import Constants
23 from resources.libraries.python.DUTSetup import DUTSetup
24 from resources.libraries.python.IPAddress import IPAddress
25 from resources.libraries.python.L2Util import L2Util
26 from resources.libraries.python.PapiExecutor import PapiSocketExecutor
27 from resources.libraries.python.parsers.JsonParser import JsonParser
28 from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
29 from resources.libraries.python.topology import NodeType, Topology
30 from resources.libraries.python.VPPUtil import VPPUtil
class InterfaceStatusFlags(IntEnum):
    """Interface status flags, mirrors VPP API enum if_status_flags."""
    IF_STATUS_API_FLAG_ADMIN_UP = 1
    IF_STATUS_API_FLAG_LINK_UP = 2
class MtuProto(IntEnum):
    """MTU protocol, mirrors VPP API enum mtu_proto."""
    MTU_PROTO_API_L3 = 0
    MTU_PROTO_API_IP4 = 1
    MTU_PROTO_API_IP6 = 2
    MTU_PROTO_API_MPLS = 3
class LinkDuplex(IntEnum):
    """Link duplex mode, mirrors VPP API enum link_duplex."""
    LINK_DUPLEX_API_UNKNOWN = 0
    LINK_DUPLEX_API_HALF = 1
    LINK_DUPLEX_API_FULL = 2
class SubInterfaceFlags(IntEnum):
    """Sub-interface flags, mirrors VPP API enum sub_if_flags.

    Values are bit flags and may be OR-ed together.
    """
    SUB_IF_API_FLAG_NO_TAGS = 1
    SUB_IF_API_FLAG_ONE_TAG = 2
    SUB_IF_API_FLAG_TWO_TAGS = 4
    SUB_IF_API_FLAG_DOT1AD = 8
    SUB_IF_API_FLAG_EXACT_MATCH = 16
    SUB_IF_API_FLAG_DEFAULT = 32
    SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY = 64
    SUB_IF_API_FLAG_INNER_VLAN_ID_ANY = 128
    SUB_IF_API_FLAG_DOT1AH = 256
class RxMode(IntEnum):
    """RX mode, mirrors VPP API enum rx_mode."""
    RX_MODE_API_UNKNOWN = 0
    RX_MODE_API_POLLING = 1
    RX_MODE_API_INTERRUPT = 2
    RX_MODE_API_ADAPTIVE = 3
    RX_MODE_API_DEFAULT = 4
class IfType(IntEnum):
    """Interface type, mirrors VPP API enum if_type."""
    # A hw interface
    IF_API_TYPE_HARDWARE = 0
    # A sub-interface
    IF_API_TYPE_SUB = 1
    IF_API_TYPE_P2P = 2
    IF_API_TYPE_PIPE = 3
class LinkBondLoadBalanceAlgo(IntEnum):
    """Link bonding load balance algorithm, mirrors VPP API enum
    bond_lb_algo."""
    BOND_API_LB_ALGO_L2 = 0
    BOND_API_LB_ALGO_L34 = 1
    BOND_API_LB_ALGO_L23 = 2
    BOND_API_LB_ALGO_RR = 3
    BOND_API_LB_ALGO_BC = 4
    BOND_API_LB_ALGO_AB = 5
class LinkBondMode(IntEnum):
    """Link bonding mode, mirrors VPP API enum bond_mode."""
    BOND_API_MODE_ROUND_ROBIN = 1
    BOND_API_MODE_ACTIVE_BACKUP = 2
    BOND_API_MODE_XOR = 3
    BOND_API_MODE_BROADCAST = 4
    BOND_API_MODE_LACP = 5
class RdmaMode(IntEnum):
    """RDMA interface mode, mirrors VPP API enum rdma_mode."""
    RDMA_API_MODE_AUTO = 0
    RDMA_API_MODE_IBV = 1
    RDMA_API_MODE_DV = 2
class AfXdpMode(IntEnum):
    """AF_XDP interface mode, mirrors VPP API enum af_xdp_mode."""
    AF_XDP_API_MODE_AUTO = 0
    AF_XDP_API_MODE_COPY = 1
    AF_XDP_API_MODE_ZERO_COPY = 2
121 """General utilities for managing interfaces"""
def pci_to_int(pci_str):
    """Convert PCI address from string format (0000:18:0a.0) to
    integer representation (169345024).

    :param pci_str: PCI address in string representation.
    :type pci_str: str
    :returns: Integer representation of PCI address.
    :rtype: int
    """
    # Split into [domain, bus, device, function] hex components.
    pci = list(pci_str.split(u":")[0:2])
    pci.extend(pci_str.split(u":")[2].split(u"."))

    # Pack components into a single integer; the field offsets (16, 24,
    # 29) match the layout expected by consumers of this value.
    return (int(pci[0], 16) | int(pci[1], 16) << 16 |
            int(pci[2], 16) << 24 | int(pci[3], 16) << 29)
def pci_to_eth(node, pci_str):
    """Convert PCI address on DUT to Linux ethernet name.

    :param node: DUT node.
    :param pci_str: PCI address.
    :type node: dict
    :type pci_str: str
    :returns: Ethernet name.
    :rtype: str
    :raises RuntimeError: If the PCI address cannot be mapped to an
        ethernet device (e.g. device not bound to a kernel driver).
    """
    cmd = f"basename /sys/bus/pci/devices/{pci_str}/net/*"
    try:
        stdout, _ = exec_cmd_no_error(node, cmd)
    except RuntimeError:
        raise RuntimeError(f"Cannot convert {pci_str} to ethernet name!")

    return stdout.strip()
def get_interface_index(node, interface):
    """Get interface sw_if_index from topology file.

    :param node: Node where the interface is.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: str or int
    :returns: SW interface index.
    :rtype: int
    :raises TypeError: If interface is neither a number nor a known name.
    """
    try:
        # Numeric input (or numeric string) is already the index.
        sw_if_index = int(interface)
    except ValueError:
        # Fall back to topology lookup by interface key, then by name.
        sw_if_index = Topology.get_interface_sw_index(node, interface)
        if sw_if_index is None:
            sw_if_index = \
                Topology.get_interface_sw_index_by_name(node, interface)
    except TypeError as err:
        raise TypeError(f"Wrong interface format {interface}") from err

    return sw_if_index
def set_interface_state(node, interface, state, if_type=u"key"):
    """Set interface state on a node.

    Function can be used for DUTs as well as for TGs.

    :param node: Node where the interface is.
    :param interface: Interface key or sw_if_index or name.
    :param state: One of 'up' or 'down'.
    :param if_type: Interface type.
    :type node: dict
    :type interface: str or int
    :type state: str
    :type if_type: str
    :raises ValueError: If the interface type is unknown.
    :raises ValueError: If the state of interface is unexpected.
    :raises ValueError: If the node has an unknown node type.
    """
    # Resolve sw_if_index (for VPP) and linux name (for TG/VM).
    if if_type == u"key":
        if isinstance(interface, str):
            sw_if_index = Topology.get_interface_sw_index(node, interface)
            iface_name = Topology.get_interface_name(node, interface)
        else:
            sw_if_index = interface
    elif if_type == u"name":
        iface_key = Topology.get_interface_by_name(node, interface)
        if iface_key is not None:
            sw_if_index = Topology.get_interface_sw_index(node, iface_key)
        iface_name = interface
    else:
        raise ValueError(f"Unknown if_type: {if_type}")

    if node[u"type"] == NodeType.DUT:
        if sw_if_index is None:
            raise ValueError(
                f"Interface index for {interface} not assigned by VPP."
            )
        if state == u"up":
            flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
        elif state == u"down":
            flags = 0
        else:
            raise ValueError(f"Unexpected interface state: {state}")
        cmd = u"sw_interface_set_flags"
        err_msg = f"Failed to set interface state on host {node[u'host']}"
        args = dict(
            sw_if_index=int(sw_if_index),
            flags=flags
        )
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
    elif node[u"type"] == NodeType.TG or node[u"type"] == NodeType.VM:
        cmd = f"ip link set {iface_name} {state}"
        exec_cmd_no_error(node, cmd, sudo=True)
    else:
        raise ValueError(
            f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
        )
def set_interface_state_pci(node, pf_pcis, namespace=None, state=u"up"):
    """Set operational state for interface specified by PCI address.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param namespace: Exec command in namespace. (Optional, Default: none)
    :param state: Up/Down. (Optional, default: up)
    :type node: dict
    :type pf_pcis: list
    :type namespace: str
    :type state: str
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        InterfaceUtil.set_linux_interface_state(
            node, pf_eth, namespace=namespace, state=state
        )
def set_interface_mtu(node, pf_pcis, mtu=9200):
    """Set Ethernet MTU for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param mtu: MTU to set. Default: 9200.
    :type node: dict
    :type pf_pcis: list
    :type mtu: int
    :raises RuntimeError: If failed to set MTU on interface.
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ip link set {pf_eth} mtu {mtu}"
        exec_cmd_no_error(node, cmd, sudo=True)
def set_interface_channels(node, pf_pcis, num_queues=1, channel=u"combined"):
    """Set interface channels for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param num_queues: Number of channels. (Optional, Default: 1)
    :param channel: Channel type. (Optional, Default: combined)
    :type node: dict
    :type pf_pcis: list
    :type num_queues: int
    :type channel: str
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ethtool --set-channels {pf_eth} {channel} {num_queues}"
        exec_cmd_no_error(node, cmd, sudo=True)
def set_interface_flow_control(node, pf_pcis, rxf=u"off", txf=u"off"):
    """Set Ethernet flow control for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param rxf: RX flow. (Optional, Default: off).
    :param txf: TX flow. (Optional, Default: off).
    :type node: dict
    :type pf_pcis: list
    :type rxf: str
    :type txf: str
    :raises RuntimeError: If failed to set flow control on interface.
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ethtool -A {pf_eth} rx {rxf} tx {txf}"
        ret_code, _, _ = exec_cmd(node, cmd, sudo=True)
        # ethtool exits 78 when the requested setting is already active;
        # treat that as success.
        if int(ret_code) not in (0, 78):
            # Bug fix: message was missing the f-prefix, so the literal
            # text "{pf_eth}" was raised instead of the interface name.
            raise RuntimeError(f"Failed to set flow control on {pf_eth}!")
def set_pci_parameter(node, pf_pcis, key, value):
    """Set PCI parameter for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param key: Key to set.
    :param value: Value to set.
    :type node: dict
    :type pf_pcis: list
    :type key: str
    :type value: str
    """
    for pf_pci in pf_pcis:
        cmd = f"setpci -s {pf_pci} {key}={value}"
        exec_cmd_no_error(node, cmd, sudo=True)
def vpp_set_interface_mtu(node, interface, mtu=9200):
    """Set Ethernet MTU on interface.

    :param node: VPP node.
    :param interface: Interface to setup MTU. Default: 9200.
    :param mtu: Ethernet MTU size in Bytes.
    :type node: dict
    :type interface: str or int
    :type mtu: int
    """
    if isinstance(interface, str):
        sw_if_index = Topology.get_interface_sw_index(node, interface)
    else:
        sw_if_index = interface

    cmd = u"hw_interface_set_mtu"
    err_msg = f"Failed to set interface MTU on host {node[u'host']}"
    args = dict(
        sw_if_index=sw_if_index,
        mtu=int(mtu)
    )
    try:
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
    except AssertionError as err:
        # Best effort only: some drivers do not support MTU change.
        logger.debug(f"Setting MTU failed.\n{err}")
def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
    """Set Ethernet MTU on all interfaces.

    :param node: VPP node.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type mtu: int
    """
    for interface in node[u"interfaces"]:
        InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
    """Set Ethernet MTU on all interfaces on all DUTs.

    :param nodes: VPP nodes.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type nodes: dict
    :type mtu: int
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
def vpp_node_interfaces_ready_wait(node, retries=15):
    """Wait until all interfaces with admin-up are in link-up state.

    :param node: Node to wait on.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type node: dict
    :type retries: int
    :raises RuntimeError: If any interface is not in link-up state after
        defined number of retries.
    """
    for _ in range(0, retries):
        not_ready = list()
        out = InterfaceUtil.vpp_get_interface_data(node)
        for interface in out:
            # flags == 1 means admin-up only (no link-up bit set).
            if interface.get(u"flags") == 1:
                not_ready.append(interface.get(u"interface_name"))
        if not_ready:
            logger.debug(
                f"Interfaces still not in link-up state:\n{not_ready}"
            )
            sleep(1)
        else:
            break
    else:
        err = f"Timeout, interfaces not up:\n{not_ready}" \
            if u"not_ready" in locals() else u"No check executed!"
        raise RuntimeError(err)
def all_vpp_interfaces_ready_wait(nodes, retries=15):
    """Wait until all interfaces with admin-up are in link-up state for all
    nodes in the topology.

    :param nodes: Nodes in the topology.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type nodes: dict
    :type retries: int
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)
def vpp_get_interface_data(node, interface=None):
    """Get all interface data from a VPP node. If a name or
    sw_interface_index is provided, return only data for the matching
    interface(s).

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: List of dictionaries containing data for each interface, or a
        single dictionary for the specified interface.
    :rtype: list or dict
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    def process_if_dump(if_dump):
        """Process interface dump.

        Converts API enum/binary fields to plain str/int values.

        :param if_dump: Interface dump.
        :type if_dump: dict
        :returns: Processed interface dump.
        :rtype: dict
        """
        if_dump[u"l2_address"] = str(if_dump[u"l2_address"])
        if_dump[u"b_dmac"] = str(if_dump[u"b_dmac"])
        if_dump[u"b_smac"] = str(if_dump[u"b_smac"])
        if_dump[u"flags"] = if_dump[u"flags"].value
        if_dump[u"type"] = if_dump[u"type"].value
        if_dump[u"link_duplex"] = if_dump[u"link_duplex"].value
        if_dump[u"sub_if_flags"] = if_dump[u"sub_if_flags"].value \
            if hasattr(if_dump[u"sub_if_flags"], u"value") \
            else int(if_dump[u"sub_if_flags"])
        return if_dump

    if interface is not None:
        if isinstance(interface, str):
            param = u"interface_name"
        elif isinstance(interface, int):
            param = u"sw_if_index"
        else:
            raise TypeError(f"Wrong interface format {interface}")
    else:
        param = u""

    cmd = u"sw_interface_dump"
    args = dict(
        name_filter_valid=False,
        name_filter=u""
    )
    err_msg = f"Failed to get interface dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)
    logger.debug(f"Received data:\n{details!r}")

    data = list() if interface is None else dict()
    for dump in details:
        if interface is None:
            data.append(process_if_dump(dump))
        # Names from the API are NUL-padded; strip before comparing.
        elif str(dump.get(param)).rstrip(u"\x00") == str(interface):
            data = process_if_dump(dump)
            break

    logger.debug(f"Interface data:\n{data}")
    return data
def vpp_get_interface_name(node, sw_if_index):
    """Get interface name for the given SW interface index from actual
    interface dump.

    :param node: VPP node to get interface data from.
    :param sw_if_index: SW interface index of the specific interface.
    :type node: dict
    :type sw_if_index: int
    :returns: Name of the given interface.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
    # For sub-interfaces, report the supervising (parent) interface name.
    if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data[u"sup_sw_if_index"]
        )

    return if_data.get(u"interface_name")
def vpp_get_interface_sw_index(node, interface_name):
    """Get sw_if_index for the given interface name from actual
    interface dump.

    :param node: VPP node to get interface data from.
    :param interface_name: Interface name.
    :type node: dict
    :type interface_name: str
    :returns: SW interface index of the given interface.
    :rtype: int
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)

    return if_data.get(u"sw_if_index")
def vpp_get_interface_mac(node, interface):
    """Get MAC address for the given interface from actual interface dump.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: MAC address.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
    # For sub-interfaces, report the parent interface's MAC address.
    if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data[u"sup_sw_if_index"])

    return if_data.get(u"l2_address")
def vpp_set_interface_mac(node, interface, mac):
    """Set MAC address for the given interface.

    :param node: VPP node to set interface MAC.
    :param interface: Numeric index or name string of a specific interface.
    :param mac: Required MAC address.
    :type node: dict
    :type interface: int or str
    :type mac: str
    """
    cmd = u"sw_interface_set_mac_address"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        mac_address=L2Util.mac_to_bin(mac)
    )
    err_msg = f"Failed to set MAC address of interface {interface}" \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def tg_set_interface_driver(node, pci_addr, driver):
    """Set interface driver on the TG node.

    :param node: Node to set interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :param driver: Driver name.
    :type node: dict
    :type pci_addr: str
    :type driver: str
    :raises RuntimeError: If unbinding from the current driver fails.
    :raises RuntimeError: If binding to the new driver fails.
    """
    old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
    if old_driver == driver:
        # Requested driver is already bound; nothing to do.
        return

    ssh = SSH()
    ssh.connect(node)

    # Unbind from current driver
    if old_driver is not None:
        cmd = f"sh -c \"echo {pci_addr} > " \
            f"/sys/bus/pci/drivers/{old_driver}/unbind\""
        ret_code, _, _ = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")

    # Bind to the new driver
    cmd = f"sh -c \"echo {pci_addr} > /sys/bus/pci/drivers/{driver}/bind\""
    ret_code, _, _ = ssh.exec_command_sudo(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
def tg_get_interface_driver(node, pci_addr):
    """Get interface driver from the TG node.

    :param node: Node to get interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :type node: dict
    :type pci_addr: str
    :returns: Interface driver or None if not found.
    :rtype: str
    :raises RuntimeError: If PCI rescan or lspci command execution failed.
    """
    return DUTSetup.get_pci_dev_driver(node, pci_addr)
def tg_set_interfaces_default_driver(node):
    """Set interfaces default driver specified in topology yaml file.

    :param node: Node to setup interfaces driver on (must be TG node).
    :type node: dict
    """
    for interface in node[u"interfaces"].values():
        InterfaceUtil.tg_set_interface_driver(
            node, interface[u"pci_address"], interface[u"driver"]
        )
def update_vpp_interface_data_on_node(node):
    """Update vpp generated interface data for a given node in DICT__nodes.

    Updates interface names, software if index numbers and any other details
    generated specifically by vpp that are unknown before testcase run.
    It does this by dumping interface list from all devices using python
    api, and pairing known information from topology (mac address) to state
    of the node.

    :param node: Node selected from DICT__nodes.
    :type node: dict
    """
    interface_list = InterfaceUtil.vpp_get_interface_data(node)
    # Index the dump by MAC address for pairing with topology entries.
    interface_dict = dict()
    for ifc in interface_list:
        interface_dict[ifc[u"l2_address"]] = ifc

    for if_name, if_data in node[u"interfaces"].items():
        ifc_dict = interface_dict.get(if_data[u"mac_address"])
        if ifc_dict is not None:
            if_data[u"name"] = ifc_dict[u"interface_name"]
            if_data[u"vpp_sw_index"] = ifc_dict[u"sw_if_index"]
            if_data[u"mtu"] = ifc_dict[u"mtu"][0]
            logger.debug(
                f"Interface {if_name} found by MAC "
                f"{if_data[u'mac_address']}"
            )
        else:
            logger.debug(
                f"Interface {if_name} not found by MAC "
                f"{if_data[u'mac_address']}"
            )
            if_data[u"vpp_sw_index"] = None
def update_nic_interface_names(node):
    """Update interface names based on nic type and PCI address.

    This method updates interface names in the same format as VPP does.

    :param node: Node dictionary.
    :type node: dict
    """
    for ifc in node[u"interfaces"].values():
        if_pci = ifc[u"pci_address"].replace(u".", u":").split(u":")
        # VPP names use bus/device/function in lowercase hex.
        loc = f"{int(if_pci[1], 16):x}/{int(if_pci[2], 16):x}/" \
            f"{int(if_pci[3], 16):x}"
        if ifc[u"model"] == u"Intel-XL710":
            ifc[u"name"] = f"FortyGigabitEthernet{loc}"
        elif ifc[u"model"] == u"Intel-X710":
            ifc[u"name"] = f"TenGigabitEthernet{loc}"
        elif ifc[u"model"] == u"Intel-X520-DA2":
            ifc[u"name"] = f"TenGigabitEthernet{loc}"
        elif ifc[u"model"] == u"Cisco-VIC-1385":
            ifc[u"name"] = f"FortyGigabitEthernet{loc}"
        elif ifc[u"model"] == u"Cisco-VIC-1227":
            ifc[u"name"] = f"TenGigabitEthernet{loc}"
        else:
            ifc[u"name"] = f"UnknownEthernet{loc}"
def update_nic_interface_names_on_all_duts(nodes):
    """Update interface names based on nic type and PCI address on all DUTs.

    This method updates interface names in the same format as VPP does.

    :param nodes: Topology nodes.
    :type nodes: dict
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.update_nic_interface_names(node)
def update_tg_interface_data_on_node(node):
    """Update interface name for TG/linux node in DICT__nodes.

    .. note::
        # for dev in `ls /sys/class/net/`;
        > do echo "\\"`cat /sys/class/net/$dev/address`\\": \\"$dev\\""; done
        "52:54:00:9f:82:63": "eth0"
        "52:54:00:77:ae:a9": "eth1"
        "52:54:00:e1:8a:0f": "eth2"
        "00:00:00:00:00:00": "lo"

    :param node: Node selected from DICT__nodes.
    :type node: dict
    :raises RuntimeError: If getting of interface name and MAC fails.
    """
    # First setup interface driver specified in yaml file
    InterfaceUtil.tg_set_interfaces_default_driver(node)

    # Get interface names
    ssh = SSH()
    ssh.connect(node)

    cmd = u'for dev in `ls /sys/class/net/`; do echo "\\"`cat ' \
        u'/sys/class/net/$dev/address`\\": \\"$dev\\""; done;'

    ret_code, stdout, _ = ssh.exec_command(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(u"Get interface name and MAC failed")
    # Wrap the emitted "mac": "dev" lines into a JSON object.
    tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"

    interfaces = JsonParser().parse_data(tmp)
    for interface in node[u"interfaces"].values():
        name = interfaces.get(interface[u"mac_address"])
        if name is None:
            continue
        interface[u"name"] = name
def iface_update_numa_node(node):
    """For all interfaces from topology file update numa node based on
    information from the node.

    :param node: Node from topology.
    :type node: dict
    :returns: Nothing.
    :raises ValueError: If numa node is less than 0.
    :raises RuntimeError: If update of numa node failed.
    """
    ssh = SSH()
    for if_key in Topology.get_node_interfaces(node):
        if_pci = Topology.get_interface_pci_addr(node, if_key)
        ssh.connect(node)
        cmd = f"cat /sys/bus/pci/devices/{if_pci}/numa_node"
        # Retry a few times; sysfs reads can transiently fail.
        for _ in range(3):
            ret, out, _ = ssh.exec_command(cmd)
            if ret == 0:
                try:
                    # Kernel reports -1 when numa is unknown; map to 0.
                    numa_node = 0 if int(out) < 0 else int(out)
                except ValueError:
                    logger.trace(
                        f"Reading numa location failed for: {if_pci}"
                    )
                else:
                    Topology.set_interface_numa_node(
                        node, if_key, numa_node
                    )
                    break
        else:
            raise RuntimeError(f"Update numa node failed for: {if_pci}")
def update_all_interface_data_on_all_nodes(nodes, skip_tg=False, skip_vpp=False):
    """Update interface names on all nodes in DICT__nodes.

    This method updates the topology dictionary by querying interface lists
    of all nodes mentioned in the topology dictionary.

    :param nodes: Nodes in the topology.
    :param skip_tg: Skip TG node.
    :param skip_vpp: Skip VPP node.
    :type nodes: dict
    :type skip_tg: bool
    :type skip_vpp: bool
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT and not skip_vpp:
            InterfaceUtil.update_vpp_interface_data_on_node(node)
        elif node[u"type"] == NodeType.TG and not skip_tg:
            InterfaceUtil.update_tg_interface_data_on_node(node)
        InterfaceUtil.iface_update_numa_node(node)
def create_vlan_subinterface(node, interface, vlan):
    """Create VLAN sub-interface on node.

    :param node: Node to add VLAN subinterface on.
    :param interface: Interface name or index on which create VLAN
        subinterface.
    :param vlan: VLAN ID of the subinterface to be created.
    :type node: dict
    :type interface: str on int
    :type vlan: int
    :returns: Name and index of created subinterface.
    :rtype: tuple
    :raises RuntimeError: if it is unable to create VLAN subinterface on the
        node or interface cannot be converted.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"create_vlan_subif"
    args = dict(
        sw_if_index=sw_if_index,
        vlan_id=int(vlan)
    )
    err_msg = f"Failed to create VLAN sub-interface on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new port in the topology so later keywords can use it.
    if_key = Topology.add_new_port(node, u"vlan_subif")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{vlan}", sw_if_index
def create_vxlan_interface(node, vni, source_ip, destination_ip):
    """Create VXLAN interface and return sw if index of created interface.

    :param node: Node where to create VXLAN interface.
    :param vni: VXLAN Network Identifier.
    :param source_ip: Source IP of a VXLAN Tunnel End Point.
    :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
    :type node: dict
    :type vni: int
    :type source_ip: str
    :type destination_ip: str
    :returns: SW IF INDEX of created interface.
    :rtype: int
    :raises RuntimeError: if it is unable to create VxLAN interface on the
        node.
    """
    cmd = u"vxlan_add_del_tunnel_v3"
    args = dict(
        is_add=True,
        # BITWISE_NON_ZERO lets VPP auto-assign instance/mcast/decap.
        instance=Constants.BITWISE_NON_ZERO,
        src_address=IPAddress.create_ip_address_object(
            ip_address(source_ip)
        ),
        dst_address=IPAddress.create_ip_address_object(
            ip_address(destination_ip)
        ),
        mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
        encap_vrf_id=0,
        decap_next_index=Constants.BITWISE_NON_ZERO,
        vni=int(vni)
    )
    err_msg = f"Failed to create VXLAN tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"vxlan_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return sw_if_index
def set_vxlan_bypass(node, interface=None):
    """Add the 'ip4-vxlan-bypass' graph node for a given interface.

    By adding the IPv4 vxlan-bypass graph node to an interface, the node
    checks for and validate input vxlan packet and bypass ip4-lookup,
    ip4-local, ip4-udp-lookup nodes to speedup vxlan packet forwarding.
    This node will cause extra overhead to for non-vxlan packets which is
    kept at a minimum.

    :param node: Node where to set VXLAN bypass.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :raises RuntimeError: if it failed to set VXLAN bypass on interface.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"sw_interface_set_vxlan_bypass"
    args = dict(
        is_ipv6=False,
        sw_if_index=sw_if_index,
        enable=True
    )
    err_msg = f"Failed to set VXLAN bypass on interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_replies(err_msg)
def vxlan_dump(node, interface=None):
    """Get VxLAN data for the given interface.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
        If None, information about all VxLAN interfaces is returned.
    :type node: dict
    :type interface: int or str
    :returns: Dictionary containing data for the given VxLAN interface or if
        interface=None, the list of dictionaries with all VxLAN interfaces.
    :rtype: dict or list
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    def process_vxlan_dump(vxlan_dump):
        """Process vxlan dump.

        :param vxlan_dump: Vxlan interface dump.
        :type vxlan_dump: dict
        :returns: Processed vxlan interface dump.
        :rtype: dict
        """
        vxlan_dump[u"src_address"] = str(vxlan_dump[u"src_address"])
        vxlan_dump[u"dst_address"] = str(vxlan_dump[u"dst_address"])
        return vxlan_dump

    if interface is not None:
        sw_if_index = InterfaceUtil.get_interface_index(node, interface)
    else:
        # Dump all tunnels when no interface filter was given.
        sw_if_index = int(Constants.BITWISE_NON_ZERO)

    cmd = u"vxlan_tunnel_dump"
    args = dict(
        sw_if_index=sw_if_index
    )
    err_msg = f"Failed to get VXLAN dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)

    data = list() if interface is None else dict()
    for dump in details:
        if interface is None:
            data.append(process_vxlan_dump(dump))
        elif dump[u"sw_if_index"] == sw_if_index:
            data = process_vxlan_dump(dump)
            break

    logger.debug(f"VXLAN data:\n{data}")
    return data
def create_subinterface(
        node, interface, sub_id, outer_vlan_id=None, inner_vlan_id=None,
        type_subif=None):
    """Create sub-interface on node. It is possible to set required
    sub-interface type and VLAN tag(s).

    :param node: Node to add sub-interface.
    :param interface: Interface name on which create sub-interface.
    :param sub_id: ID of the sub-interface to be created.
    :param outer_vlan_id: Optional outer VLAN ID.
    :param inner_vlan_id: Optional inner VLAN ID.
    :param type_subif: Optional type of sub-interface. Values supported by
        VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
        [default_sub]
    :type node: dict
    :type interface: str or int
    :type sub_id: int
    :type outer_vlan_id: int
    :type inner_vlan_id: int
    :type type_subif: str
    :returns: Name and index of created sub-interface.
    :rtype: tuple
    :raises RuntimeError: If it is not possible to create sub-interface.
    """
    # Robustness: tolerate type_subif=None (treated as no flags).
    subif_types = type_subif.split() if type_subif else list()

    flags = 0
    if u"no_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_NO_TAGS
    if u"one_tag" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_ONE_TAG
    if u"two_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_TWO_TAGS
    if u"dot1ad" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DOT1AD
    if u"exact_match" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_EXACT_MATCH
    if u"default_sub" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DEFAULT
    if type_subif == u"default_sub":
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_INNER_VLAN_ID_ANY\
            | SubInterfaceFlags.SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY

    cmd = u"create_subif"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        sub_id=int(sub_id),
        sub_if_flags=flags.value if hasattr(flags, u"value")
        else int(flags),
        outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
        inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0
    )
    err_msg = f"Failed to create sub-interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"subinterface")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{sub_id}", sw_if_index
def create_gre_tunnel_interface(node, source_ip, destination_ip):
    """Create GRE tunnel interface on node.

    :param node: VPP node to add tunnel interface.
    :param source_ip: Source of the GRE tunnel.
    :param destination_ip: Destination of the GRE tunnel.
    :type node: dict
    :type source_ip: str
    :type destination_ip: str
    :returns: Name and index of created GRE tunnel interface.
    :rtype: tuple
    :raises RuntimeError: If unable to create GRE tunnel interface.
    """
    cmd = u"gre_tunnel_add_del"
    tunnel = dict(
        type=0,
        instance=Constants.BITWISE_NON_ZERO,
        src=str(source_ip),
        dst=str(destination_ip),
        outer_fib_id=0,
        session_id=0
    )
    args = dict(
        is_add=1,
        tunnel=tunnel
    )
    err_msg = f"Failed to create GRE tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"gre_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return ifc_name, sw_if_index
def create_gtpu_tunnel_interface(node, teid, source_ip, destination_ip):
    """Create GTPU interface and return sw if index of created interface.

    :param node: Node where to create GTPU interface.
    :param teid: GTPU Tunnel Endpoint Identifier.
    :param source_ip: Source IP of a GTPU Tunnel End Point.
    :param destination_ip: Destination IP of a GTPU Tunnel End Point.
    :type node: dict
    :type teid: int
    :type source_ip: str
    :type destination_ip: str
    :returns: SW IF INDEX of created interface.
    :rtype: int
    :raises RuntimeError: if it is unable to create GTPU interface on the
        node.
    """
    cmd = u"gtpu_add_del_tunnel"
    args = dict(
        is_add=True,
        src_address=IPAddress.create_ip_address_object(
            ip_address(source_ip)
        ),
        dst_address=IPAddress.create_ip_address_object(
            ip_address(destination_ip)
        ),
        mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
        encap_vrf_id=0,
        decap_next_index=2,  # ipv4
        teid=teid
    )
    err_msg = f"Failed to create GTPU tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"gtpu_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return sw_if_index
def vpp_enable_gtpu_offload_rx(node, interface, gtpu_if_index):
    """Enable GTPU offload RX onto interface.

    :param node: Node to run command on.
    :param interface: Name of the specific interface.
    :param gtpu_if_index: Index of GTPU tunnel interface.
    :type node: dict
    :type interface: str
    :type gtpu_if_index: int
    """
    sw_if_index = Topology.get_interface_sw_index(node, interface)

    cmd = u"gtpu_offload_rx"
    args = dict(
        hw_if_index=sw_if_index,
        sw_if_index=gtpu_if_index,
        enable=True
    )
    err_msg = f"Failed to enable GTPU offload RX on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def vpp_create_loopback(node, mac=None):
    """Create loopback interface on VPP node.

    :param node: Node to create loopback interface on.
    :param mac: Optional MAC address for loopback interface.
    :type node: dict
    :type mac: str
    :returns: SW interface index.
    :rtype: int
    :raises RuntimeError: If it is not possible to create loopback on the
        node.
    """
    cmd = u"create_loopback_instance"
    args = dict(
        mac_address=L2Util.mac_to_bin(mac) if mac else 0,
        is_specified=False,
        user_instance=0,
    )
    err_msg = f"Failed to create loopback interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the loopback in the in-memory topology.
    if_key = Topology.add_new_port(node, u"loopback")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
    if mac:
        # Read back the MAC actually set by VPP to keep topology accurate.
        mac = InterfaceUtil.vpp_get_interface_mac(node, ifc_name)
        Topology.update_interface_mac_address(node, if_key, mac)

    return sw_if_index
def vpp_create_bond_interface(
        node, mode, load_balance=None, mac=None, gso=False):
    """Create bond interface on VPP node.

    :param node: DUT node from topology.
    :param mode: Link bonding mode.
    :param load_balance: Load balance (optional, valid for xor and lacp
        modes, otherwise ignored). Default: None.
    :param mac: MAC address to assign to the bond interface (optional).
        Default: None.
    :param gso: Enable GSO support (optional). Default: False.
    :type node: dict
    :type mode: str
    :type load_balance: str
    :type mac: str
    :type gso: bool
    :returns: Interface key (name) in topology.
    :rtype: str
    :raises RuntimeError: If it is not possible to create bond interface on
        the node.
    """
    cmd = u"bond_create2"
    args = dict(
        id=int(Constants.BITWISE_NON_ZERO),
        use_custom_mac=bool(mac is not None),
        mac_address=L2Util.mac_to_bin(mac) if mac else None,
        # Map e.g. "lacp" / "xor" to the BondMode API enum value.
        mode=getattr(
            LinkBondMode,
            f"BOND_API_MODE_{mode.replace(u'-', u'_').upper()}"
        ).value,
        # lb is ignored by VPP for modes other than xor/lacp.
        lb=0 if load_balance is None else getattr(
            LinkBondLoadBalanceAlgo,
            f"BOND_API_LB_ALGO_{load_balance.upper()}"
        ).value,
        numa_only=False,
        enable_gso=gso,
    )
    err_msg = f"Failed to create bond interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx=u"eth_bond"
    )
    if_key = Topology.get_interface_by_sw_index(node, sw_if_index)

    return if_key
def add_eth_interface(
        node, ifc_name=None, sw_if_index=None, ifc_pfx=None,
        host_if_key=None):
    """Add ethernet interface to current topology.

    Exactly one of ifc_name / sw_if_index is expected; the other one is
    resolved via VPP.

    :param node: DUT node from topology.
    :param ifc_name: Name of the interface.
    :param sw_if_index: SW interface index.
    :param ifc_pfx: Interface key prefix.
    :param host_if_key: Host interface key from topology file.
    :type node: dict
    :type ifc_name: str
    :type sw_if_index: int
    :type ifc_pfx: str
    :type host_if_key: str
    """
    if_key = Topology.add_new_port(node, ifc_pfx)

    if ifc_name and sw_if_index is None:
        sw_if_index = InterfaceUtil.vpp_get_interface_sw_index(
            node, ifc_name
        )
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    if sw_if_index and ifc_name is None:
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
    ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_index)
    Topology.update_interface_mac_address(node, if_key, ifc_mac)
    if host_if_key is not None:
        # Inherit NUMA node and PCI address from the underlying host port.
        Topology.set_interface_numa_node(
            node, if_key, Topology.get_interface_numa_node(
                node, host_if_key
            )
        )
        Topology.update_interface_pci_address(
            node, if_key, Topology.get_interface_pci_addr(node, host_if_key)
        )
def vpp_create_avf_interface(
        node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0):
    """Create AVF interface on VPP node.

    :param node: DUT node from topology.
    :param if_key: Interface key from topology file of interface
        to be bound to i40evf driver.
    :param num_rx_queues: Number of RX queues.
    :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
    :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
    :type node: dict
    :type if_key: str
    :type num_rx_queues: int
    :type rxq_size: int
    :type txq_size: int
    :returns: AVF interface key (name) in topology.
    :rtype: str
    :raises AssertionError: If it is not possible to create AVF interface on
        the node after repeated attempts.
    """
    PapiSocketExecutor.run_cli_cmd(
        node, u"set logging class avf level debug"
    )

    cmd = u"avf_create"
    vf_pci_addr = Topology.get_interface_pci_addr(node, if_key)
    args = dict(
        pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
        enable_elog=0,
        rxq_num=int(num_rx_queues) if num_rx_queues else 0,
        rxq_size=rxq_size,
        txq_size=txq_size,
    )
    err_msg = f"Failed to create AVF interface on host {node[u'host']}"

    # FIXME: Remove once the fw/driver is upgraded.
    # Interface creation may fail transiently; retry a bounded number of
    # times before giving up.
    for _ in range(10):
        with PapiSocketExecutor(node) as papi_exec:
            try:
                sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(
                    err_msg
                )
                break
            except AssertionError:
                logger.error(err_msg)
    else:
        # All retries exhausted without a successful creation.
        raise AssertionError(err_msg)

    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
        host_if_key=if_key
    )

    return Topology.get_interface_by_sw_index(node, sw_if_index)
def vpp_create_af_xdp_interface(
        node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
        mode=u"auto"):
    """Create AF_XDP interface on VPP node.

    :param node: DUT node from topology.
    :param if_key: Physical interface key from topology file of interface
        to be bound to compatible driver.
    :param num_rx_queues: Number of RX queues. (Optional, Default: none)
    :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
    :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
    :param mode: AF_XDP interface mode. (Optional, Default: auto).
    :type node: dict
    :type if_key: str
    :type num_rx_queues: int
    :type rxq_size: int
    :type txq_size: int
    :type mode: str
    :returns: Interface key (name) in topology file.
    :rtype: str
    :raises RuntimeError: If it is not possible to create AF_XDP interface
        on the node.
    """
    PapiSocketExecutor.run_cli_cmd(
        node, u"set logging class af_xdp level debug"
    )

    cmd = u"af_xdp_create"
    pci_addr = Topology.get_interface_pci_addr(node, if_key)
    args = dict(
        name=InterfaceUtil.pci_to_eth(node, pci_addr),
        host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
        rxq_num=int(num_rx_queues) if num_rx_queues else 0,
        rxq_size=rxq_size,
        txq_size=txq_size,
        mode=getattr(AfXdpMode, f"AF_XDP_API_MODE_{mode.upper()}").value
    )
    err_msg = f"Failed to create AF_XDP interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Keep the MAC of the underlying host interface on the new port.
    InterfaceUtil.vpp_set_interface_mac(
        node, sw_if_index, Topology.get_interface_mac(node, if_key)
    )
    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx=u"eth_af_xdp",
        host_if_key=if_key
    )

    return Topology.get_interface_by_sw_index(node, sw_if_index)
def vpp_create_rdma_interface(
        node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
        mode=u"auto"):
    """Create RDMA interface on VPP node.

    :param node: DUT node from topology.
    :param if_key: Physical interface key from topology file of interface
        to be bound to rdma-core driver.
    :param num_rx_queues: Number of RX queues.
    :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
    :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
    :param mode: RDMA interface mode - auto/ibv/dv.
    :type node: dict
    :type if_key: str
    :type num_rx_queues: int
    :type rxq_size: int
    :type txq_size: int
    :type mode: str
    :returns: Interface key (name) in topology file.
    :rtype: str
    :raises RuntimeError: If it is not possible to create RDMA interface on
        the node.
    """
    PapiSocketExecutor.run_cli_cmd(
        node, u"set logging class rdma level debug"
    )

    cmd = u"rdma_create_v3"
    pci_addr = Topology.get_interface_pci_addr(node, if_key)
    args = dict(
        name=InterfaceUtil.pci_to_eth(node, pci_addr),
        host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
        rxq_num=int(num_rx_queues) if num_rx_queues else 0,
        rxq_size=rxq_size,
        txq_size=txq_size,
        mode=getattr(RdmaMode, f"RDMA_API_MODE_{mode.upper()}").value,
        # Note: Set True for non-jumbo packets.
        no_multi_seg=False,
        max_pktlen=0,
        # TODO: Apply desired RSS flags.
    )
    err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Keep the MAC of the underlying host interface on the new port.
    InterfaceUtil.vpp_set_interface_mac(
        node, sw_if_index, Topology.get_interface_mac(node, if_key)
    )
    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx=u"eth_rdma",
        host_if_key=if_key
    )

    return Topology.get_interface_by_sw_index(node, sw_if_index)
def vpp_add_bond_member(node, interface, bond_if):
    """Add member interface to bond interface on VPP node.

    :param node: DUT node from topology.
    :param interface: Physical interface key from topology file.
    :param bond_if: Bond interface key from topology file.
    :type node: dict
    :type interface: str
    :type bond_if: str
    :raises RuntimeError: If it is not possible to add member to bond
        interface on the node.
    """
    cmd = u"bond_add_member"
    args = dict(
        sw_if_index=Topology.get_interface_sw_index(node, interface),
        bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
        is_passive=False,
        is_long_timeout=False
    )
    err_msg = f"Failed to add member {interface} to bond interface " \
        f"{bond_if} on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def vpp_show_bond_data_on_node(node, verbose=False):
    """Show (detailed) bond information on VPP node.

    :param node: DUT node from topology.
    :param verbose: If detailed information is required or not.
    :type node: dict
    :type verbose: bool
    """
    cmd = u"sw_bond_interface_dump"
    err_msg = f"Failed to get bond interface dump on host {node[u'host']}"

    data = f"Bond data on node {node[u'host']}:\n"
    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd).get_details(err_msg)

    for bond in details:
        data += f"{bond[u'interface_name']}\n"
        data += u"  mode: {m}\n".format(
            m=bond[u"mode"].name.replace(u"BOND_API_MODE_", u"").lower()
        )
        data += u"  load balance: {lb}\n".format(
            lb=bond[u"lb"].name.replace(u"BOND_API_LB_ALGO_", u"").lower()
        )
        data += f"  number of active members: {bond[u'active_members']}\n"
        if verbose:
            member_data = InterfaceUtil.vpp_bond_member_dump(
                node, Topology.get_interface_by_sw_index(
                    node, bond[u"sw_if_index"]
                )
            )
            # Active members first.
            for member in member_data:
                if not member[u"is_passive"]:
                    data += f"    {member[u'interface_name']}\n"
        data += f"  number of members: {bond[u'members']}\n"
        if verbose:
            for member in member_data:
                data += f"    {member[u'interface_name']}\n"
        data += f"  interface id: {bond[u'id']}\n"
        data += f"  sw_if_index: {bond[u'sw_if_index']}\n"
    logger.info(data)
def vpp_bond_member_dump(node, interface):
    """Get bond interface member(s) data on VPP node.

    :param node: DUT node from topology.
    :param interface: Physical interface key from topology file.
    :type node: dict
    :type interface: str
    :returns: Bond member interface data.
    :rtype: dict
    """
    cmd = u"sw_member_interface_dump"
    args = dict(
        sw_if_index=Topology.get_interface_sw_index(node, interface)
    )
    err_msg = f"Failed to get slave dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)

    logger.debug(f"Member data:\n{details}")
    return details
def vpp_show_bond_data_on_all_nodes(nodes, verbose=False):
    """Show (detailed) bond information on all VPP nodes in DICT__nodes.

    :param nodes: Nodes in the topology.
    :param verbose: If detailed information is required or not.
    :type nodes: dict
    :type verbose: bool
    """
    for node_data in nodes.values():
        # Bond interfaces exist only on DUT (VPP) nodes.
        if node_data[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_show_bond_data_on_node(node_data, verbose)
def vpp_enable_input_acl_interface(
        node, interface, ip_version, table_index):
    """Enable input acl on interface.

    :param node: VPP node to setup interface for input acl.
    :param interface: Interface to setup input acl.
    :param ip_version: Version of IP protocol (ip4/ip6/l2).
    :param table_index: Classify table index.
    :type node: dict
    :type interface: str or int
    :type ip_version: str
    :type table_index: int
    """
    cmd = u"input_acl_set_interface"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        # Only the table matching ip_version is set; others stay unset.
        ip4_table_index=table_index if ip_version == u"ip4"
        else Constants.BITWISE_NON_ZERO,
        ip6_table_index=table_index if ip_version == u"ip6"
        else Constants.BITWISE_NON_ZERO,
        l2_table_index=table_index if ip_version == u"l2"
        else Constants.BITWISE_NON_ZERO,
        is_add=1
    )
    err_msg = f"Failed to enable input acl on interface {interface}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def get_interface_classify_table(node, interface):
    """Get name of classify table for the given interface.

    TODO: Move to Classify.py.

    :param node: VPP node to get data from.
    :param interface: Name or sw_if_index of a specific interface.
    :type node: dict
    :type interface: str or int
    :returns: Classify table name.
    :rtype: str
    """
    if isinstance(interface, str):
        sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
    else:
        sw_if_index = interface

    cmd = u"classify_table_by_interface"
    args = dict(
        sw_if_index=sw_if_index
    )
    err_msg = f"Failed to get classify table name by interface {interface}"
    with PapiSocketExecutor(node) as papi_exec:
        reply = papi_exec.add(cmd, **args).get_reply(err_msg)

    return reply
def get_sw_if_index(node, interface_name):
    """Get sw_if_index for the given interface from actual interface dump.

    FIXME: Delete and redirect callers to vpp_get_interface_sw_index.

    :param node: VPP node to get interface data from.
    :param interface_name: Name of the specific interface.
    :type node: dict
    :type interface_name: str
    :returns: sw_if_index of the given interface.
    :rtype: str
    """
    interface_data = InterfaceUtil.vpp_get_interface_data(
        node, interface=interface_name
    )
    return interface_data.get(u"sw_if_index")
def vxlan_gpe_dump(node, interface_name=None):
    """Get VxLAN GPE data for the given interface.

    :param node: VPP node to get interface data from.
    :param interface_name: Name of the specific interface. If None,
        information about all VxLAN GPE interfaces is returned.
    :type node: dict
    :type interface_name: str
    :returns: Dictionary containing data for the given VxLAN GPE interface
        or if interface=None, the list of dictionaries with all VxLAN GPE
        interfaces.
    :rtype: dict or list
    """
    def process_vxlan_gpe_dump(vxlan_dump):
        """Process vxlan_gpe dump.

        :param vxlan_dump: Vxlan_gpe interface dump.
        :type vxlan_dump: dict
        :returns: Processed vxlan_gpe interface dump.
        :rtype: dict
        """
        if vxlan_dump[u"is_ipv6"]:
            vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"])
            vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"])
        else:
            # IPv4 addresses occupy the first 4 bytes of the field.
            vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"][0:4])
            vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"][0:4])
        return vxlan_dump

    if interface_name is not None:
        sw_if_index = InterfaceUtil.get_interface_index(
            node, interface_name
        )
    else:
        # Dump all tunnels: sw_if_index unset.
        sw_if_index = int(Constants.BITWISE_NON_ZERO)

    cmd = u"vxlan_gpe_tunnel_dump"
    args = dict(
        sw_if_index=sw_if_index
    )
    err_msg = f"Failed to get VXLAN-GPE dump on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)

    data = list() if interface_name is None else dict()
    for dump in details:
        if interface_name is None:
            data.append(process_vxlan_gpe_dump(dump))
        elif dump[u"sw_if_index"] == sw_if_index:
            data = process_vxlan_gpe_dump(dump)
            break

    logger.debug(f"VXLAN-GPE data:\n{data}")
    return data
def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
    """Assign VPP interface to specific VRF/FIB table.

    :param node: VPP node where the FIB and interface are located.
    :param interface: Interface to be assigned to FIB.
    :param table_id: VRF table ID.
    :param ipv6: Assign to IPv6 table. Default False.
    :type node: dict
    :type interface: str or int
    :type table_id: int
    :type ipv6: bool
    """
    cmd = u"sw_interface_set_table"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        is_ipv6=ipv6,
        vrf_id=int(table_id)
    )
    err_msg = f"Failed to assign interface {interface} to FIB table"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def set_linux_interface_mac(
        node, interface, mac, namespace=None, vf_id=None):
    """Set MAC address for interface in linux.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param mac: MAC to be assigned to interface.
    :param namespace: Execute command in namespace. Optional
    :param vf_id: Virtual Function id. Optional
    :type node: dict
    :type interface: str
    :type mac: str
    :type namespace: str
    :type vf_id: int
    """
    # When a VF id is given, set the MAC on the VF of the PF interface.
    mac_str = f"vf {vf_id} mac {mac}" if vf_id is not None \
        else f"address {mac}"
    ns_str = f"ip netns exec {namespace}" if namespace else u""

    cmd = f"{ns_str} ip link set {interface} {mac_str}"
    exec_cmd_no_error(node, cmd, sudo=True)
def set_linux_interface_promisc(
        node, interface, namespace=None, vf_id=None, state=u"on"):
    """Set promisc state for interface in linux.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param namespace: Exec command in namespace. (Optional, Default: None)
    :param vf_id: Virtual Function id. (Optional, Default: None)
    :param state: State of feature. (Optional, Default: on)
    :type node: dict
    :type interface: str
    :type namespace: str
    :type vf_id: int
    :type state: str
    """
    # When a VF id is given, toggle promisc on the VF of the PF interface.
    promisc_str = f"vf {vf_id} promisc {state}" if vf_id is not None \
        else f"promisc {state}"
    ns_str = f"ip netns exec {namespace}" if namespace else u""

    cmd = f"{ns_str} ip link set dev {interface} {promisc_str}"
    exec_cmd_no_error(node, cmd, sudo=True)
def set_linux_interface_trust_on(
        node, interface, namespace=None, vf_id=None):
    """Set trust on (promisc) for interface in linux.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param namespace: Execute command in namespace. Optional
    :param vf_id: Virtual Function id. Optional
    :type node: dict
    :type interface: str
    :type namespace: str
    :type vf_id: int
    """
    trust_str = f"vf {vf_id} trust on" if vf_id is not None else u"trust on"
    ns_str = f"ip netns exec {namespace}" if namespace else u""

    cmd = f"{ns_str} ip link set dev {interface} {trust_str}"
    exec_cmd_no_error(node, cmd, sudo=True)
def set_linux_interface_spoof_off(
        node, interface, namespace=None, vf_id=None):
    """Set spoof off for interface in linux.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param namespace: Execute command in namespace. Optional
    :param vf_id: Virtual Function id. Optional
    :type node: dict
    :type interface: str
    :type namespace: str
    :type vf_id: int
    """
    spoof_str = f"vf {vf_id} spoof off" if vf_id is not None \
        else u"spoof off"
    ns_str = f"ip netns exec {namespace}" if namespace else u""

    cmd = f"{ns_str} ip link set dev {interface} {spoof_str}"
    exec_cmd_no_error(node, cmd, sudo=True)
def set_linux_interface_state(
        node, interface, namespace=None, state=u"up"):
    """Set operational state for interface in linux.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param namespace: Execute command in namespace. Optional
    :param state: Up/Down.
    :type node: dict
    :type interface: str
    :type namespace: str
    :type state: str
    """
    ns_str = f"ip netns exec {namespace}" if namespace else u""

    cmd = f"{ns_str} ip link set dev {interface} {state}"
    exec_cmd_no_error(node, cmd, sudo=True)
def init_interface(node, ifc_key, driver, numvfs=0, osi_layer=u"L2"):
    """Init PCI device. Check driver compatibility and bind to proper
    drivers. Optionally create NIC VFs.

    :param node: DUT node.
    :param ifc_key: Interface key from topology file.
    :param driver: Base driver to use.
    :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
    :param osi_layer: OSI Layer type to initialize TG with.
        Default value "L2" sets linux interface spoof off.
    :type node: dict
    :type ifc_key: str
    :type driver: str
    :type numvfs: int
    :type osi_layer: str
    :returns: Virtual Function topology interface keys.
    :rtype: list
    :raises RuntimeError: If a reason preventing initialization is found.
    """
    kernel_driver = Topology.get_interface_driver(node, ifc_key)
    if driver == u"avf":
        if kernel_driver not in (
                u"ice", u"iavf", u"i40e", u"i40evf"):
            raise RuntimeError(
                f"AVF needs ice or i40e compatible driver, not "
                f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
            )
        vf_keys = InterfaceUtil.init_generic_interface(
            node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
        )
    elif driver == u"af_xdp":
        if kernel_driver not in (
                u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core",
                u"ixgbe"):
            raise RuntimeError(
                f"AF_XDP needs ice/i40e/rdma/ixgbe compatible driver, not "
                f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
            )
        vf_keys = InterfaceUtil.init_generic_interface(
            node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
        )
    elif driver == u"rdma-core":
        vf_keys = InterfaceUtil.init_generic_interface(
            node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
        )
    else:
        # Previously an unknown driver fell through and raised NameError
        # on return; fail explicitly instead.
        raise RuntimeError(
            f"Unsupported driver {driver} at node {node[u'host']} "
            f"ifc {ifc_key}"
        )
    return vf_keys
def init_generic_interface(node, ifc_key, numvfs=0, osi_layer=u"L2"):
    """Init PCI device. Bind to proper drivers. Optionally create NIC VFs.

    :param node: DUT node.
    :param ifc_key: Interface key from topology file.
    :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
    :param osi_layer: OSI Layer type to initialize TG with.
        Default value "L2" sets linux interface spoof off.
    :type node: dict
    :type ifc_key: str
    :type numvfs: int
    :type osi_layer: str
    :returns: Virtual Function topology interface keys.
    :rtype: list
    :raises RuntimeError: If a reason preventing initialization is found.
    """
    # Read PCI address and driver.
    pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
    pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
    uio_driver = Topology.get_uio_driver(node)
    kernel_driver = Topology.get_interface_driver(node, ifc_key)
    current_driver = DUTSetup.get_pci_dev_driver(
        node, pf_pci_addr.replace(u":", r"\:"))
    # Shell back-tick expression resolving the netdev name of the PF.
    pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"

    # Stop VPP to prevent deadlock.
    VPPUtil.stop_vpp_service(node)
    if current_driver != kernel_driver:
        # PCI device must be re-bound to kernel driver before creating VFs.
        DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
        # Unbind from current driver if bound.
        if current_driver:
            DUTSetup.pci_driver_unbind(node, pf_pci_addr)
        # Bind to kernel driver.
        DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)

    # Initialize PCI VFs.
    DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)

    if not numvfs:
        if osi_layer == u"L2":
            InterfaceUtil.set_linux_interface_promisc(node, pf_dev)

    vf_ifc_keys = []
    # Set MAC address and bind each virtual function to uio driver.
    for vf_id in range(numvfs):
        # Derive a unique VF MAC from the PF MAC and the VF id.
        vf_mac_addr = u":".join(
            [pf_mac_addr[0], pf_mac_addr[2], pf_mac_addr[3], pf_mac_addr[4],
             pf_mac_addr[5], f"{vf_id:02x}"
             ]
        )

        InterfaceUtil.set_linux_interface_trust_on(
            node, pf_dev, vf_id=vf_id
        )
        if osi_layer == u"L2":
            InterfaceUtil.set_linux_interface_spoof_off(
                node, pf_dev, vf_id=vf_id
            )
        InterfaceUtil.set_linux_interface_mac(
            node, pf_dev, vf_mac_addr, vf_id=vf_id
        )
        InterfaceUtil.set_linux_interface_state(
            node, pf_dev, state=u"up"
        )

        DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
        DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)

        # Add newly created ports into topology file
        vf_ifc_name = f"{ifc_key}_vif"
        vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
        vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
        Topology.update_interface_name(
            node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
        )
        Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
        Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
        Topology.set_interface_numa_node(
            node, vf_ifc_key, Topology.get_interface_numa_node(
                node, ifc_key
            )
        )
        vf_ifc_keys.append(vf_ifc_key)

    return vf_ifc_keys
def vpp_sw_interface_rx_placement_dump(node):
    """Dump VPP interface RX placement on node.

    :param node: Node to run command on.
    :type node: dict
    :returns: Thread mapping information as a list of dictionaries.
    :rtype: list
    """
    cmd = u"sw_interface_rx_placement_dump"
    err_msg = f"Failed to run '{cmd}' PAPI command on host {node[u'host']}!"
    with PapiSocketExecutor(node) as papi_exec:
        # Queue one dump request per interface known to the topology.
        for ifc in node[u"interfaces"].values():
            if ifc[u"vpp_sw_index"] is not None:
                papi_exec.add(cmd, sw_if_index=ifc[u"vpp_sw_index"])
        details = papi_exec.get_details(err_msg)
    return sorted(details, key=lambda k: k[u"sw_if_index"])
def vpp_sw_interface_rx_placement_dump_on_all_duts(nodes):
    """Dump VPP interface RX placement on all given nodes.

    Results are only logged by the per-node dump; nothing is returned.

    :param nodes: Nodes to run command on.
    :type nodes: dict
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_sw_interface_rx_placement_dump(node)
def vpp_sw_interface_set_rx_placement(
        node, sw_if_index, queue_id, worker_id):
    """Set interface RX placement to worker on node.

    :param node: Node to run command on.
    :param sw_if_index: VPP SW interface index.
    :param queue_id: VPP interface queue ID.
    :param worker_id: VPP worker ID (indexing from 0).
    :type node: dict
    :type sw_if_index: int
    :type queue_id: int
    :type worker_id: int
    :raises RuntimeError: If failed to run command on host or if no API
        reply received.
    """
    cmd = u"sw_interface_set_rx_placement"
    err_msg = f"Failed to set interface RX placement to worker " \
        f"on host {node[u'host']}!"
    args = dict(
        sw_if_index=sw_if_index,
        queue_id=queue_id,
        worker_id=worker_id,
        is_main=False
    )
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def vpp_round_robin_rx_placement(
        node, prefix, workers=None):
    """Set Round Robin interface RX placement on all worker threads
    on node.

    If specified, workers limits the number of physical cores used
    for data plane I/O work. Other cores are presumed to do something else,
    e.g. asynchronous crypto processing.
    None means all workers are used for data plane work.

    :param node: Topology nodes.
    :param prefix: Interface name prefix.
    :param workers: Comma separated worker index numbers intended for
        dataplane work.
    :type node: dict
    :type prefix: str
    :type workers: str
    """
    thread_data = VPPUtil.vpp_show_threads(node)
    worker_cnt = len(thread_data) - 1
    if not worker_cnt:
        # No worker threads, nothing to place.
        return
    worker_ids = list()
    if workers:
        # Restrict to threads whose CPU id is in the given list.
        for item in thread_data:
            if str(item.cpu_id) in workers.split(u","):
                worker_ids.append(item.id)
    else:
        # Use all threads except the main one.
        for item in thread_data:
            if u"vpp_main" not in item.name:
                worker_ids.append(item.id)

    worker_idx = 0
    for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
        for interface in node[u"interfaces"].values():
            if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
                    and prefix in interface[u"name"]:
                InterfaceUtil.vpp_sw_interface_set_rx_placement(
                    node, placement[u"sw_if_index"], placement[u"queue_id"],
                    worker_ids[worker_idx % len(worker_ids)] - 1
                )
                worker_idx += 1
2022 def vpp_round_robin_rx_placement_on_all_duts(
2023 nodes, prefix, workers=None):
2024 """Set Round Robin interface RX placement on worker threads
2027 If specified, workers limits the number of physical cores used
2028 for data plane I/O work. Other cores are presumed to do something else,
2029 e.g. asynchronous crypto processing.
2030 None means all cores are used for data plane work.
2032 :param nodes: Topology nodes.
2033 :param prefix: Interface name prefix.
2034 :param workers: Comma separated worker index numbers intended for
2040 for node in nodes.values():
2041 if node[u"type"] == NodeType.DUT:
2042 InterfaceUtil.vpp_round_robin_rx_placement(
2043 node, prefix, workers