1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Interface util library."""
16 from time import sleep
17 from enum import IntEnum
19 from ipaddress import ip_address
20 from robot.api import logger
22 from resources.libraries.python.Constants import Constants
23 from resources.libraries.python.DUTSetup import DUTSetup
24 from resources.libraries.python.IPAddress import IPAddress
25 from resources.libraries.python.L2Util import L2Util
26 from resources.libraries.python.PapiExecutor import PapiSocketExecutor
27 from resources.libraries.python.parsers.JsonParser import JsonParser
28 from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
29 from resources.libraries.python.topology import NodeType, Topology
30 from resources.libraries.python.VPPUtil import VPPUtil
class InterfaceStatusFlags(IntEnum):
    """Interface status flags (VPP interface.api if_status_flags)."""
    IF_STATUS_API_FLAG_ADMIN_UP = 1
    IF_STATUS_API_FLAG_LINK_UP = 2
class MtuProto(IntEnum):
    """MTU protocol (VPP interface.api mtu_proto).

    NOTE(review): only the MPLS member survived the garbled listing;
    L3/IP4/IP6 restored from the VPP API enum -- confirm against upstream.
    """
    MTU_PROTO_API_L3 = 0
    MTU_PROTO_API_IP4 = 1
    MTU_PROTO_API_IP6 = 2
    MTU_PROTO_API_MPLS = 3
class LinkDuplex(IntEnum):
    """Link duplex mode."""
    LINK_DUPLEX_API_UNKNOWN = 0
    LINK_DUPLEX_API_HALF = 1
    LINK_DUPLEX_API_FULL = 2
class SubInterfaceFlags(IntEnum):
    """Sub-interface flags (bitmask, combinable with |)."""
    SUB_IF_API_FLAG_NO_TAGS = 1
    SUB_IF_API_FLAG_ONE_TAG = 2
    SUB_IF_API_FLAG_TWO_TAGS = 4
    SUB_IF_API_FLAG_DOT1AD = 8
    SUB_IF_API_FLAG_EXACT_MATCH = 16
    SUB_IF_API_FLAG_DEFAULT = 32
    SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY = 64
    SUB_IF_API_FLAG_INNER_VLAN_ID_ANY = 128
    SUB_IF_API_FLAG_DOT1AH = 256
class RxMode(IntEnum):
    """RX mode."""
    RX_MODE_API_UNKNOWN = 0
    RX_MODE_API_POLLING = 1
    RX_MODE_API_INTERRUPT = 2
    RX_MODE_API_ADAPTIVE = 3
    RX_MODE_API_DEFAULT = 4
class IfType(IntEnum):
    """Interface type.

    NOTE(review): only IF_API_TYPE_HARDWARE = 0 survived the garbled
    listing; remaining members restored from the VPP interface.api enum
    -- confirm against upstream.
    """
    # A hw interface
    IF_API_TYPE_HARDWARE = 0
    # A sub-interface
    IF_API_TYPE_SUB = 1
    IF_API_TYPE_P2P = 2
    IF_API_TYPE_PIPE = 3
class LinkBondLoadBalanceAlgo(IntEnum):
    """Link bonding load balance algorithm."""
    BOND_API_LB_ALGO_L2 = 0
    BOND_API_LB_ALGO_L34 = 1
    BOND_API_LB_ALGO_L23 = 2
    BOND_API_LB_ALGO_RR = 3
    BOND_API_LB_ALGO_BC = 4
    BOND_API_LB_ALGO_AB = 5
class LinkBondMode(IntEnum):
    """Link bonding mode."""
    BOND_API_MODE_ROUND_ROBIN = 1
    BOND_API_MODE_ACTIVE_BACKUP = 2
    BOND_API_MODE_XOR = 3
    BOND_API_MODE_BROADCAST = 4
    BOND_API_MODE_LACP = 5
class RdmaMode(IntEnum):
    """RDMA interface mode."""
    RDMA_API_MODE_AUTO = 0
    RDMA_API_MODE_IBV = 1
class AfXdpMode(IntEnum):
    """AF_XDP interface mode."""
    AF_XDP_API_MODE_AUTO = 0
    AF_XDP_API_MODE_COPY = 1
    AF_XDP_API_MODE_ZERO_COPY = 2
121 """General utilities for managing interfaces"""
def pci_to_int(pci_str):
    """Convert PCI address from string format (0000:18:0a.0) to
    integer representation (169345024).

    :param pci_str: PCI address in string representation.
    :type pci_str: str
    :returns: Integer representation of PCI address.
    :rtype: int
    """
    # "dddd:bb:ss.f" -> [domain, bus, slot, function], each hex.
    pci = list(pci_str.split(u":")[0:2])
    pci.extend(pci_str.split(u":")[2].split(u"."))

    # Pack domain | bus<<16 | slot<<24 | function<<29 into one int.
    return (int(pci[0], 16) | int(pci[1], 16) << 16 |
            int(pci[2], 16) << 24 | int(pci[3], 16) << 29)
def pci_to_eth(node, pci_str):
    """Convert PCI address on DUT to Linux ethernet name.

    :param node: DUT node.
    :param pci_str: PCI address.
    :type node: dict
    :type pci_str: str
    :returns: Ethernet name.
    :rtype: str
    :raises RuntimeError: If the PCI address has no associated netdev.
    """
    # The kernel exposes the netdev name under the device's net/ subdir.
    cmd = f"basename /sys/bus/pci/devices/{pci_str}/net/*"
    try:
        stdout, _ = exec_cmd_no_error(node, cmd)
    except RuntimeError:
        raise RuntimeError(f"Cannot convert {pci_str} to ethernet name!")

    return stdout.strip()
def get_interface_index(node, interface):
    """Get interface sw_if_index from topology file.

    :param node: Node where the interface is.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: str or int
    :returns: SW interface index.
    :rtype: int
    :raises TypeError: If the interface is neither int-like nor a name.
    """
    try:
        sw_if_index = int(interface)
    except ValueError:
        # Not numeric: resolve via topology, first by key then by name.
        sw_if_index = Topology.get_interface_sw_index(node, interface)
        if sw_if_index is None:
            sw_if_index = \
                Topology.get_interface_sw_index_by_name(node, interface)
    except TypeError as err:
        raise TypeError(f"Wrong interface format {interface}") from err

    return sw_if_index
def set_interface_state(node, interface, state, if_type=u"key"):
    """Set interface state on a node.

    Function can be used for DUTs as well as for TGs.

    :param node: Node where the interface is.
    :param interface: Interface key or sw_if_index or name.
    :param state: One of 'up' or 'down'.
    :param if_type: Interface type.
    :type node: dict
    :type interface: str or int
    :type state: str
    :type if_type: str
    :returns: Nothing.
    :raises ValueError: If the interface type is unknown.
    :raises ValueError: If the state of interface is unexpected.
    :raises ValueError: If the node has an unknown node type.
    """
    if if_type == u"key":
        if isinstance(interface, str):
            sw_if_index = Topology.get_interface_sw_index(node, interface)
            iface_name = Topology.get_interface_name(node, interface)
        else:
            sw_if_index = interface
    elif if_type == u"name":
        iface_key = Topology.get_interface_by_name(node, interface)
        if iface_key is not None:
            sw_if_index = Topology.get_interface_sw_index(node, iface_key)
        iface_name = interface
    else:
        raise ValueError(f"Unknown if_type: {if_type}")

    if node[u"type"] == NodeType.DUT:
        if state == u"up":
            flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
        elif state == u"down":
            # Clearing all flags brings the interface admin-down.
            flags = 0
        else:
            raise ValueError(f"Unexpected interface state: {state}")
        cmd = u"sw_interface_set_flags"
        err_msg = f"Failed to set interface state on host {node[u'host']}"
        args = dict(
            sw_if_index=int(sw_if_index),
            flags=flags
        )
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
    elif node[u"type"] == NodeType.TG or node[u"type"] == NodeType.VM:
        cmd = f"ip link set {iface_name} {state}"
        exec_cmd_no_error(node, cmd, sudo=True)
    else:
        raise ValueError(
            f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
        )
def set_interface_state_pci(node, pf_pcis, namespace=None, state=u"up"):
    """Set operational state for interface specified by PCI address.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param namespace: Exec command in namespace. (Optional, Default: none)
    :param state: Up/Down. (Optional, default: up)
    :type node: dict
    :type pf_pcis: list
    :type namespace: str
    :type state: str
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        InterfaceUtil.set_linux_interface_state(
            node, pf_eth, namespace=namespace, state=state
        )
def set_interface_mtu(node, pf_pcis, mtu=9200):
    """Set Ethernet MTU for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param mtu: MTU to set. Default: 9200.
    :type node: dict
    :type pf_pcis: list
    :type mtu: int
    :raises RuntimeError: If failed to set MTU on interface.
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ip link set {pf_eth} mtu {mtu}"
        exec_cmd_no_error(node, cmd, sudo=True)
def set_interface_channels(node, pf_pcis, num_queues=1, channel=u"combined"):
    """Set interface channels for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param num_queues: Number of channels. (Optional, Default: 1)
    :param channel: Channel type. (Optional, Default: combined)
    :type node: dict
    :type pf_pcis: list
    :type num_queues: int
    :type channel: str
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ethtool --set-channels {pf_eth} {channel} {num_queues}"
        exec_cmd_no_error(node, cmd, sudo=True)
def set_interface_flow_control(node, pf_pcis, rxf=u"off", txf=u"off"):
    """Set Ethernet flow control for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param rxf: RX flow. (Optional, Default: off).
    :param txf: TX flow. (Optional, Default: off).
    :type node: dict
    :type pf_pcis: list
    :type rxf: str
    :type txf: str
    :raises RuntimeError: If failed to set flow control on interface.
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ethtool -A {pf_eth} rx {rxf} tx {txf}"
        ret_code, _, _ = exec_cmd(node, cmd, sudo=True)
        # ethtool exits 78 when the requested setting is already active;
        # treat that the same as success.
        if int(ret_code) not in (0, 78):
            # Bug fix: the original message lacked the f-prefix, so the
            # literal "{pf_eth}" was reported instead of the device name.
            raise RuntimeError(f"Failed to set flow control on {pf_eth}!")
def set_pci_parameter(node, pf_pcis, key, value):
    """Set PCI parameter for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param key: Key to set.
    :param value: Value to set.
    :type node: dict
    :type pf_pcis: list
    :type key: str
    :type value: str
    """
    for pf_pci in pf_pcis:
        cmd = f"setpci -s {pf_pci} {key}={value}"
        exec_cmd_no_error(node, cmd, sudo=True)
def vpp_set_interface_mtu(node, interface, mtu=9200):
    """Set Ethernet MTU on interface.

    :param node: VPP node.
    :param interface: Interface to setup MTU.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type interface: str or int
    :type mtu: int
    """
    if isinstance(interface, str):
        sw_if_index = Topology.get_interface_sw_index(node, interface)
    else:
        sw_if_index = interface

    cmd = u"hw_interface_set_mtu"
    err_msg = f"Failed to set interface MTU on host {node[u'host']}"
    args = dict(
        sw_if_index=sw_if_index,
        mtu=int(mtu)
    )
    try:
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
    except AssertionError as err:
        # Best effort: some drivers refuse the MTU change; log and go on.
        logger.debug(f"Setting MTU failed.\n{err}")
def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
    """Set Ethernet MTU on all interfaces.

    :param node: VPP node.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type mtu: int
    """
    for interface in node[u"interfaces"]:
        InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
    """Set Ethernet MTU on all interfaces on all DUTs.

    :param nodes: VPP nodes.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type nodes: dict
    :type mtu: int
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
def vpp_node_interfaces_ready_wait(node, retries=15):
    """Wait until all interfaces with admin-up are in link-up state.

    :param node: Node to wait on.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type node: dict
    :type retries: int
    :returns: Nothing.
    :raises RuntimeError: If any interface is not in link-up state after
        defined number of retries.
    """
    for _ in range(0, retries):
        not_ready = list()
        out = InterfaceUtil.vpp_get_interface_data(node)
        for interface in out:
            # flags == 1 means ADMIN_UP is set but LINK_UP (2) is not.
            if interface.get(u"flags") == 1:
                not_ready.append(interface.get(u"interface_name"))
        if not not_ready:
            break
        logger.debug(
            f"Interfaces still not in link-up state:\n{not_ready}"
        )
        sleep(1)
    else:
        err = f"Timeout, interfaces not up:\n{not_ready}" \
            if u"not_ready" in locals() else u"No check executed!"
        raise RuntimeError(err)
def all_vpp_interfaces_ready_wait(nodes, retries=15):
    """Wait until all interfaces with admin-up are in link-up state for all
    nodes in the topology.

    :param nodes: Nodes in the topology.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type nodes: dict
    :type retries: int
    :returns: Nothing.
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)
def vpp_get_interface_data(node, interface=None):
    """Get all interface data from a VPP node. If a name or
    sw_interface_index is provided, return only data for the matching
    interface(s).

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: List of dictionaries containing data for each interface, or a
        single dictionary for the specified interface.
    :rtype: list or dict
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    def process_if_dump(if_dump):
        """Process interface dump.

        Normalize PAPI enum/binary fields into plain str/int values.

        :param if_dump: Interface dump.
        :type if_dump: dict
        :returns: Processed interface dump.
        :rtype: dict
        """
        if_dump[u"l2_address"] = str(if_dump[u"l2_address"])
        if_dump[u"b_dmac"] = str(if_dump[u"b_dmac"])
        if_dump[u"b_smac"] = str(if_dump[u"b_smac"])
        if_dump[u"flags"] = if_dump[u"flags"].value
        if_dump[u"type"] = if_dump[u"type"].value
        if_dump[u"link_duplex"] = if_dump[u"link_duplex"].value
        if_dump[u"sub_if_flags"] = if_dump[u"sub_if_flags"].value \
            if hasattr(if_dump[u"sub_if_flags"], u"value") \
            else int(if_dump[u"sub_if_flags"])
        return if_dump

    if interface is not None:
        if isinstance(interface, str):
            param = u"interface_name"
        elif isinstance(interface, int):
            param = u"sw_if_index"
        else:
            raise TypeError(f"Wrong interface format {interface}")
    else:
        param = u""

    cmd = u"sw_interface_dump"
    args = dict(
        name_filter_valid=False,
        name_filter=u""
    )
    err_msg = f"Failed to get interface dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)
    logger.debug(f"Received data:\n{details!r}")

    data = list() if interface is None else dict()
    for dump in details:
        if interface is None:
            data.append(process_if_dump(dump))
        # Names come back NUL-padded from the API, strip before comparing.
        elif str(dump.get(param)).rstrip(u"\x00") == str(interface):
            data = process_if_dump(dump)
            break

    logger.debug(f"Interface data:\n{data}")
    return data
def vpp_get_interface_name(node, sw_if_index):
    """Get interface name for the given SW interface index from actual
    interface dump.

    :param node: VPP node to get interface data from.
    :param sw_if_index: SW interface index of the specific interface.
    :type node: dict
    :type sw_if_index: int
    :returns: Name of the given interface.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
    # A sub-interface reports its parent as sup_sw_if_index; resolve the
    # parent's data to get the canonical name.
    if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data[u"sup_sw_if_index"]
        )

    return if_data.get(u"interface_name")
def vpp_get_interface_sw_index(node, interface_name):
    """Get SW interface index for the given interface name from actual
    interface dump.

    :param node: VPP node to get interface data from.
    :param interface_name: Interface name.
    :type node: dict
    :type interface_name: str
    :returns: SW interface index of the given interface.
    :rtype: int
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)

    return if_data.get(u"sw_if_index")
def vpp_get_interface_mac(node, interface):
    """Get MAC address for the given interface from actual interface dump.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: MAC address.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
    # Sub-interfaces share the parent's MAC; resolve the parent.
    if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data[u"sup_sw_if_index"])

    return if_data.get(u"l2_address")
def vpp_set_interface_mac(node, interface, mac):
    """Set MAC address for the given interface.

    :param node: VPP node to set interface MAC.
    :param interface: Numeric index or name string of a specific interface.
    :param mac: Required MAC address.
    :type node: dict
    :type interface: int or str
    :type mac: str
    """
    cmd = u"sw_interface_set_mac_address"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        mac_address=L2Util.mac_to_bin(mac)
    )
    # Bug fix: the original concatenated f-strings produced
    # "...interface Xon host..." (missing space).
    err_msg = f"Failed to set MAC address of interface {interface} " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def tg_set_interface_driver(node, pci_addr, driver):
    """Set interface driver on the TG node.

    :param node: Node to set interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :param driver: Driver name.
    :type node: dict
    :type pci_addr: str
    :type driver: str
    :raises RuntimeError: If unbinding from the current driver fails.
    :raises RuntimeError: If binding to the new driver fails.
    """
    old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
    if old_driver == driver:
        return

    ssh = SSH()
    ssh.connect(node)

    # Unbind from current driver
    if old_driver is not None:
        cmd = f"sh -c \"echo {pci_addr} > " \
            f"/sys/bus/pci/drivers/{old_driver}/unbind\""
        ret_code, _, _ = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")

    # Bind to the new driver
    cmd = f"sh -c \"echo {pci_addr} > /sys/bus/pci/drivers/{driver}/bind\""
    ret_code, _, _ = ssh.exec_command_sudo(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
def tg_get_interface_driver(node, pci_addr):
    """Get interface driver from the TG node.

    :param node: Node to get interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :type node: dict
    :type pci_addr: str
    :returns: Interface driver or None if not found.
    :rtype: str
    :raises RuntimeError: If PCI rescan or lspci command execution failed.
    """
    return DUTSetup.get_pci_dev_driver(node, pci_addr)
def tg_set_interfaces_default_driver(node):
    """Set interfaces default driver specified in topology yaml file.

    :param node: Node to setup interfaces driver on (must be TG node).
    :type node: dict
    """
    for interface in node[u"interfaces"].values():
        InterfaceUtil.tg_set_interface_driver(
            node, interface[u"pci_address"], interface[u"driver"]
        )
def update_vpp_interface_data_on_node(node):
    """Update vpp generated interface data for a given node in DICT__nodes.

    Updates interface names, software if index numbers and any other details
    generated specifically by vpp that are unknown before testcase run.
    It does this by dumping interface list from all devices using python
    api, and pairing known information from topology (mac address) to state
    of the node.

    :param node: Node selected from DICT__nodes.
    :type node: dict
    """
    interface_list = InterfaceUtil.vpp_get_interface_data(node)
    # Index the dump by MAC so topology entries can be matched cheaply.
    interface_dict = dict()
    for ifc in interface_list:
        interface_dict[ifc[u"l2_address"]] = ifc

    for if_name, if_data in node[u"interfaces"].items():
        ifc_dict = interface_dict.get(if_data[u"mac_address"])
        if ifc_dict is not None:
            if_data[u"name"] = ifc_dict[u"interface_name"]
            if_data[u"vpp_sw_index"] = ifc_dict[u"sw_if_index"]
            if_data[u"mtu"] = ifc_dict[u"mtu"][0]
            logger.debug(
                f"Interface {if_name} found by MAC "
                f"{if_data[u'mac_address']}"
            )
        else:
            logger.debug(
                f"Interface {if_name} not found by MAC "
                f"{if_data[u'mac_address']}"
            )
            if_data[u"vpp_sw_index"] = None
def update_nic_interface_names(node):
    """Update interface names based on nic type and PCI address.

    This method updates interface names in the same format as VPP does.

    :param node: Node dictionary.
    :type node: dict
    """
    for ifc in node[u"interfaces"].values():
        # "0000:18:0a.0" -> ["0000", "18", "0a", "0"]
        if_pci = ifc[u"pci_address"].replace(u".", u":").split(u":")
        # VPP renders bus/slot/function as lowercase hex without padding.
        loc = f"{int(if_pci[1], 16):x}/{int(if_pci[2], 16):x}/" \
            f"{int(if_pci[3], 16):x}"
        if ifc[u"model"] == u"Intel-XL710":
            ifc[u"name"] = f"FortyGigabitEthernet{loc}"
        elif ifc[u"model"] == u"Intel-X710":
            ifc[u"name"] = f"TenGigabitEthernet{loc}"
        elif ifc[u"model"] == u"Intel-X520-DA2":
            ifc[u"name"] = f"TenGigabitEthernet{loc}"
        elif ifc[u"model"] == u"Cisco-VIC-1385":
            ifc[u"name"] = f"FortyGigabitEthernet{loc}"
        elif ifc[u"model"] == u"Cisco-VIC-1227":
            ifc[u"name"] = f"TenGigabitEthernet{loc}"
        else:
            ifc[u"name"] = f"UnknownEthernet{loc}"
def update_nic_interface_names_on_all_duts(nodes):
    """Update interface names based on nic type and PCI address on all DUTs.

    This method updates interface names in the same format as VPP does.

    :param nodes: Topology nodes.
    :type nodes: dict
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.update_nic_interface_names(node)
def update_tg_interface_data_on_node(node):
    """Update interface name for TG/linux node in DICT__nodes.

    .. note::
        # for dev in `ls /sys/class/net/`;
        > do echo "\"`cat /sys/class/net/$dev/address`\": \"$dev\""; done
        "52:54:00:9f:82:63": "eth0"
        "52:54:00:77:ae:a9": "eth1"
        "52:54:00:e1:8a:0f": "eth2"
        "00:00:00:00:00:00": "lo"

    :param node: Node selected from DICT__nodes.
    :type node: dict
    :raises RuntimeError: If getting of interface name and MAC fails.
    """
    # First setup interface driver specified in yaml file
    InterfaceUtil.tg_set_interfaces_default_driver(node)

    # Get interface names
    ssh = SSH()
    ssh.connect(node)

    cmd = u'for dev in `ls /sys/class/net/`; do echo "\\"`cat ' \
        u'/sys/class/net/$dev/address`\\": \\"$dev\\""; done;'

    ret_code, stdout, _ = ssh.exec_command(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(u"Get interface name and MAC failed")
    # Shell output is '"mac": "dev"' lines; wrap in braces to form JSON.
    tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"

    interfaces = JsonParser().parse_data(tmp)
    for interface in node[u"interfaces"].values():
        name = interfaces.get(interface[u"mac_address"])
        if name is None:
            continue
        interface[u"name"] = name
def iface_update_numa_node(node):
    """For all interfaces from topology file update numa node based on
    information from the node.

    NOTE(review): the retry skeleton below was reconstructed from a
    garbled listing -- confirm the retry count and control flow against
    upstream.

    :param node: Node from topology.
    :type node: dict
    :returns: Nothing.
    :raises ValueError: If numa node is less than 0.
    :raises RuntimeError: If update of numa node failed.
    """
    ssh = SSH()
    for if_key in Topology.get_node_interfaces(node):
        if_pci = Topology.get_interface_pci_addr(node, if_key)
        ssh.connect(node)
        cmd = f"cat /sys/bus/pci/devices/{if_pci}/numa_node"
        for _ in range(3):
            ret, out, _ = ssh.exec_command(cmd)
            if ret == 0:
                try:
                    # Kernel reports -1 when NUMA is unknown; map to 0.
                    numa_node = 0 if int(out) < 0 else int(out)
                except ValueError:
                    logger.debug(
                        f"Reading numa location failed for: {if_pci}"
                    )
                else:
                    Topology.set_interface_numa_node(
                        node, if_key, numa_node
                    )
                    break
        else:
            raise RuntimeError(f"Update numa node failed for: {if_pci}")
def update_all_interface_data_on_all_nodes(nodes, skip_tg=False, skip_vpp=False):
    """Update interface names on all nodes in DICT__nodes.

    This method updates the topology dictionary by querying interface lists
    of all nodes mentioned in the topology dictionary.

    :param nodes: Nodes in the topology.
    :param skip_tg: Skip TG node.
    :param skip_vpp: Skip VPP node.
    :type nodes: dict
    :type skip_tg: bool
    :type skip_vpp: bool
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT and not skip_vpp:
            InterfaceUtil.update_vpp_interface_data_on_node(node)
        elif node[u"type"] == NodeType.TG and not skip_tg:
            InterfaceUtil.update_tg_interface_data_on_node(node)
            InterfaceUtil.iface_update_numa_node(node)
def create_vlan_subinterface(node, interface, vlan):
    """Create VLAN sub-interface on node.

    :param node: Node to add VLAN subinterface on.
    :param interface: Interface name or index on which create VLAN
        subinterface.
    :param vlan: VLAN ID of the subinterface to be created.
    :type node: dict
    :type interface: str or int
    :type vlan: int
    :returns: Name and index of created subinterface.
    :rtype: tuple
    :raises RuntimeError: if it is unable to create VLAN subinterface on the
        node or interface cannot be converted.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"create_vlan_subif"
    args = dict(
        sw_if_index=sw_if_index,
        vlan_id=int(vlan)
    )
    err_msg = f"Failed to create VLAN sub-interface on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"vlan_subif")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{vlan}", sw_if_index
def create_vxlan_interface(node, vni, source_ip, destination_ip):
    """Create VXLAN interface and return sw if index of created interface.

    :param node: Node where to create VXLAN interface.
    :param vni: VXLAN Network Identifier.
    :param source_ip: Source IP of a VXLAN Tunnel End Point.
    :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
    :type node: dict
    :type vni: int
    :type source_ip: str
    :type destination_ip: str
    :returns: SW IF INDEX of created interface.
    :rtype: int
    :raises RuntimeError: if it is unable to create VxLAN interface on the
        node.
    """
    cmd = u"vxlan_add_del_tunnel"
    # NOTE(review): is_add/encap_vrf_id/vni lines were dropped from the
    # garbled listing and restored from the VPP vxlan.api contract --
    # confirm against upstream.
    args = dict(
        is_add=True,
        instance=Constants.BITWISE_NON_ZERO,
        src_address=IPAddress.create_ip_address_object(
            ip_address(source_ip)
        ),
        dst_address=IPAddress.create_ip_address_object(
            ip_address(destination_ip)
        ),
        mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
        encap_vrf_id=0,
        decap_next_index=Constants.BITWISE_NON_ZERO,
        vni=int(vni)
    )
    err_msg = f"Failed to create VXLAN tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"vxlan_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return sw_if_index
def set_vxlan_bypass(node, interface=None):
    """Add the 'ip4-vxlan-bypass' graph node for a given interface.

    By adding the IPv4 vxlan-bypass graph node to an interface, the node
    checks for and validate input vxlan packet and bypass ip4-lookup,
    ip4-local, ip4-udp-lookup nodes to speedup vxlan packet forwarding.
    This node will cause extra overhead to for non-vxlan packets which is
    kept at a minimum.

    :param node: Node where to set VXLAN bypass.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :raises RuntimeError: if it failed to set VXLAN bypass on interface.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"sw_interface_set_vxlan_bypass"
    # NOTE(review): is_ipv6/enable lines restored from the VPP API
    # contract -- confirm against upstream.
    args = dict(
        is_ipv6=False,
        sw_if_index=sw_if_index,
        enable=True
    )
    err_msg = f"Failed to set VXLAN bypass on interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_replies(err_msg)
def vxlan_dump(node, interface=None):
    """Get VxLAN data for the given interface.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
        If None, information about all VxLAN interfaces is returned.
    :type node: dict
    :type interface: int or str
    :returns: Dictionary containing data for the given VxLAN interface or if
        interface=None, the list of dictionaries with all VxLAN interfaces.
    :rtype: dict or list
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    def process_vxlan_dump(vxlan_dump):
        """Process vxlan dump.

        :param vxlan_dump: Vxlan interface dump.
        :type vxlan_dump: dict
        :returns: Processed vxlan interface dump.
        :rtype: dict
        """
        vxlan_dump[u"src_address"] = str(vxlan_dump[u"src_address"])
        vxlan_dump[u"dst_address"] = str(vxlan_dump[u"dst_address"])
        return vxlan_dump

    if interface is not None:
        sw_if_index = InterfaceUtil.get_interface_index(node, interface)
    else:
        # BITWISE_NON_ZERO requests a dump of all tunnels.
        sw_if_index = int(Constants.BITWISE_NON_ZERO)

    cmd = u"vxlan_tunnel_dump"
    args = dict(
        sw_if_index=sw_if_index
    )
    err_msg = f"Failed to get VXLAN dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)

    data = list() if interface is None else dict()
    for dump in details:
        if interface is None:
            data.append(process_vxlan_dump(dump))
        elif dump[u"sw_if_index"] == sw_if_index:
            data = process_vxlan_dump(dump)
            break

    logger.debug(f"VXLAN data:\n{data}")
    return data
def create_subinterface(
        node, interface, sub_id, outer_vlan_id=None, inner_vlan_id=None,
        type_subif=None):
    """Create sub-interface on node. It is possible to set required
    sub-interface type and VLAN tag(s).

    :param node: Node to add sub-interface.
    :param interface: Interface name on which create sub-interface.
    :param sub_id: ID of the sub-interface to be created.
    :param outer_vlan_id: Optional outer VLAN ID.
    :param inner_vlan_id: Optional inner VLAN ID.
    :param type_subif: Optional type of sub-interface. Values supported by
        VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
        [default_sub]
    :type node: dict
    :type interface: str or int
    :type sub_id: int
    :type outer_vlan_id: int
    :type inner_vlan_id: int
    :type type_subif: str
    :returns: Name and index of created sub-interface.
    :rtype: tuple
    :raises RuntimeError: If it is not possible to create sub-interface.
    """
    # Robustness: tolerate type_subif=None (the declared default) instead
    # of crashing on .split().
    subif_types = type_subif.split() if type_subif else []

    flags = 0
    if u"no_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_NO_TAGS
    if u"one_tag" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_ONE_TAG
    if u"two_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_TWO_TAGS
    if u"dot1ad" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DOT1AD
    if u"exact_match" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_EXACT_MATCH
    if u"default_sub" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DEFAULT
    if type_subif == u"default_sub":
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_INNER_VLAN_ID_ANY\
            | SubInterfaceFlags.SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY

    cmd = u"create_subif"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        sub_id=int(sub_id),
        # flags may be a plain 0 (no type given) or an IntFlag value.
        sub_if_flags=flags.value if hasattr(flags, u"value")
        else int(flags),
        outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
        inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0
    )
    err_msg = f"Failed to create sub-interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"subinterface")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{sub_id}", sw_if_index
def create_gre_tunnel_interface(node, source_ip, destination_ip):
    """Create GRE tunnel interface on node.

    :param node: VPP node to add tunnel interface.
    :param source_ip: Source of the GRE tunnel.
    :param destination_ip: Destination of the GRE tunnel.
    :type node: dict
    :type source_ip: str
    :type destination_ip: str
    :returns: Name and index of created GRE tunnel interface.
    :rtype: tuple
    :raises RuntimeError: If unable to create GRE tunnel interface.
    """
    cmd = u"gre_tunnel_add_del"
    # NOTE(review): most of this tunnel dict was dropped from the garbled
    # listing and restored from the VPP gre.api contract -- confirm
    # field names/values against upstream.
    tunnel = dict(
        type=0,
        instance=Constants.BITWISE_NON_ZERO,
        src=str(source_ip),
        dst=str(destination_ip),
        outer_fib_id=0,
        session_id=0
    )
    args = dict(
        is_add=1,
        tunnel=tunnel
    )
    err_msg = f"Failed to create GRE tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"gre_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return ifc_name, sw_if_index
def create_gtpu_tunnel_interface(node, teid, source_ip, destination_ip):
    """Create GTPU interface and return sw if index of created interface.

    :param node: Node where to create GTPU interface.
    :param teid: GTPU Tunnel Endpoint Identifier.
    :param source_ip: Source IP of a GTPU Tunnel End Point.
    :param destination_ip: Destination IP of a GTPU Tunnel End Point.
    :type node: dict
    :type teid: int
    :type source_ip: str
    :type destination_ip: str
    :returns: SW IF INDEX of created interface.
    :rtype: int
    :raises RuntimeError: if it is unable to create GTPU interface on the
        node.
    """
    cmd = u"gtpu_add_del_tunnel"
    # NOTE(review): is_add/encap_vrf_id/decap_next_index/teid lines were
    # dropped from the garbled listing and restored from the VPP gtpu.api
    # contract -- confirm against upstream.
    args = dict(
        is_add=True,
        src_address=IPAddress.create_ip_address_object(
            ip_address(source_ip)
        ),
        dst_address=IPAddress.create_ip_address_object(
            ip_address(destination_ip)
        ),
        mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
        encap_vrf_id=0,
        decap_next_index=2,
        teid=teid
    )
    err_msg = f"Failed to create GTPU tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"gtpu_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return sw_if_index
def vpp_create_loopback(node, mac=None):
    """Create loopback interface on VPP node.

    :param node: Node to create loopback interface on.
    :param mac: Optional MAC address for loopback interface.
    :type node: dict
    :type mac: str
    :returns: SW interface index.
    :rtype: int
    :raises RuntimeError: If it is not possible to create loopback on the
        node.
    """
    cmd = u"create_loopback_instance"
    # NOTE(review): is_specified/user_instance lines restored from the
    # VPP API contract -- confirm against upstream.
    args = dict(
        mac_address=L2Util.mac_to_bin(mac) if mac else 0,
        is_specified=False,
        user_instance=0,
    )
    err_msg = f"Failed to create loopback interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"loopback")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
    if mac:
        mac = InterfaceUtil.vpp_get_interface_mac(node, ifc_name)
        Topology.update_interface_mac_address(node, if_key, mac)

    return sw_if_index
1140 def vpp_create_bond_interface(
1141 node, mode, load_balance=None, mac=None, gso=False):
1142 """Create bond interface on VPP node.
1144 :param node: DUT node from topology.
1145 :param mode: Link bonding mode.
1146 :param load_balance: Load balance (optional, valid for xor and lacp
1147 modes, otherwise ignored). Default: None.
1148 :param mac: MAC address to assign to the bond interface (optional).
1150 :param gso: Enable GSO support (optional). Default: False.
1153 :type load_balance: str
1156 :returns: Interface key (name) in topology.
1158 :raises RuntimeError: If it is not possible to create bond interface on
1161 cmd = u"bond_create2"
1163 id=int(Constants.BITWISE_NON_ZERO),
1164 use_custom_mac=bool(mac is not None),
1165 mac_address=L2Util.mac_to_bin(mac) if mac else None,
1168 f"BOND_API_MODE_{mode.replace(u'-', u'_').upper()}"
1170 lb=0 if load_balance is None else getattr(
1171 LinkBondLoadBalanceAlgo,
1172 f"BOND_API_LB_ALGO_{load_balance.upper()}"
1177 err_msg = f"Failed to create bond interface on host {node[u'host']}"
1178 with PapiSocketExecutor(node) as papi_exec:
1179 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1181 InterfaceUtil.add_eth_interface(
1182 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_bond"
1184 if_key = Topology.get_interface_by_sw_index(node, sw_if_index)
1189 def add_eth_interface(
1190 node, ifc_name=None, sw_if_index=None, ifc_pfx=None,
1192 """Add ethernet interface to current topology.
1194 :param node: DUT node from topology.
1195 :param ifc_name: Name of the interface.
1196 :param sw_if_index: SW interface index.
1197 :param ifc_pfx: Interface key prefix.
1198 :param host_if_key: Host interface key from topology file.
1201 :type sw_if_index: int
1203 :type host_if_key: str
1205 if_key = Topology.add_new_port(node, ifc_pfx)
1207 if ifc_name and sw_if_index is None:
1208 sw_if_index = InterfaceUtil.vpp_get_interface_sw_index(
1210 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
1211 if sw_if_index and ifc_name is None:
1212 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
1213 Topology.update_interface_name(node, if_key, ifc_name)
1214 ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_index)
1215 Topology.update_interface_mac_address(node, if_key, ifc_mac)
1216 if host_if_key is not None:
1217 Topology.set_interface_numa_node(
1218 node, if_key, Topology.get_interface_numa_node(
1222 Topology.update_interface_pci_address(
1223 node, if_key, Topology.get_interface_pci_addr(node, host_if_key)
1227 def vpp_create_avf_interface(
1228 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0):
1229 """Create AVF interface on VPP node.
1231 :param node: DUT node from topology.
1232 :param if_key: Interface key from topology file of interface
1233 to be bound to i40evf driver.
1234 :param num_rx_queues: Number of RX queues.
1235 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1236 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1239 :type num_rx_queues: int
1242 :returns: AVF interface key (name) in topology.
1244 :raises RuntimeError: If it is not possible to create AVF interface on
1247 PapiSocketExecutor.run_cli_cmd(
1248 node, u"set logging class avf level debug"
1252 vf_pci_addr = Topology.get_interface_pci_addr(node, if_key)
1254 pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
1256 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1260 err_msg = f"Failed to create AVF interface on host {node[u'host']}"
1261 with PapiSocketExecutor(node) as papi_exec:
1262 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1264 InterfaceUtil.add_eth_interface(
1265 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
1269 return Topology.get_interface_by_sw_index(node, sw_if_index)
1272 def vpp_create_af_xdp_interface(
1273 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
1275 """Create AF_XDP interface on VPP node.
1277 :param node: DUT node from topology.
1278 :param if_key: Physical interface key from topology file of interface
1279 to be bound to compatible driver.
1280 :param num_rx_queues: Number of RX queues. (Optional, Default: none)
1281 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1282 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1283 :param mode: AF_XDP interface mode. (Optional, Default: auto).
1286 :type num_rx_queues: int
1290 :returns: Interface key (name) in topology file.
1292 :raises RuntimeError: If it is not possible to create AF_XDP interface
1295 PapiSocketExecutor.run_cli_cmd(
1296 node, u"set logging class af_xdp level debug"
1299 cmd = u"af_xdp_create"
1300 pci_addr = Topology.get_interface_pci_addr(node, if_key)
1302 name=InterfaceUtil.pci_to_eth(node, pci_addr),
1303 host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
1304 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1307 mode=getattr(AfXdpMode, f"AF_XDP_API_MODE_{mode.upper()}").value
1309 err_msg = f"Failed to create AF_XDP interface on host {node[u'host']}"
1310 with PapiSocketExecutor(node) as papi_exec:
1311 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1313 InterfaceUtil.vpp_set_interface_mac(
1314 node, sw_if_index, Topology.get_interface_mac(node, if_key)
1316 InterfaceUtil.add_eth_interface(
1317 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_af_xdp",
1321 return Topology.get_interface_by_sw_index(node, sw_if_index)
1324 def vpp_create_rdma_interface(
1325 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
1327 """Create RDMA interface on VPP node.
1329 :param node: DUT node from topology.
1330 :param if_key: Physical interface key from topology file of interface
1331 to be bound to rdma-core driver.
1332 :param num_rx_queues: Number of RX queues.
1333 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1334 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1335 :param mode: RDMA interface mode - auto/ibv/dv.
1338 :type num_rx_queues: int
1342 :returns: Interface key (name) in topology file.
1344 :raises RuntimeError: If it is not possible to create RDMA interface on
1347 PapiSocketExecutor.run_cli_cmd(
1348 node, u"set logging class rdma level debug"
1351 cmd = u"rdma_create_v2"
1352 pci_addr = Topology.get_interface_pci_addr(node, if_key)
1354 name=InterfaceUtil.pci_to_eth(node, pci_addr),
1355 host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
1356 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1359 mode=getattr(RdmaMode, f"RDMA_API_MODE_{mode.upper()}").value,
1360 # Note: Set True for non-jumbo packets.
1364 err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
1365 with PapiSocketExecutor(node) as papi_exec:
1366 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1368 InterfaceUtil.vpp_set_interface_mac(
1369 node, sw_if_index, Topology.get_interface_mac(node, if_key)
1371 InterfaceUtil.add_eth_interface(
1372 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_rdma",
1376 return Topology.get_interface_by_sw_index(node, sw_if_index)
1379 def vpp_add_bond_member(node, interface, bond_if):
1380 """Add member interface to bond interface on VPP node.
1382 :param node: DUT node from topology.
1383 :param interface: Physical interface key from topology file.
1384 :param bond_if: Load balance
1386 :type interface: str
1388 :raises RuntimeError: If it is not possible to add member to bond
1389 interface on the node.
1391 cmd = u"bond_add_member"
1393 sw_if_index=Topology.get_interface_sw_index(node, interface),
1394 bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
1396 is_long_timeout=False
1398 err_msg = f"Failed to add member {interface} to bond interface " \
1399 f"{bond_if} on host {node[u'host']}"
1400 with PapiSocketExecutor(node) as papi_exec:
1401 papi_exec.add(cmd, **args).get_reply(err_msg)
1404 def vpp_show_bond_data_on_node(node, verbose=False):
1405 """Show (detailed) bond information on VPP node.
1407 :param node: DUT node from topology.
1408 :param verbose: If detailed information is required or not.
1412 cmd = u"sw_bond_interface_dump"
1413 err_msg = f"Failed to get bond interface dump on host {node[u'host']}"
1415 data = f"Bond data on node {node[u'host']}:\n"
1416 with PapiSocketExecutor(node) as papi_exec:
1417 details = papi_exec.add(cmd).get_details(err_msg)
1419 for bond in details:
1420 data += f"{bond[u'interface_name']}\n"
1421 data += u" mode: {m}\n".format(
1422 m=bond[u"mode"].name.replace(u"BOND_API_MODE_", u"").lower()
1424 data += u" load balance: {lb}\n".format(
1425 lb=bond[u"lb"].name.replace(u"BOND_API_LB_ALGO_", u"").lower()
1427 data += f" number of active members: {bond[u'active_members']}\n"
1429 member_data = InterfaceUtil.vpp_bond_member_dump(
1430 node, Topology.get_interface_by_sw_index(
1431 node, bond[u"sw_if_index"]
1434 for member in member_data:
1435 if not member[u"is_passive"]:
1436 data += f" {member[u'interface_name']}\n"
1437 data += f" number of members: {bond[u'members']}\n"
1439 for member in member_data:
1440 data += f" {member[u'interface_name']}\n"
1441 data += f" interface id: {bond[u'id']}\n"
1442 data += f" sw_if_index: {bond[u'sw_if_index']}\n"
1446 def vpp_bond_member_dump(node, interface):
1447 """Get bond interface slave(s) data on VPP node.
1449 :param node: DUT node from topology.
1450 :param interface: Physical interface key from topology file.
1452 :type interface: str
1453 :returns: Bond slave interface data.
1456 cmd = u"sw_member_interface_dump"
1458 sw_if_index=Topology.get_interface_sw_index(node, interface)
1460 err_msg = f"Failed to get slave dump on host {node[u'host']}"
1462 with PapiSocketExecutor(node) as papi_exec:
1463 details = papi_exec.add(cmd, **args).get_details(err_msg)
1465 logger.debug(f"Member data:\n{details}")
1469 def vpp_show_bond_data_on_all_nodes(nodes, verbose=False):
1470 """Show (detailed) bond information on all VPP nodes in DICT__nodes.
1472 :param nodes: Nodes in the topology.
1473 :param verbose: If detailed information is required or not.
1477 for node_data in nodes.values():
1478 if node_data[u"type"] == NodeType.DUT:
1479 InterfaceUtil.vpp_show_bond_data_on_node(node_data, verbose)
1482 def vpp_enable_input_acl_interface(
1483 node, interface, ip_version, table_index):
1484 """Enable input acl on interface.
1486 :param node: VPP node to setup interface for input acl.
1487 :param interface: Interface to setup input acl.
1488 :param ip_version: Version of IP protocol.
1489 :param table_index: Classify table index.
1491 :type interface: str or int
1492 :type ip_version: str
1493 :type table_index: int
1495 cmd = u"input_acl_set_interface"
1497 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1498 ip4_table_index=table_index if ip_version == u"ip4"
1499 else Constants.BITWISE_NON_ZERO,
1500 ip6_table_index=table_index if ip_version == u"ip6"
1501 else Constants.BITWISE_NON_ZERO,
1502 l2_table_index=table_index if ip_version == u"l2"
1503 else Constants.BITWISE_NON_ZERO,
1505 err_msg = f"Failed to enable input acl on interface {interface}"
1506 with PapiSocketExecutor(node) as papi_exec:
1507 papi_exec.add(cmd, **args).get_reply(err_msg)
1510 def get_interface_classify_table(node, interface):
1511 """Get name of classify table for the given interface.
1513 TODO: Move to Classify.py.
1515 :param node: VPP node to get data from.
1516 :param interface: Name or sw_if_index of a specific interface.
1518 :type interface: str or int
1519 :returns: Classify table name.
1522 if isinstance(interface, str):
1523 sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
1525 sw_if_index = interface
1527 cmd = u"classify_table_by_interface"
1529 sw_if_index=sw_if_index
1531 err_msg = f"Failed to get classify table name by interface {interface}"
1532 with PapiSocketExecutor(node) as papi_exec:
1533 reply = papi_exec.add(cmd, **args).get_reply(err_msg)
1538 def get_sw_if_index(node, interface_name):
1539 """Get sw_if_index for the given interface from actual interface dump.
1541 FIXME: Delete and redirect callers to vpp_get_interface_sw_index.
1543 :param node: VPP node to get interface data from.
1544 :param interface_name: Name of the specific interface.
1546 :type interface_name: str
1547 :returns: sw_if_index of the given interface.
1550 interface_data = InterfaceUtil.vpp_get_interface_data(
1551 node, interface=interface_name
1553 return interface_data.get(u"sw_if_index")
1556 def vxlan_gpe_dump(node, interface_name=None):
1557 """Get VxLAN GPE data for the given interface.
1559 :param node: VPP node to get interface data from.
1560 :param interface_name: Name of the specific interface. If None,
1561 information about all VxLAN GPE interfaces is returned.
1563 :type interface_name: str
1564 :returns: Dictionary containing data for the given VxLAN GPE interface
1565 or if interface=None, the list of dictionaries with all VxLAN GPE
1567 :rtype: dict or list
1569 def process_vxlan_gpe_dump(vxlan_dump):
1570 """Process vxlan_gpe dump.
1572 :param vxlan_dump: Vxlan_gpe nterface dump.
1573 :type vxlan_dump: dict
1574 :returns: Processed vxlan_gpe interface dump.
1577 if vxlan_dump[u"is_ipv6"]:
1578 vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"])
1579 vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"])
1581 vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"][0:4])
1582 vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"][0:4])
1585 if interface_name is not None:
1586 sw_if_index = InterfaceUtil.get_interface_index(
1587 node, interface_name
1590 sw_if_index = int(Constants.BITWISE_NON_ZERO)
1592 cmd = u"vxlan_gpe_tunnel_dump"
1594 sw_if_index=sw_if_index
1596 err_msg = f"Failed to get VXLAN-GPE dump on host {node[u'host']}"
1597 with PapiSocketExecutor(node) as papi_exec:
1598 details = papi_exec.add(cmd, **args).get_details(err_msg)
1600 data = list() if interface_name is None else dict()
1601 for dump in details:
1602 if interface_name is None:
1603 data.append(process_vxlan_gpe_dump(dump))
1604 elif dump[u"sw_if_index"] == sw_if_index:
1605 data = process_vxlan_gpe_dump(dump)
1608 logger.debug(f"VXLAN-GPE data:\n{data}")
1612 def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
1613 """Assign VPP interface to specific VRF/FIB table.
1615 :param node: VPP node where the FIB and interface are located.
1616 :param interface: Interface to be assigned to FIB.
1617 :param table_id: VRF table ID.
1618 :param ipv6: Assign to IPv6 table. Default False.
1620 :type interface: str or int
1624 cmd = u"sw_interface_set_table"
1626 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1628 vrf_id=int(table_id)
1630 err_msg = f"Failed to assign interface {interface} to FIB table"
1631 with PapiSocketExecutor(node) as papi_exec:
1632 papi_exec.add(cmd, **args).get_reply(err_msg)
1635 def set_linux_interface_mac(
1636 node, interface, mac, namespace=None, vf_id=None):
1637 """Set MAC address for interface in linux.
1639 :param node: Node where to execute command.
1640 :param interface: Interface in namespace.
1641 :param mac: MAC to be assigned to interface.
1642 :param namespace: Execute command in namespace. Optional
1643 :param vf_id: Virtual Function id. Optional
1645 :type interface: str
1647 :type namespace: str
1650 mac_str = f"vf {vf_id} mac {mac}" if vf_id is not None \
1651 else f"address {mac}"
1652 ns_str = f"ip netns exec {namespace}" if namespace else u""
1654 cmd = f"{ns_str} ip link set {interface} {mac_str}"
1655 exec_cmd_no_error(node, cmd, sudo=True)
1658 def set_linux_interface_promisc(
1659 node, interface, namespace=None, vf_id=None, state=u"on"):
1660 """Set promisc state for interface in linux.
1662 :param node: Node where to execute command.
1663 :param interface: Interface in namespace.
1664 :param namespace: Exec command in namespace. (Optional, Default: None)
1665 :param vf_id: Virtual Function id. (Optional, Default: None)
1666 :param state: State of feature. (Optional, Default: on)
1668 :type interface: str
1669 :type namespace: str
1673 promisc_str = f"vf {vf_id} promisc {state}" if vf_id is not None \
1674 else f"promisc {state}"
1675 ns_str = f"ip netns exec {namespace}" if namespace else u""
1677 cmd = f"{ns_str} ip link set dev {interface} {promisc_str}"
1678 exec_cmd_no_error(node, cmd, sudo=True)
1681 def set_linux_interface_trust_on(
1682 node, interface, namespace=None, vf_id=None):
1683 """Set trust on (promisc) for interface in linux.
1685 :param node: Node where to execute command.
1686 :param interface: Interface in namespace.
1687 :param namespace: Execute command in namespace. Optional
1688 :param vf_id: Virtual Function id. Optional
1690 :type interface: str
1691 :type namespace: str
1694 trust_str = f"vf {vf_id} trust on" if vf_id is not None else u"trust on"
1695 ns_str = f"ip netns exec {namespace}" if namespace else u""
1697 cmd = f"{ns_str} ip link set dev {interface} {trust_str}"
1698 exec_cmd_no_error(node, cmd, sudo=True)
1701 def set_linux_interface_spoof_off(
1702 node, interface, namespace=None, vf_id=None):
1703 """Set spoof off for interface in linux.
1705 :param node: Node where to execute command.
1706 :param interface: Interface in namespace.
1707 :param namespace: Execute command in namespace. Optional
1708 :param vf_id: Virtual Function id. Optional
1710 :type interface: str
1711 :type namespace: str
1714 spoof_str = f"vf {vf_id} spoof off" if vf_id is not None \
1716 ns_str = f"ip netns exec {namespace}" if namespace else u""
1718 cmd = f"{ns_str} ip link set dev {interface} {spoof_str}"
1719 exec_cmd_no_error(node, cmd, sudo=True)
1722 def set_linux_interface_state(
1723 node, interface, namespace=None, state=u"up"):
1724 """Set operational state for interface in linux.
1726 :param node: Node where to execute command.
1727 :param interface: Interface in namespace.
1728 :param namespace: Execute command in namespace. Optional
1729 :param state: Up/Down.
1731 :type interface: str
1732 :type namespace: str
1735 ns_str = f"ip netns exec {namespace}" if namespace else u""
1737 cmd = f"{ns_str} ip link set dev {interface} {state}"
1738 exec_cmd_no_error(node, cmd, sudo=True)
1741 def init_interface(node, ifc_key, driver, numvfs=0, osi_layer=u"L2"):
1742 """Init PCI device. Check driver compatibility and bind to proper
1743 drivers. Optionally create NIC VFs.
1745 :param node: DUT node.
1746 :param ifc_key: Interface key from topology file.
1747 :param driver: Base driver to use.
1748 :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
1749 :param osi_layer: OSI Layer type to initialize TG with.
1750 Default value "L2" sets linux interface spoof off.
1755 :type osi_layer: str
1756 :returns: Virtual Function topology interface keys.
1758 :raises RuntimeError: If a reason preventing initialization is found.
1760 kernel_driver = Topology.get_interface_driver(node, ifc_key)
1762 if driver == u"avf":
1763 if kernel_driver not in (
1764 u"ice", u"iavf", u"i40e", u"i40evf"):
1766 f"AVF needs ice or i40e compatible driver, not "
1767 f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
1769 vf_keys = InterfaceUtil.init_generic_interface(
1770 node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
1772 elif driver == u"af_xdp":
1773 if kernel_driver not in (
1774 u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core"):
1776 f"AF_XDP needs ice or i40e or rdma compatible driver, not "
1777 f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
1779 vf_keys = InterfaceUtil.init_generic_interface(
1780 node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
1785 def init_generic_interface(node, ifc_key, numvfs=0, osi_layer=u"L2"):
1786 """Init PCI device. Bind to proper drivers. Optionally create NIC VFs.
1788 :param node: DUT node.
1789 :param ifc_key: Interface key from topology file.
1790 :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
1791 :param osi_layer: OSI Layer type to initialize TG with.
1792 Default value "L2" sets linux interface spoof off.
1796 :type osi_layer: str
1797 :returns: Virtual Function topology interface keys.
1799 :raises RuntimeError: If a reason preventing initialization is found.
1801 # Read PCI address and driver.
1802 pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
1803 pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
1804 uio_driver = Topology.get_uio_driver(node)
1805 kernel_driver = Topology.get_interface_driver(node, ifc_key)
1806 current_driver = DUTSetup.get_pci_dev_driver(
1807 node, pf_pci_addr.replace(u":", r"\:"))
1808 pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"
1810 VPPUtil.stop_vpp_service(node)
1811 if current_driver != kernel_driver:
1812 # PCI device must be re-bound to kernel driver before creating VFs.
1813 DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
1814 # Stop VPP to prevent deadlock.
1815 # Unbind from current driver.
1816 DUTSetup.pci_driver_unbind(node, pf_pci_addr)
1817 # Bind to kernel driver.
1818 DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
1820 # Initialize PCI VFs.
1821 DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
1824 if osi_layer == u"L2":
1825 InterfaceUtil.set_linux_interface_promisc(node, pf_dev)
1828 # Set MAC address and bind each virtual function to uio driver.
1829 for vf_id in range(numvfs):
1830 vf_mac_addr = u":".join(
1831 [pf_mac_addr[0], pf_mac_addr[2], pf_mac_addr[3], pf_mac_addr[4],
1832 pf_mac_addr[5], f"{vf_id:02x}"
1836 InterfaceUtil.set_linux_interface_trust_on(
1837 node, pf_dev, vf_id=vf_id
1839 if osi_layer == u"L2":
1840 InterfaceUtil.set_linux_interface_spoof_off(
1841 node, pf_dev, vf_id=vf_id
1843 InterfaceUtil.set_linux_interface_mac(
1844 node, pf_dev, vf_mac_addr, vf_id=vf_id
1846 InterfaceUtil.set_linux_interface_state(
1847 node, pf_dev, state=u"up"
1850 DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
1851 DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
1853 # Add newly created ports into topology file
1854 vf_ifc_name = f"{ifc_key}_vif"
1855 vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
1856 vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
1857 Topology.update_interface_name(
1858 node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
1860 Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
1861 Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
1862 Topology.set_interface_numa_node(
1863 node, vf_ifc_key, Topology.get_interface_numa_node(
1867 vf_ifc_keys.append(vf_ifc_key)
1872 def vpp_sw_interface_rx_placement_dump(node):
1873 """Dump VPP interface RX placement on node.
1875 :param node: Node to run command on.
1877 :returns: Thread mapping information as a list of dictionaries.
1880 cmd = u"sw_interface_rx_placement_dump"
1881 err_msg = f"Failed to run '{cmd}' PAPI command on host {node[u'host']}!"
1882 with PapiSocketExecutor(node) as papi_exec:
1883 for ifc in node[u"interfaces"].values():
1884 if ifc[u"vpp_sw_index"] is not None:
1885 papi_exec.add(cmd, sw_if_index=ifc[u"vpp_sw_index"])
1886 details = papi_exec.get_details(err_msg)
1887 return sorted(details, key=lambda k: k[u"sw_if_index"])
1890 def vpp_sw_interface_rx_placement_dump_on_all_duts(nodes):
1891 """Dump VPP interface RX placement on all given nodes.
1893 :param nodes: Nodes to run command on.
1895 :returns: Thread mapping information as a list of dictionaries.
1898 for node in nodes.values():
1899 if node[u"type"] == NodeType.DUT:
1900 InterfaceUtil.vpp_sw_interface_rx_placement_dump(node)
1903 def vpp_sw_interface_set_rx_placement(
1904 node, sw_if_index, queue_id, worker_id):
1905 """Set interface RX placement to worker on node.
1907 :param node: Node to run command on.
1908 :param sw_if_index: VPP SW interface index.
1909 :param queue_id: VPP interface queue ID.
1910 :param worker_id: VPP worker ID (indexing from 0).
1912 :type sw_if_index: int
1914 :type worker_id: int
1915 :raises RuntimeError: If failed to run command on host or if no API
1918 cmd = u"sw_interface_set_rx_placement"
1919 err_msg = f"Failed to set interface RX placement to worker " \
1920 f"on host {node[u'host']}!"
1922 sw_if_index=sw_if_index,
1924 worker_id=worker_id,
1927 with PapiSocketExecutor(node) as papi_exec:
1928 papi_exec.add(cmd, **args).get_reply(err_msg)
1931 def vpp_round_robin_rx_placement(
1932 node, prefix, workers=None):
1933 """Set Round Robin interface RX placement on all worker threads
1936 If specified, workers limits the number of physical cores used
1937 for data plane I/O work. Other cores are presumed to do something else,
1938 e.g. asynchronous crypto processing.
1939 None means all workers are used for data plane work.
1941 :param node: Topology nodes.
1942 :param prefix: Interface name prefix.
1943 :param workers: Comma separated worker index numbers intended for
1949 thread_data = VPPUtil.vpp_show_threads(node)
1950 worker_cnt = len(thread_data) - 1
1955 for item in thread_data:
1956 if str(item.cpu_id) in workers.split(u","):
1957 worker_ids.append(item.id)
1959 for item in thread_data:
1960 if u"vpp_main" not in item.name:
1961 worker_ids.append(item.id)
1964 for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
1965 for interface in node[u"interfaces"].values():
1966 if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
1967 and prefix in interface[u"name"]:
1968 InterfaceUtil.vpp_sw_interface_set_rx_placement(
1969 node, placement[u"sw_if_index"], placement[u"queue_id"],
1970 worker_ids[worker_idx % len(worker_ids)] - 1
1975 def vpp_round_robin_rx_placement_on_all_duts(
1976 nodes, prefix, workers=None):
1977 """Set Round Robin interface RX placement on worker threads
1980 If specified, workers limits the number of physical cores used
1981 for data plane I/O work. Other cores are presumed to do something else,
1982 e.g. asynchronous crypto processing.
1983 None means all cores are used for data plane work.
1985 :param nodes: Topology nodes.
1986 :param prefix: Interface name prefix.
1987 :param workers: Comma separated worker index numbers intended for
1993 for node in nodes.values():
1994 if node[u"type"] == NodeType.DUT:
1995 InterfaceUtil.vpp_round_robin_rx_placement(
1996 node, prefix, workers