1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Interface util library."""
16 from time import sleep
17 from enum import IntEnum
19 from ipaddress import ip_address
20 from robot.api import logger
22 from resources.libraries.python.Constants import Constants
23 from resources.libraries.python.DUTSetup import DUTSetup
24 from resources.libraries.python.IPAddress import IPAddress
25 from resources.libraries.python.L2Util import L2Util
26 from resources.libraries.python.PapiExecutor import PapiSocketExecutor
27 from resources.libraries.python.parsers.JsonParser import JsonParser
28 from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
29 from resources.libraries.python.topology import NodeType, Topology
30 from resources.libraries.python.VPPUtil import VPPUtil
class InterfaceStatusFlags(IntEnum):
    """Interface status flags.

    Bit flags as consumed by the VPP API: bit 0 is the administrative
    state, bit 1 the physical link state.
    """
    IF_STATUS_API_FLAG_ADMIN_UP = 1
    IF_STATUS_API_FLAG_LINK_UP = 2
class MtuProto(IntEnum):
    """MTU protocol.

    NOTE(review): this listing dropped the enum's earlier members
    (values 0-2) — restore them from upstream before relying on it.
    """
    MTU_PROTO_API_MPLS = 3
class LinkDuplex(IntEnum):
    """Link duplex mode."""
    LINK_DUPLEX_API_UNKNOWN = 0
    LINK_DUPLEX_API_HALF = 1
    LINK_DUPLEX_API_FULL = 2
class SubInterfaceFlags(IntEnum):
    """Sub-interface flags.

    Power-of-two bit flags; callers combine them with ``|``.
    """
    SUB_IF_API_FLAG_NO_TAGS = 1
    SUB_IF_API_FLAG_ONE_TAG = 2
    SUB_IF_API_FLAG_TWO_TAGS = 4
    SUB_IF_API_FLAG_DOT1AD = 8
    SUB_IF_API_FLAG_EXACT_MATCH = 16
    SUB_IF_API_FLAG_DEFAULT = 32
    SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY = 64
    SUB_IF_API_FLAG_INNER_VLAN_ID_ANY = 128
    SUB_IF_API_FLAG_DOT1AH = 256
class RxMode(IntEnum):
    """Rx mode."""
    RX_MODE_API_UNKNOWN = 0
    RX_MODE_API_POLLING = 1
    RX_MODE_API_INTERRUPT = 2
    RX_MODE_API_ADAPTIVE = 3
    RX_MODE_API_DEFAULT = 4
class IfType(IntEnum):
    """Interface type.

    NOTE(review): this listing dropped the enum's later members —
    restore them from upstream before relying on it.
    """
    IF_API_TYPE_HARDWARE = 0
class LinkBondLoadBalanceAlgo(IntEnum):
    """Link bonding load balance algorithm."""
    BOND_API_LB_ALGO_L2 = 0    # L2 hash
    BOND_API_LB_ALGO_L34 = 1   # L3+L4 hash
    BOND_API_LB_ALGO_L23 = 2   # L2+L3 hash
    BOND_API_LB_ALGO_RR = 3    # round-robin
    BOND_API_LB_ALGO_BC = 4    # broadcast
    BOND_API_LB_ALGO_AB = 5    # active-backup
class LinkBondMode(IntEnum):
    """Link bonding mode.

    Note: numbering starts at 1, unlike the other enums in this file.
    """
    BOND_API_MODE_ROUND_ROBIN = 1
    BOND_API_MODE_ACTIVE_BACKUP = 2
    BOND_API_MODE_XOR = 3
    BOND_API_MODE_BROADCAST = 4
    BOND_API_MODE_LACP = 5
class RdmaMode(IntEnum):
    """RDMA interface mode.

    NOTE(review): verify against upstream — this listing may have
    dropped additional members after IBV.
    """
    RDMA_API_MODE_AUTO = 0
    RDMA_API_MODE_IBV = 1
class AfXdpMode(IntEnum):
    """AF_XDP interface mode."""
    AF_XDP_API_MODE_AUTO = 0
    AF_XDP_API_MODE_COPY = 1
    AF_XDP_API_MODE_ZERO_COPY = 2
121 """General utilities for managing interfaces"""
def pci_to_int(pci_str):
    """Convert PCI address from string format (0000:18:0a.0) to
    integer representation (169345024).

    Bit layout produced: domain | bus << 16 | slot << 24 | function << 29.

    (The docstring in the checked-in listing was unterminated; this
    restores a valid, documented definition with identical behavior.)

    :param pci_str: PCI address in string representation.
    :type pci_str: str
    :returns: Integer representation of PCI address.
    :rtype: int
    """
    # "dddd:bb:ss.f" -> domain/bus from the first two colon fields,
    # slot/function from splitting the last field on the dot.
    parts = pci_str.split(u":")
    slot_func = parts[2].split(u".")
    return (
        int(parts[0], 16)
        | int(parts[1], 16) << 16
        | int(slot_func[0], 16) << 24
        | int(slot_func[1], 16) << 29
    )
def pci_to_eth(node, pci_str):
    """Convert PCI address on DUT to Linux ethernet name.

    :param node: DUT node
    :param pci_str: PCI address.
    :type node: dict
    :type pci_str: str
    :returns: Ethernet name.
    :rtype: str
    :raises RuntimeError: If the PCI address cannot be resolved to a
        device name.
    """
    # The glob expands to the single netdev directory under the device.
    cmd = f"basename /sys/bus/pci/devices/{pci_str}/net/*"
    # Fix: in the listing the raise was unconditional (try/except framing
    # lost), making the success path unreachable; restore the handler and
    # chain the original error for debuggability.
    try:
        stdout, _ = exec_cmd_no_error(node, cmd)
    except RuntimeError as err:
        raise RuntimeError(
            f"Cannot convert {pci_str} to ethernet name!"
        ) from err
    return stdout.strip()
def get_interface_index(node, interface):
    """Get interface sw_if_index from topology file.

    :param node: Node where the interface is.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: str or int
    :returns: SW interface index.
    :rtype: int

    NOTE(review): this listing is missing lines (the try/except framing
    around the conversions and the final return) — restore from upstream
    before use.
    """
    # First attempt: interface passed as a numeric index.
    sw_if_index = int(interface)
    # Fallback: interface passed as a topology key, then as a name.
    sw_if_index = Topology.get_interface_sw_index(node, interface)
    if sw_if_index is None:
        Topology.get_interface_sw_index_by_name(node, interface)
    except TypeError as err:
        raise TypeError(f"Wrong interface format {interface}") from err
def set_interface_state(node, interface, state, if_type=u"key"):
    """Set interface state on a node.

    Function can be used for DUTs as well as for TGs.

    :param node: Node where the interface is.
    :param interface: Interface key or sw_if_index or name.
    :param state: One of 'up' or 'down'.
    :param if_type: Interface type
    :type node: dict
    :type interface: str or int
    :type state: str
    :type if_type: str
    :raises ValueError: If the interface type is unknown.
    :raises ValueError: If the state of interface is unexpected.
    :raises ValueError: If the node has an unknown node type.

    NOTE(review): this listing dropped several lines (the ``else:``
    branches, the ``state == u"up"`` guard, the admin-down flags value
    and the ``args = dict(...)`` wrapper) — restore from upstream.
    """
    if if_type == u"key":
        if isinstance(interface, str):
            sw_if_index = Topology.get_interface_sw_index(node, interface)
            iface_name = Topology.get_interface_name(node, interface)
        # NOTE(review): dropped `else:` — assignment belongs under it.
        sw_if_index = interface
    elif if_type == u"name":
        iface_key = Topology.get_interface_by_name(node, interface)
        if iface_key is not None:
            sw_if_index = Topology.get_interface_sw_index(node, iface_key)
            iface_name = interface
        # NOTE(review): dropped `else:` — unknown if_type rejected here.
        raise ValueError(f"Unknown if_type: {if_type}")
    if node[u"type"] == NodeType.DUT:
        # NOTE(review): dropped `if state == u"up":` guard.
        flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
        # NOTE(review): dropped body of the down branch (flags = 0).
        elif state == u"down":
        # NOTE(review): dropped `else:` — unexpected state rejected here.
        raise ValueError(f"Unexpected interface state: {state}")
        cmd = u"sw_interface_set_flags"
        err_msg = f"Failed to set interface state on host {node[u'host']}"
        # NOTE(review): dropped `args = dict(` wrapper and flags field.
        sw_if_index=int(sw_if_index),
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
    elif node[u"type"] == NodeType.TG or node[u"type"] == NodeType.VM:
        # TG/VM interfaces are plain Linux netdevs; use iproute2.
        cmd = f"ip link set {iface_name} {state}"
        exec_cmd_no_error(node, cmd, sudo=True)
    # NOTE(review): dropped `else:` + `raise ValueError(` around message.
    f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
def set_interface_state_pci(
        node, pf_pcis, namespace=None, state=u"up"):
    """Set operational state for interface specified by PCI address.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param namespace: Exec command in namespace. (Optional, Default: none)
    :param state: Up/Down. (Optional, default: up)
    :type node: dict
    :type pf_pcis: list
    :type namespace: str
    :type state: str
    """
    # Resolve each PCI address to its Linux netdev and set its state.
    for pci_addr in pf_pcis:
        eth_name = InterfaceUtil.pci_to_eth(node, pci_addr)
        InterfaceUtil.set_linux_interface_state(
            node, eth_name, namespace=namespace, state=state
        )
def set_interface_mtu(node, pf_pcis, mtu=9200):
    """Set Ethernet MTU for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param mtu: MTU to set. Default: 9200.
    :type node: dict
    :type pf_pcis: list
    :type mtu: int
    :raises RuntimeError: If failed to set MTU on interface.
    """
    for pci_address in pf_pcis:
        # Translate the PCI address to the Linux device name first.
        eth_device = InterfaceUtil.pci_to_eth(node, pci_address)
        exec_cmd_no_error(
            node, f"ip link set {eth_device} mtu {mtu}", sudo=True
        )
def set_interface_channels(
        node, pf_pcis, num_queues=1, channel=u"combined"):
    """Set interface channels for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param num_queues: Number of channels. (Optional, Default: 1)
    :param channel: Channel type. (Optional, Default: combined)
    :type node: dict
    :type pf_pcis: list
    :type num_queues: int
    :type channel: str
    """
    for pci_address in pf_pcis:
        eth_device = InterfaceUtil.pci_to_eth(node, pci_address)
        exec_cmd_no_error(
            node,
            f"ethtool --set-channels {eth_device} {channel} {num_queues}",
            sudo=True
        )
def set_interface_flow_control(node, pf_pcis, rxf=u"off", txf=u"off"):
    """Set Ethernet flow control for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param rxf: RX flow. (Optional, Default: off).
    :param txf: TX flow. (Optional, Default: off).
    :type node: dict
    :type pf_pcis: list
    :type rxf: str
    :type txf: str
    :raises RuntimeError: If failed to set flow control on an interface.
    """
    for pf_pci in pf_pcis:
        pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
        cmd = f"ethtool -A {pf_eth} rx {rxf} tx {txf}"
        ret_code, _, _ = exec_cmd(node, cmd, sudo=True)
        # Exit code 78 is tolerated in addition to 0 — presumably
        # "already in the requested state"; TODO confirm against ethtool.
        if int(ret_code) not in (0, 78):
            # Bug fix: the message lacked the f-prefix, so the literal
            # text "{pf_eth}" was reported instead of the device name.
            raise RuntimeError(f"Failed to set flow control on {pf_eth}!")
def set_pci_parameter(node, pf_pcis, key, value):
    """Set PCI parameter for specified interfaces.

    :param node: Topology node.
    :param pf_pcis: List of node's interfaces PCI addresses.
    :param key: Key to set.
    :param value: Value to set.
    :type node: dict
    :type pf_pcis: list
    :type key: str
    :type value: str
    """
    for pci_addr in pf_pcis:
        # setpci writes the register/capability named by `key` directly.
        exec_cmd_no_error(
            node, f"setpci -s {pci_addr} {key}={value}", sudo=True
        )
def vpp_set_interface_mtu(node, interface, mtu=9200):
    """Set Ethernet MTU on interface.

    :param node: VPP node.
    :param interface: Interface to setup MTU on.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type interface: str or int
    :type mtu: int

    NOTE(review): this listing dropped lines (`else:`, the
    `args = dict(...)` wrapper with the mtu field, and the `try:` before
    the PAPI call) — restore from upstream.
    """
    if isinstance(interface, str):
        sw_if_index = Topology.get_interface_sw_index(node, interface)
    # NOTE(review): dropped `else:` — assignment belongs under it.
    sw_if_index = interface

    cmd = u"hw_interface_set_mtu"
    err_msg = f"Failed to set interface MTU on host {node[u'host']}"
    # NOTE(review): dropped `args = dict(` wrapper and mtu field.
    sw_if_index=sw_if_index,
    # NOTE(review): dropped `try:` matching the except below.
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
    except AssertionError as err:
        # Best-effort: MTU set failures are only logged, not raised.
        logger.debug(f"Setting MTU failed.\n{err}")
def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
    """Set Ethernet MTU on all interfaces.

    :param node: VPP node.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type mtu: int
    """
    # Apply the MTU to every interface key known to the topology.
    for iface_key in node[u"interfaces"]:
        InterfaceUtil.vpp_set_interface_mtu(node, iface_key, mtu)
def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
    """Set Ethernet MTU on all interfaces on all DUTs.

    :param nodes: VPP nodes.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type nodes: dict
    :type mtu: int
    """
    for node_data in nodes.values():
        # Only DUT nodes run VPP; skip everything else.
        if node_data[u"type"] != NodeType.DUT:
            continue
        InterfaceUtil.vpp_set_interfaces_mtu_on_node(node_data, mtu)
def vpp_node_interfaces_ready_wait(node, retries=15):
    """Wait until all interfaces with admin-up are in link-up state.

    :param node: Node to wait on.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type node: dict
    :type retries: int
    :raises RuntimeError: If any interface is not in link-up state after
        defined number of retries.

    NOTE(review): this listing dropped lines (`not_ready` initialisation
    and the logger/break/sleep tail of the retry loop) — restore from
    upstream.
    """
    for _ in range(0, retries):
        # Re-read the interface dump on every attempt.
        out = InterfaceUtil.vpp_get_interface_data(node)
        for interface in out:
            # flags == 1 means ADMIN_UP set but LINK_UP not yet
            # (see InterfaceStatusFlags).
            if interface.get(u"flags") == 1:
                not_ready.append(interface.get(u"interface_name"))
        # NOTE(review): dropped `logger.debug(` wrapper and break/sleep.
        f"Interfaces still not in link-up state:\n{not_ready}"
    err = f"Timeout, interfaces not up:\n{not_ready}" \
        if u"not_ready" in locals() else u"No check executed!"
    raise RuntimeError(err)
def all_vpp_interfaces_ready_wait(nodes, retries=15):
    """Wait until all interfaces with admin-up are in link-up state for all
    nodes in the topology.

    :param nodes: Nodes in the topology.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type nodes: dict
    :type retries: int
    """
    for node_data in nodes.values():
        # Only DUTs run VPP; other node types have nothing to wait for.
        if node_data[u"type"] != NodeType.DUT:
            continue
        InterfaceUtil.vpp_node_interfaces_ready_wait(node_data, retries)
def vpp_get_interface_data(node, interface=None):
    """Get all interface data from a VPP node. If a name or
    sw_interface_index is provided, return only data for the matching
    interface(s).

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: List of dictionaries containing data for each interface, or a
        single dictionary for the specified interface.
    :rtype: list or dict
    :raises TypeError: if the data type of interface is neither basestring
        nor int.

    NOTE(review): this listing dropped lines (`return if_dump` in the
    helper, the `args = dict(` wrapper, `for dump in details:`, `break`
    and the final `return data`) — restore from upstream.
    """
    def process_if_dump(if_dump):
        """Process interface dump.

        :param if_dump: Interface dump.
        :type if_dump: dict
        :returns: Processed interface dump.
        :rtype: dict
        """
        # Normalize PAPI objects into plain str/int values.
        if_dump[u"l2_address"] = str(if_dump[u"l2_address"])
        if_dump[u"b_dmac"] = str(if_dump[u"b_dmac"])
        if_dump[u"b_smac"] = str(if_dump[u"b_smac"])
        if_dump[u"flags"] = if_dump[u"flags"].value
        if_dump[u"type"] = if_dump[u"type"].value
        if_dump[u"link_duplex"] = if_dump[u"link_duplex"].value
        if_dump[u"sub_if_flags"] = if_dump[u"sub_if_flags"].value \
            if hasattr(if_dump[u"sub_if_flags"], u"value") \
            else int(if_dump[u"sub_if_flags"])

    # Pick the dump field used to match a specific interface.
    if interface is not None:
        if isinstance(interface, str):
            param = u"interface_name"
        elif isinstance(interface, int):
            param = u"sw_if_index"
        # NOTE(review): dropped `else:` — TypeError belongs under it.
        raise TypeError(f"Wrong interface format {interface}")

    cmd = u"sw_interface_dump"
    # NOTE(review): dropped `args = dict(` wrapper around this field.
    name_filter_valid=False,
    err_msg = f"Failed to get interface dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)
    logger.debug(f"Received data:\n{details!r}")

    data = list() if interface is None else dict()
    # NOTE(review): dropped `for dump in details:` loop header.
    if interface is None:
        data.append(process_if_dump(dump))
    # Compare as strings; VPP pads names with NUL bytes, hence rstrip.
    elif str(dump.get(param)).rstrip(u"\x00") == str(interface):
        data = process_if_dump(dump)

    logger.debug(f"Interface data:\n{data}")
def vpp_get_interface_name(node, sw_if_index):
    """Get interface name for the given SW interface index from actual
    interface dump.

    :param node: VPP node to get interface data from.
    :param sw_if_index: SW interface index of the specific interface.
    :type node: dict
    :type sw_if_index: int
    :returns: Name of the given interface.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
    sup_index = if_data[u"sup_sw_if_index"]
    # When the record points at a different supervising interface,
    # fetch that record and take its name instead.
    if sup_index != if_data[u"sw_if_index"]:
        if_data = InterfaceUtil.vpp_get_interface_data(node, sup_index)

    return if_data.get(u"interface_name")
def vpp_get_interface_sw_index(node, interface_name):
    """Get interface sw_if_index for the given interface name from actual
    interface dump.

    :param node: VPP node to get interface data from.
    :param interface_name: Interface name.
    :type node: dict
    :type interface_name: str
    :returns: sw_if_index of the given interface.
    :rtype: int
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)

    return if_data.get(u"sw_if_index")
def vpp_get_interface_mac(node, interface):
    """Get MAC address for the given interface from actual interface dump.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: MAC address.
    :rtype: str
    """
    dump = InterfaceUtil.vpp_get_interface_data(node, interface)
    parent_index = dump[u"sup_sw_if_index"]
    # When the record points at a different supervising interface, read
    # the L2 address from that record instead.
    if parent_index != dump[u"sw_if_index"]:
        dump = InterfaceUtil.vpp_get_interface_data(node, parent_index)

    return dump.get(u"l2_address")
def vpp_set_interface_mac(node, interface, mac):
    """Set MAC address for the given interface.

    :param node: VPP node to set interface MAC.
    :param interface: Numeric index or name string of a specific interface.
    :param mac: Required MAC address.
    :type node: dict
    :type interface: int or str
    :type mac: str
    """
    cmd = u"sw_interface_set_mac_address"
    # Restore the `args = dict(...)` wrapper lost in the listing.
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        mac_address=L2Util.mac_to_bin(mac)
    )
    # Bug fix: the two f-string halves were concatenated without a
    # space ("...interface Xon host...").
    err_msg = f"Failed to set MAC address of interface {interface} " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def tg_set_interface_driver(node, pci_addr, driver):
    """Set interface driver on the TG node.

    :param node: Node to set interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :param driver: Driver name.
    :type node: dict
    :type pci_addr: str
    :type driver: str
    :raises RuntimeError: If unbinding from the current driver fails.
    :raises RuntimeError: If binding to the new driver fails.

    NOTE(review): this listing dropped lines (the early `return` when the
    driver already matches, and the `ssh = SSH(); ssh.connect(node)`
    setup that `ssh` below relies on) — restore from upstream.
    """
    old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
    # NOTE(review): dropped `return` body — nothing to do if unchanged.
    if old_driver == driver:

    # Unbind from current driver
    if old_driver is not None:
        cmd = f"sh -c \"echo {pci_addr} > " \
            f"/sys/bus/pci/drivers/{old_driver}/unbind\""
        ret_code, _, _ = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")

    # Bind to the new driver
    cmd = f"sh -c \"echo {pci_addr} > /sys/bus/pci/drivers/{driver}/bind\""
    ret_code, _, _ = ssh.exec_command_sudo(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
def tg_get_interface_driver(node, pci_addr):
    """Get interface driver from the TG node.

    :param node: Node to get interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :type node: dict
    :type pci_addr: str
    :returns: Interface driver or None if not found.
    :rtype: str
    :raises RuntimeError: If PCI rescan or lspci command execution failed.
    """
    # Delegate to the shared DUTSetup helper.
    driver = DUTSetup.get_pci_dev_driver(node, pci_addr)
    return driver
def tg_set_interfaces_default_driver(node):
    """Set interfaces default driver specified in topology yaml file.

    :param node: Node to setup interfaces driver on (must be TG node).
    :type node: dict
    """
    # Bind every topology interface to the driver the yaml declares.
    for iface in node[u"interfaces"].values():
        InterfaceUtil.tg_set_interface_driver(
            node, iface[u"pci_address"], iface[u"driver"]
        )
def update_vpp_interface_data_on_node(node):
    """Update vpp generated interface data for a given node in DICT__nodes.

    Updates interface names, software if index numbers and any other details
    generated specifically by vpp that are unknown before testcase run.
    It does this by dumping interface list from all devices using python
    api, and pairing known information from topology (mac address) to state
    from the dump.

    :param node: Node selected from DICT__nodes.
    :type node: dict

    NOTE(review): this listing dropped the `logger.debug(` wrappers
    around the two message f-strings and the `else:` before the
    not-found branch — restore from upstream.
    """
    interface_list = InterfaceUtil.vpp_get_interface_data(node)
    # Index dumped interfaces by their L2 (MAC) address.
    interface_dict = dict()
    for ifc in interface_list:
        interface_dict[ifc[u"l2_address"]] = ifc

    for if_name, if_data in node[u"interfaces"].items():
        # Pair the topology record with the dump via MAC address.
        ifc_dict = interface_dict.get(if_data[u"mac_address"])
        if ifc_dict is not None:
            if_data[u"name"] = ifc_dict[u"interface_name"]
            if_data[u"vpp_sw_index"] = ifc_dict[u"sw_if_index"]
            if_data[u"mtu"] = ifc_dict[u"mtu"][0]
            # NOTE(review): dropped `logger.debug(` wrapper.
            f"Interface {if_name} found by MAC "
            f"{if_data[u'mac_address']}"
            # NOTE(review): dropped `else:` + `logger.debug(` wrapper;
            # the lines below belong to the not-found branch.
            f"Interface {if_name} not found by MAC "
            f"{if_data[u'mac_address']}"
            if_data[u"vpp_sw_index"] = None
def update_nic_interface_names(node):
    """Update interface names based on nic type and PCI address.

    This method updates interface names in the same format as VPP does.

    :param node: Node dictionary.
    :type node: dict
    """
    # Map NIC model to the VPP-style name prefix; anything unknown
    # falls back to "UnknownEthernet".
    prefix_by_model = {
        u"Intel-XL710": u"FortyGigabitEthernet",
        u"Intel-X710": u"TenGigabitEthernet",
        u"Intel-X520-DA2": u"TenGigabitEthernet",
        u"Cisco-VIC-1385": u"FortyGigabitEthernet",
        u"Cisco-VIC-1227": u"TenGigabitEthernet",
    }
    for ifc in node[u"interfaces"].values():
        # "dddd:bb:ss.f" -> ["dddd", "bb", "ss", "f"]; the name uses
        # bus/slot/function rendered as bare hex, slash-separated.
        if_pci = ifc[u"pci_address"].replace(u".", u":").split(u":")
        loc = f"{int(if_pci[1], 16):x}/{int(if_pci[2], 16):x}/" \
            f"{int(if_pci[3], 16):x}"
        # Fix: the listing lost the `else:` before the Unknown fallback,
        # which would have overwritten every name with UnknownEthernet;
        # the dispatch table restores one assignment per interface.
        prefix = prefix_by_model.get(ifc[u"model"], u"UnknownEthernet")
        ifc[u"name"] = f"{prefix}{loc}"
def update_nic_interface_names_on_all_duts(nodes):
    """Update interface names based on nic type and PCI address on all DUTs.

    This method updates interface names in the same format as VPP does.

    :param nodes: Topology nodes.
    :type nodes: dict
    """
    for node_data in nodes.values():
        # Only DUT nodes carry VPP-style interface names.
        if node_data[u"type"] != NodeType.DUT:
            continue
        InterfaceUtil.update_nic_interface_names(node_data)
def update_tg_interface_data_on_node(node):
    """Update interface name for TG/linux node in DICT__nodes.

    Expected output of the shell command below::

        # for dev in `ls /sys/class/net/`;
        > do echo "\"`cat /sys/class/net/$dev/address`\": \"$dev\""; done
        "52:54:00:9f:82:63": "eth0"
        "52:54:00:77:ae:a9": "eth1"
        "52:54:00:e1:8a:0f": "eth2"
        "00:00:00:00:00:00": "lo"

    :param node: Node selected from DICT__nodes.
    :type node: dict
    :raises RuntimeError: If getting of interface name and MAC fails.

    NOTE(review): this listing dropped the SSH session setup — `ssh` is
    used below but never created here; restore from upstream.
    """
    # First setup interface driver specified in yaml file
    InterfaceUtil.tg_set_interfaces_default_driver(node)

    # Get interface names
    cmd = u'for dev in `ls /sys/class/net/`; do echo "\\"`cat ' \
        u'/sys/class/net/$dev/address`\\": \\"$dev\\""; done;'

    ret_code, stdout, _ = ssh.exec_command(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(u"Get interface name and MAC failed")
    # Wrap the emitted `"mac": "dev"` lines into a single JSON object.
    tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"

    interfaces = JsonParser().parse_data(tmp)
    for interface in node[u"interfaces"].values():
        name = interfaces.get(interface[u"mac_address"])
        # NOTE(review): a guard for `name is None` was presumably
        # dropped here — confirm against upstream.
        interface[u"name"] = name
def iface_update_numa_node(node):
    """For all interfaces from topology file update numa node based on
    information from the node.

    :param node: Node from topology.
    :type node: dict
    :returns: Nothing.
    :raises ValueError: If numa node is less than 0.
    :raises RuntimeError: If update of numa node failed.

    NOTE(review): this listing dropped lines (SSH session setup, the
    retry loop and try/except framing around the numa read) — restore
    from upstream; `ssh` below is otherwise undefined.
    """
    for if_key in Topology.get_node_interfaces(node):
        if_pci = Topology.get_interface_pci_addr(node, if_key)

        cmd = f"cat /sys/bus/pci/devices/{if_pci}/numa_node"

        ret, out, _ = ssh.exec_command(cmd)

        # A negative sysfs value (numa unknown) is clamped to node 0.
        numa_node = 0 if int(out) < 0 else int(out)
        # NOTE(review): dropped logging/except wrapper around message.
        f"Reading numa location failed for: {if_pci}"

        Topology.set_interface_numa_node(
            node, if_key, numa_node

        raise RuntimeError(f"Update numa node failed for: {if_pci}")
def update_all_interface_data_on_all_nodes(
        nodes, skip_tg=False, skip_vpp=False):
    """Update interface names on all nodes in DICT__nodes.

    This method updates the topology dictionary by querying interface lists
    of all nodes mentioned in the topology dictionary.

    :param nodes: Nodes in the topology.
    :param skip_tg: Skip TG node.
    :param skip_vpp: Skip VPP node.
    :type nodes: dict
    :type skip_tg: bool
    :type skip_vpp: bool
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT and not skip_vpp:
            InterfaceUtil.update_vpp_interface_data_on_node(node)
        elif node[u"type"] == NodeType.TG and not skip_tg:
            InterfaceUtil.update_tg_interface_data_on_node(node)
            # NOTE(review): original indentation was lost in this
            # listing — confirm against upstream whether the numa update
            # belongs to the TG branch only or runs for every node.
            InterfaceUtil.iface_update_numa_node(node)
def create_vlan_subinterface(node, interface, vlan):
    """Create VLAN sub-interface on node.

    :param node: Node to add VLAN subinterface on.
    :param interface: Interface name or index on which create VLAN
        subinterface.
    :param vlan: VLAN ID of the subinterface to be created.
    :type node: dict
    :type interface: str or int
    :type vlan: int
    :returns: Name and index of created subinterface.
    :rtype: tuple
    :raises RuntimeError: if it is unable to create VLAN subinterface on the
        node or interface cannot be converted.

    NOTE(review): this listing dropped the `args = dict(...)` wrapper
    (including the vlan id field) — restore from upstream.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"create_vlan_subif"
    # NOTE(review): dropped `args = dict(` wrapper around this field.
    sw_if_index=sw_if_index,
    err_msg = f"Failed to create VLAN sub-interface on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new sub-interface in the topology.
    if_key = Topology.add_new_port(node, u"vlan_subif")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{vlan}", sw_if_index
def create_vxlan_interface(node, vni, source_ip, destination_ip):
    """Create VXLAN interface and return sw if index of created interface.

    :param node: Node where to create VXLAN interface.
    :param vni: VXLAN Network Identifier.
    :param source_ip: Source IP of a VXLAN Tunnel End Point.
    :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
    :type node: dict
    :type vni: int
    :type source_ip: str
    :type destination_ip: str
    :returns: SW IF INDEX of created interface.
    :rtype: int
    :raises RuntimeError: if it is unable to create VxLAN interface on the
        node.

    NOTE(review): this listing dropped lines (the `args = dict(` wrapper
    with its is_add/vni fields, closing parens of the address calls, and
    the final `return sw_if_index`) — restore from upstream.
    """
    cmd = u"vxlan_add_del_tunnel"
    # NOTE(review): dropped `args = dict(` wrapper around the fields.
    instance=Constants.BITWISE_NON_ZERO,
    src_address=IPAddress.create_ip_address_object(
        ip_address(source_ip)
    dst_address=IPAddress.create_ip_address_object(
        ip_address(destination_ip)
    mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
    decap_next_index=Constants.BITWISE_NON_ZERO,
    err_msg = f"Failed to create VXLAN tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new tunnel interface in the topology.
    if_key = Topology.add_new_port(node, u"vxlan_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
def set_vxlan_bypass(node, interface=None):
    """Add the 'ip4-vxlan-bypass' graph node for a given interface.

    By adding the IPv4 vxlan-bypass graph node to an interface, the node
    checks for and validates input vxlan packets and bypasses ip4-lookup,
    ip4-local, ip4-udp-lookup nodes to speedup vxlan packet forwarding.
    This node will cause extra overhead for non-vxlan packets which is
    kept to a minimum.

    :param node: Node where to set VXLAN bypass.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :raises RuntimeError: if it failed to set VXLAN bypass on interface.

    NOTE(review): this listing dropped the `args = dict(...)` wrapper
    (enable flag) — restore from upstream.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"sw_interface_set_vxlan_bypass"
    # NOTE(review): dropped `args = dict(` wrapper around this field.
    sw_if_index=sw_if_index,
    err_msg = f"Failed to set VXLAN bypass on interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_replies(err_msg)
def vxlan_dump(node, interface=None):
    """Get VxLAN data for the given interface.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
        If None, information about all VxLAN interfaces is returned.
    :type node: dict
    :type interface: int or str
    :returns: Dictionary containing data for the given VxLAN interface or if
        interface=None, the list of dictionaries with all VxLAN interfaces.
    :rtype: dict or list
    :raises TypeError: if the data type of interface is neither basestring
        nor int.

    NOTE(review): this listing dropped lines (`return vxlan_dump` in the
    helper, the `args = dict(` wrapper, `for dump in details:`, `break`
    and the final `return data`) — restore from upstream.
    """
    def process_vxlan_dump(vxlan_dump):
        """Process vxlan dump.

        :param vxlan_dump: Vxlan interface dump.
        :type vxlan_dump: dict
        :returns: Processed vxlan interface dump.
        :rtype: dict
        """
        # Normalize PAPI address objects into plain strings.
        vxlan_dump[u"src_address"] = str(vxlan_dump[u"src_address"])
        vxlan_dump[u"dst_address"] = str(vxlan_dump[u"dst_address"])

    if interface is not None:
        sw_if_index = InterfaceUtil.get_interface_index(node, interface)
    # NOTE(review): dropped `else:` — the wildcard index belongs under it.
    sw_if_index = int(Constants.BITWISE_NON_ZERO)

    cmd = u"vxlan_tunnel_dump"
    # NOTE(review): dropped `args = dict(` wrapper around this field.
    sw_if_index=sw_if_index
    err_msg = f"Failed to get VXLAN dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)

    data = list() if interface is None else dict()
    # NOTE(review): dropped `for dump in details:` loop header.
    if interface is None:
        data.append(process_vxlan_dump(dump))
    elif dump[u"sw_if_index"] == sw_if_index:
        data = process_vxlan_dump(dump)

    logger.debug(f"VXLAN data:\n{data}")
def create_subinterface(
        node, interface, sub_id, outer_vlan_id=None, inner_vlan_id=None,
    # NOTE(review): this listing dropped the end of the signature
    # (presumably `type_subif=None):`) and the `flags = 0`
    # initialisation plus parts of `args = dict(...)` — restore from
    # upstream.
    """Create sub-interface on node. It is possible to set required
    sub-interface type and VLAN tag(s).

    :param node: Node to add sub-interface.
    :param interface: Interface name on which create sub-interface.
    :param sub_id: ID of the sub-interface to be created.
    :param outer_vlan_id: Optional outer VLAN ID.
    :param inner_vlan_id: Optional inner VLAN ID.
    :param type_subif: Optional type of sub-interface. Values supported by
        VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
    :type node: dict
    :type interface: str or int
    :type sub_id: int
    :type outer_vlan_id: int
    :type inner_vlan_id: int
    :type type_subif: str
    :returns: Name and index of created sub-interface.
    :rtype: tuple
    :raises RuntimeError: If it is not possible to create sub-interface.
    """
    subif_types = type_subif.split()

    # Accumulate SubInterfaceFlags bits for each requested keyword.
    if u"no_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_NO_TAGS
    if u"one_tag" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_ONE_TAG
    if u"two_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_TWO_TAGS
    if u"dot1ad" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DOT1AD
    if u"exact_match" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_EXACT_MATCH
    if u"default_sub" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DEFAULT
    if type_subif == u"default_sub":
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_INNER_VLAN_ID_ANY\
            | SubInterfaceFlags.SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY

    cmd = u"create_subif"
    # NOTE(review): dropped `args = dict(` wrapper and sub_id field.
    sw_if_index=InterfaceUtil.get_interface_index(node, interface),
    sub_if_flags=flags.value if hasattr(flags, u"value")
    outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
    inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0
    err_msg = f"Failed to create sub-interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new sub-interface in the topology.
    if_key = Topology.add_new_port(node, u"subinterface")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{sub_id}", sw_if_index
def create_gre_tunnel_interface(node, source_ip, destination_ip):
    """Create GRE tunnel interface on node.

    :param node: VPP node to add tunnel interface.
    :param source_ip: Source of the GRE tunnel.
    :param destination_ip: Destination of the GRE tunnel.
    :type node: dict
    :type source_ip: str
    :type destination_ip: str
    :returns: Name and index of created GRE tunnel interface.
    :rtype: tuple
    :raises RuntimeError: If unable to create GRE tunnel interface.

    NOTE(review): this listing dropped most of the tunnel argument
    structure (`args = dict(` wrapper, is_add/src and other fields) —
    restore from upstream.
    """
    cmd = u"gre_tunnel_add_del"
    # NOTE(review): dropped `args = dict(` / `tunnel=dict(` wrapper.
    instance=Constants.BITWISE_NON_ZERO,
    dst=str(destination_ip),
    err_msg = f"Failed to create GRE tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new tunnel interface in the topology.
    if_key = Topology.add_new_port(node, u"gre_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return ifc_name, sw_if_index
def create_gtpu_tunnel_interface(node, teid, source_ip, destination_ip):
    """Create GTPU interface and return sw if index of created interface.

    :param node: Node where to create GTPU interface.
    :param teid: GTPU Tunnel Endpoint Identifier.
    :param source_ip: Source IP of a GTPU Tunnel End Point.
    :param destination_ip: Destination IP of a GTPU Tunnel End Point.
    :type node: dict
    :type teid: int
    :type source_ip: str
    :type destination_ip: str
    :returns: SW IF INDEX of created interface.
    :rtype: int
    :raises RuntimeError: if it is unable to create GTPU interface on the
        node.

    NOTE(review): this listing dropped lines (the `args = dict(` wrapper
    with is_add/teid and related fields, closing parens, and the final
    `return sw_if_index`) — restore from upstream.
    """
    cmd = u"gtpu_add_del_tunnel"
    # NOTE(review): dropped `args = dict(` wrapper around the fields.
    src_address=IPAddress.create_ip_address_object(
        ip_address(source_ip)
    dst_address=IPAddress.create_ip_address_object(
        ip_address(destination_ip)
    mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
    err_msg = f"Failed to create GTPU tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new tunnel interface in the topology.
    if_key = Topology.add_new_port(node, u"gtpu_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
def vpp_create_loopback(node, mac=None):
    """Create loopback interface on VPP node.

    :param node: Node to create loopback interface on.
    :param mac: Optional MAC address for loopback interface.
    :type node: dict
    :type mac: str
    :returns: SW interface index.
    :rtype: int
    :raises RuntimeError: If it is not possible to create loopback on the
        node.

    NOTE(review): this listing dropped lines (the `args = dict(` wrapper
    with its remaining fields and the final `return sw_if_index`) —
    restore from upstream.
    """
    cmd = u"create_loopback_instance"
    # NOTE(review): dropped `args = dict(` wrapper around this field.
    mac_address=L2Util.mac_to_bin(mac) if mac else 0,
    err_msg = f"Failed to create loopback interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new loopback in the topology, including the MAC that
    # VPP actually assigned (read back from the interface dump).
    if_key = Topology.add_new_port(node, u"loopback")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    mac = InterfaceUtil.vpp_get_interface_mac(node, ifc_name)
    Topology.update_interface_mac_address(node, if_key, mac)
1140 def vpp_create_bond_interface(
1141 node, mode, load_balance=None, mac=None, gso=False):
1142 """Create bond interface on VPP node.
1144 :param node: DUT node from topology.
1145 :param mode: Link bonding mode.
1146 :param load_balance: Load balance (optional, valid for xor and lacp
1147 modes, otherwise ignored). Default: None.
1148 :param mac: MAC address to assign to the bond interface (optional).
1150 :param gso: Enable GSO support (optional). Default: False.
1153 :type load_balance: str
1156 :returns: Interface key (name) in topology.
1158 :raises RuntimeError: If it is not possible to create bond interface on
1161 cmd = u"bond_create2"
# All-ones id lets VPP auto-assign the bond instance number.
1163 id=int(Constants.BITWISE_NON_ZERO),
1164 use_custom_mac=bool(mac is not None),
1165 mac_address=L2Util.mac_to_bin(mac) if mac else None,
# Map the dash-separated test-level mode string onto the API enum name,
# e.g. "round-robin" -> BOND_API_MODE_ROUND_ROBIN.
1168 f"BOND_API_MODE_{mode.replace(u'-', u'_').upper()}"
1170 lb=0 if load_balance is None else getattr(
1171 LinkBondLoadBalanceAlgo,
1172 f"BOND_API_LB_ALGO_{load_balance.upper()}"
1177 err_msg = f"Failed to create bond interface on host {node[u'host']}"
1178 with PapiSocketExecutor(node) as papi_exec:
1179 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
# Delegate topology bookkeeping (key, name, MAC) to the shared helper.
1181 InterfaceUtil.add_eth_interface(
1182 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_bond"
1184 if_key = Topology.get_interface_by_sw_index(node, sw_if_index)
1189 def add_eth_interface(
1190 node, ifc_name=None, sw_if_index=None, ifc_pfx=None,
1192 """Add ethernet interface to current topology.
1194 :param node: DUT node from topology.
1195 :param ifc_name: Name of the interface.
1196 :param sw_if_index: SW interface index.
1197 :param ifc_pfx: Interface key prefix.
1198 :param host_if_key: Host interface key from topology file.
1201 :type sw_if_index: int
1203 :type host_if_key: str
1205 if_key = Topology.add_new_port(node, ifc_pfx)
# Exactly one of ifc_name / sw_if_index is expected; resolve the other
# from VPP so both end up stored in topology.
1207 if ifc_name and sw_if_index is None:
1208 sw_if_index = InterfaceUtil.vpp_get_interface_sw_index(
1210 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
1211 if sw_if_index and ifc_name is None:
1212 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
1213 Topology.update_interface_name(node, if_key, ifc_name)
1214 ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_index)
1215 Topology.update_interface_mac_address(node, if_key, ifc_mac)
# If a host-side interface key is known, inherit its NUMA node and PCI
# address so scheduling/binding logic works for the new port too.
1216 if host_if_key is not None:
1217 Topology.set_interface_numa_node(
1218 node, if_key, Topology.get_interface_numa_node(
1222 Topology.update_interface_pci_address(
1223 node, if_key, Topology.get_interface_pci_addr(node, host_if_key)
1227 def vpp_create_avf_interface(
1228 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0):
1229 """Create AVF interface on VPP node.
1231 :param node: DUT node from topology.
1232 :param if_key: Interface key from topology file of interface
1233 to be bound to i40evf driver.
1234 :param num_rx_queues: Number of RX queues.
1235 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1236 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1239 :type num_rx_queues: int
1242 :returns: AVF interface key (name) in topology.
1244 :raises RuntimeError: If it is not possible to create AVF interface on
# Raise AVF plugin log verbosity first, so a failing create leaves
# useful diagnostics in the VPP log.
1247 PapiSocketExecutor.run_cli_cmd(
1248 node, u"set logging class avf level debug"
1252 vf_pci_addr = Topology.get_interface_pci_addr(node, if_key)
1254 pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
# 0 means "let the API/VPP choose the queue count".
1256 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1260 err_msg = f"Failed to create AVF interface on host {node[u'host']}"
1262 # FIXME: Remove once the fw/driver is upgraded.
1264 with PapiSocketExecutor(node) as papi_exec:
1266 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(
# Log before re-raising so the failure context is captured even when
# the caller swallows the exception.
1270 except AssertionError:
1271 logger.error(err_msg)
1273 raise AssertionError(err_msg)
1275 InterfaceUtil.add_eth_interface(
1276 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
1280 return Topology.get_interface_by_sw_index(node, sw_if_index)
1283 def vpp_create_af_xdp_interface(
1284 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
1286 """Create AF_XDP interface on VPP node.
1288 :param node: DUT node from topology.
1289 :param if_key: Physical interface key from topology file of interface
1290 to be bound to compatible driver.
1291 :param num_rx_queues: Number of RX queues. (Optional, Default: none)
1292 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1293 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1294 :param mode: AF_XDP interface mode. (Optional, Default: auto).
1297 :type num_rx_queues: int
1301 :returns: Interface key (name) in topology file.
1303 :raises RuntimeError: If it is not possible to create AF_XDP interface
# Raise AF_XDP plugin log verbosity first for easier failure diagnosis.
1306 PapiSocketExecutor.run_cli_cmd(
1307 node, u"set logging class af_xdp level debug"
1310 cmd = u"af_xdp_create"
1311 pci_addr = Topology.get_interface_pci_addr(node, if_key)
# AF_XDP attaches to the kernel netdev, so both names are derived from
# the PCI address of the underlying NIC.
1313 name=InterfaceUtil.pci_to_eth(node, pci_addr),
1314 host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
1315 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
# Map the mode string onto the AfXdpMode enum, e.g. "auto" ->
# AF_XDP_API_MODE_AUTO.
1318 mode=getattr(AfXdpMode, f"AF_XDP_API_MODE_{mode.upper()}").value
1320 err_msg = f"Failed to create AF_XDP interface on host {node[u'host']}"
1321 with PapiSocketExecutor(node) as papi_exec:
1322 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
# Reuse the physical interface MAC from topology on the new interface.
1324 InterfaceUtil.vpp_set_interface_mac(
1325 node, sw_if_index, Topology.get_interface_mac(node, if_key)
1327 InterfaceUtil.add_eth_interface(
1328 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_af_xdp",
1332 return Topology.get_interface_by_sw_index(node, sw_if_index)
1335 def vpp_create_rdma_interface(
1336 node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
1338 """Create RDMA interface on VPP node.
1340 :param node: DUT node from topology.
1341 :param if_key: Physical interface key from topology file of interface
1342 to be bound to rdma-core driver.
1343 :param num_rx_queues: Number of RX queues.
1344 :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
1345 :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
1346 :param mode: RDMA interface mode - auto/ibv/dv.
1349 :type num_rx_queues: int
1353 :returns: Interface key (name) in topology file.
1355 :raises RuntimeError: If it is not possible to create RDMA interface on
# Raise RDMA plugin log verbosity first for easier failure diagnosis.
1358 PapiSocketExecutor.run_cli_cmd(
1359 node, u"set logging class rdma level debug"
1362 cmd = u"rdma_create_v2"
1363 pci_addr = Topology.get_interface_pci_addr(node, if_key)
# rdma attaches to the kernel netdev of the Mellanox NIC; both names are
# derived from its PCI address.
1365 name=InterfaceUtil.pci_to_eth(node, pci_addr),
1366 host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
1367 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1370 mode=getattr(RdmaMode, f"RDMA_API_MODE_{mode.upper()}").value,
1371 # Note: Set True for non-jumbo packets.
1375 err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
1376 with PapiSocketExecutor(node) as papi_exec:
1377 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
# Reuse the physical interface MAC from topology on the new interface.
1379 InterfaceUtil.vpp_set_interface_mac(
1380 node, sw_if_index, Topology.get_interface_mac(node, if_key)
1382 InterfaceUtil.add_eth_interface(
1383 node, sw_if_index=sw_if_index, ifc_pfx=u"eth_rdma",
1387 return Topology.get_interface_by_sw_index(node, sw_if_index)
1390 def vpp_add_bond_member(node, interface, bond_if):
1391 """Add member interface to bond interface on VPP node.
1393 :param node: DUT node from topology.
1394 :param interface: Physical interface key from topology file.
1395 :param bond_if: Bond interface key from topology file.
1397 :type interface: str
1399 :raises RuntimeError: If it is not possible to add member to bond
1400 interface on the node.
1402 cmd = u"bond_add_member"
1404 sw_if_index=Topology.get_interface_sw_index(node, interface),
1405 bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
1407 is_long_timeout=False
1409 err_msg = f"Failed to add member {interface} to bond interface " \
1410 f"{bond_if} on host {node[u'host']}"
1411 with PapiSocketExecutor(node) as papi_exec:
1412 papi_exec.add(cmd, **args).get_reply(err_msg)
1415 def vpp_show_bond_data_on_node(node, verbose=False):
1416 """Show (detailed) bond information on VPP node.
1418 :param node: DUT node from topology.
1419 :param verbose: If detailed information is required or not.
1423 cmd = u"sw_bond_interface_dump"
1424 err_msg = f"Failed to get bond interface dump on host {node[u'host']}"
1426 data = f"Bond data on node {node[u'host']}:\n"
1427 with PapiSocketExecutor(node) as papi_exec:
1428 details = papi_exec.add(cmd).get_details(err_msg)
1430 for bond in details:
1431 data += f"{bond[u'interface_name']}\n"
# Strip the API enum prefixes so the report shows the short test-level
# names (e.g. "lacp", "l34").
1432 data += u" mode: {m}\n".format(
1433 m=bond[u"mode"].name.replace(u"BOND_API_MODE_", u"").lower()
1435 data += u" load balance: {lb}\n".format(
1436 lb=bond[u"lb"].name.replace(u"BOND_API_LB_ALGO_", u"").lower()
1438 data += f" number of active members: {bond[u'active_members']}\n"
1440 member_data = InterfaceUtil.vpp_bond_member_dump(
1441 node, Topology.get_interface_by_sw_index(
1442 node, bond[u"sw_if_index"]
# Active members are listed first; passive ones only appear in the
# full member list below.
1445 for member in member_data:
1446 if not member[u"is_passive"]:
1447 data += f" {member[u'interface_name']}\n"
1448 data += f" number of members: {bond[u'members']}\n"
1450 for member in member_data:
1451 data += f" {member[u'interface_name']}\n"
1452 data += f" interface id: {bond[u'id']}\n"
1453 data += f" sw_if_index: {bond[u'sw_if_index']}\n"
1457 def vpp_bond_member_dump(node, interface):
1458 """Get bond interface member(s) data on VPP node.
1460 :param node: DUT node from topology.
1461 :param interface: Physical interface key from topology file.
1463 :type interface: str
1464 :returns: Bond member interface data.
1467 cmd = u"sw_member_interface_dump"
1469 sw_if_index=Topology.get_interface_sw_index(node, interface)
1471 err_msg = f"Failed to get slave dump on host {node[u'host']}"
1473 with PapiSocketExecutor(node) as papi_exec:
1474 details = papi_exec.add(cmd, **args).get_details(err_msg)
1476 logger.debug(f"Member data:\n{details}")
1480 def vpp_show_bond_data_on_all_nodes(nodes, verbose=False):
1481 """Show (detailed) bond information on all VPP nodes in DICT__nodes.
1483 :param nodes: Nodes in the topology.
1484 :param verbose: If detailed information is required or not.
# Only DUT nodes run VPP; TG/other node types are skipped.
1488 for node_data in nodes.values():
1489 if node_data[u"type"] == NodeType.DUT:
1490 InterfaceUtil.vpp_show_bond_data_on_node(node_data, verbose)
1493 def vpp_enable_input_acl_interface(
1494 node, interface, ip_version, table_index):
1495 """Enable input acl on interface.
1497 :param node: VPP node to setup interface for input acl.
1498 :param interface: Interface to setup input acl.
1499 :param ip_version: Version of IP protocol.
1500 :param table_index: Classify table index.
1502 :type interface: str or int
1503 :type ip_version: str
1504 :type table_index: int
1506 cmd = u"input_acl_set_interface"
# The classify table index is applied only to the selected protocol
# slot ("ip4", "ip6" or "l2"); the other two get the all-ones sentinel
# meaning "no table".
1508 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1509 ip4_table_index=table_index if ip_version == u"ip4"
1510 else Constants.BITWISE_NON_ZERO,
1511 ip6_table_index=table_index if ip_version == u"ip6"
1512 else Constants.BITWISE_NON_ZERO,
1513 l2_table_index=table_index if ip_version == u"l2"
1514 else Constants.BITWISE_NON_ZERO,
1516 err_msg = f"Failed to enable input acl on interface {interface}"
1517 with PapiSocketExecutor(node) as papi_exec:
1518 papi_exec.add(cmd, **args).get_reply(err_msg)
1521 def get_interface_classify_table(node, interface):
1522 """Get name of classify table for the given interface.
1524 TODO: Move to Classify.py.
1526 :param node: VPP node to get data from.
1527 :param interface: Name or sw_if_index of a specific interface.
1529 :type interface: str or int
1530 :returns: Classify table name.
# Accept either an interface name (resolve to sw_if_index) or a numeric
# sw_if_index directly.
1533 if isinstance(interface, str):
1534 sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
1536 sw_if_index = interface
1538 cmd = u"classify_table_by_interface"
1540 sw_if_index=sw_if_index
1542 err_msg = f"Failed to get classify table name by interface {interface}"
1543 with PapiSocketExecutor(node) as papi_exec:
1544 reply = papi_exec.add(cmd, **args).get_reply(err_msg)
1549 def get_sw_if_index(node, interface_name):
1550 """Get sw_if_index for the given interface from actual interface dump.
1552 FIXME: Delete and redirect callers to vpp_get_interface_sw_index.
1554 :param node: VPP node to get interface data from.
1555 :param interface_name: Name of the specific interface.
1557 :type interface_name: str
1558 :returns: sw_if_index of the given interface.
# Full interface dump filtered by name; .get() returns None when the
# interface is not present in the dump.
1561 interface_data = InterfaceUtil.vpp_get_interface_data(
1562 node, interface=interface_name
1564 return interface_data.get(u"sw_if_index")
1567 def vxlan_gpe_dump(node, interface_name=None):
1568 """Get VxLAN GPE data for the given interface.
1570 :param node: VPP node to get interface data from.
1571 :param interface_name: Name of the specific interface. If None,
1572 information about all VxLAN GPE interfaces is returned.
1574 :type interface_name: str
1575 :returns: Dictionary containing data for the given VxLAN GPE interface
1576 or if interface=None, the list of dictionaries with all VxLAN GPE
1578 :rtype: dict or list
1580 def process_vxlan_gpe_dump(vxlan_dump):
1581 """Process vxlan_gpe dump.
1583 :param vxlan_dump: VxLAN GPE interface dump.
1584 :type vxlan_dump: dict
1585 :returns: Processed vxlan_gpe interface dump.
# The API returns a 16-byte address buffer; for IPv4 only the first
# 4 bytes are meaningful, so slice before converting.
1588 if vxlan_dump[u"is_ipv6"]:
1589 vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"])
1590 vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"])
1592 vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"][0:4])
1593 vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"][0:4])
# All-ones sw_if_index asks the API for a dump of every tunnel.
1596 if interface_name is not None:
1597 sw_if_index = InterfaceUtil.get_interface_index(
1598 node, interface_name
1601 sw_if_index = int(Constants.BITWISE_NON_ZERO)
1603 cmd = u"vxlan_gpe_tunnel_dump"
1605 sw_if_index=sw_if_index
1607 err_msg = f"Failed to get VXLAN-GPE dump on host {node[u'host']}"
1608 with PapiSocketExecutor(node) as papi_exec:
1609 details = papi_exec.add(cmd, **args).get_details(err_msg)
# Single dict for a named interface, list of dicts otherwise.
1611 data = list() if interface_name is None else dict()
1612 for dump in details:
1613 if interface_name is None:
1614 data.append(process_vxlan_gpe_dump(dump))
1615 elif dump[u"sw_if_index"] == sw_if_index:
1616 data = process_vxlan_gpe_dump(dump)
1619 logger.debug(f"VXLAN-GPE data:\n{data}")
1623 def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
1624 """Assign VPP interface to specific VRF/FIB table.
1626 :param node: VPP node where the FIB and interface are located.
1627 :param interface: Interface to be assigned to FIB.
1628 :param table_id: VRF table ID.
1629 :param ipv6: Assign to IPv6 table. Default False.
1631 :type interface: str or int
1635 cmd = u"sw_interface_set_table"
1637 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1639 vrf_id=int(table_id)
1641 err_msg = f"Failed to assign interface {interface} to FIB table"
1642 with PapiSocketExecutor(node) as papi_exec:
1643 papi_exec.add(cmd, **args).get_reply(err_msg)
1646 def set_linux_interface_mac(
1647 node, interface, mac, namespace=None, vf_id=None):
1648 """Set MAC address for interface in linux.
1650 :param node: Node where to execute command.
1651 :param interface: Interface in namespace.
1652 :param mac: MAC to be assigned to interface.
1653 :param namespace: Execute command in namespace. Optional
1654 :param vf_id: Virtual Function id. Optional
1656 :type interface: str
1658 :type namespace: str
# "ip link set <pf> vf N mac ..." programs a VF MAC via the PF;
# otherwise set the interface's own address.
1661 mac_str = f"vf {vf_id} mac {mac}" if vf_id is not None \
1662 else f"address {mac}"
1663 ns_str = f"ip netns exec {namespace}" if namespace else u""
1665 cmd = f"{ns_str} ip link set {interface} {mac_str}"
1666 exec_cmd_no_error(node, cmd, sudo=True)
1669 def set_linux_interface_promisc(
1670 node, interface, namespace=None, vf_id=None, state=u"on"):
1671 """Set promisc state for interface in linux.
1673 :param node: Node where to execute command.
1674 :param interface: Interface in namespace.
1675 :param namespace: Exec command in namespace. (Optional, Default: None)
1676 :param vf_id: Virtual Function id. (Optional, Default: None)
1677 :param state: State of feature. (Optional, Default: on)
1679 :type interface: str
1680 :type namespace: str
# Per-VF promisc when vf_id is given, whole-device promisc otherwise.
1684 promisc_str = f"vf {vf_id} promisc {state}" if vf_id is not None \
1685 else f"promisc {state}"
1686 ns_str = f"ip netns exec {namespace}" if namespace else u""
1688 cmd = f"{ns_str} ip link set dev {interface} {promisc_str}"
1689 exec_cmd_no_error(node, cmd, sudo=True)
1692 def set_linux_interface_trust_on(
1693 node, interface, namespace=None, vf_id=None):
1694 """Set trust on (promisc) for interface in linux.
1696 :param node: Node where to execute command.
1697 :param interface: Interface in namespace.
1698 :param namespace: Execute command in namespace. Optional
1699 :param vf_id: Virtual Function id. Optional
1701 :type interface: str
1702 :type namespace: str
# "trust on" lets the VF change its own MAC/promisc settings.
1705 trust_str = f"vf {vf_id} trust on" if vf_id is not None else u"trust on"
1706 ns_str = f"ip netns exec {namespace}" if namespace else u""
1708 cmd = f"{ns_str} ip link set dev {interface} {trust_str}"
1709 exec_cmd_no_error(node, cmd, sudo=True)
1712 def set_linux_interface_spoof_off(
1713 node, interface, namespace=None, vf_id=None):
1714 """Set spoof off for interface in linux.
1716 :param node: Node where to execute command.
1717 :param interface: Interface in namespace.
1718 :param namespace: Execute command in namespace. Optional
1719 :param vf_id: Virtual Function id. Optional
1721 :type interface: str
1722 :type namespace: str
# "spoof off" disables MAC/VLAN anti-spoof checking on the VF, needed
# when the VF must transmit with non-default source addresses.
1725 spoof_str = f"vf {vf_id} spoof off" if vf_id is not None \
1727 ns_str = f"ip netns exec {namespace}" if namespace else u""
1729 cmd = f"{ns_str} ip link set dev {interface} {spoof_str}"
1730 exec_cmd_no_error(node, cmd, sudo=True)
1733 def set_linux_interface_state(
1734 node, interface, namespace=None, state=u"up"):
1735 """Set operational state for interface in linux.
1737 :param node: Node where to execute command.
1738 :param interface: Interface in namespace.
1739 :param namespace: Execute command in namespace. Optional
1740 :param state: Up/Down.
1742 :type interface: str
1743 :type namespace: str
1746 ns_str = f"ip netns exec {namespace}" if namespace else u""
1748 cmd = f"{ns_str} ip link set dev {interface} {state}"
1749 exec_cmd_no_error(node, cmd, sudo=True)
1752 def init_interface(node, ifc_key, driver, numvfs=0, osi_layer=u"L2"):
1753 """Init PCI device. Check driver compatibility and bind to proper
1754 drivers. Optionally create NIC VFs.
1756 :param node: DUT node.
1757 :param ifc_key: Interface key from topology file.
1758 :param driver: Base driver to use.
1759 :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
1760 :param osi_layer: OSI Layer type to initialize TG with.
1761 Default value "L2" sets linux interface spoof off.
1766 :type osi_layer: str
1767 :returns: Virtual Function topology interface keys.
1769 :raises RuntimeError: If a reason preventing initialization is found.
1771 kernel_driver = Topology.get_interface_driver(node, ifc_key)
# Gate each VPP driver choice on a compatible kernel driver, then
# delegate the actual VF setup to init_generic_interface.
1773 if driver == u"avf":
1774 if kernel_driver not in (
1775 u"ice", u"iavf", u"i40e", u"i40evf"):
1777 f"AVF needs ice or i40e compatible driver, not "
1778 f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
1780 vf_keys = InterfaceUtil.init_generic_interface(
1781 node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
1783 elif driver == u"af_xdp":
1784 if kernel_driver not in (
1785 u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core",
1788 f"AF_XDP needs ice/i40e/rdma/ixgbe compatible driver, not "
1789 f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
1791 vf_keys = InterfaceUtil.init_generic_interface(
1792 node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
# rdma-core has no kernel-driver precondition checked here.
1794 elif driver == u"rdma-core":
1795 vf_keys = InterfaceUtil.init_generic_interface(
1796 node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
1801 def init_generic_interface(node, ifc_key, numvfs=0, osi_layer=u"L2"):
1802 """Init PCI device. Bind to proper drivers. Optionally create NIC VFs.
1804 :param node: DUT node.
1805 :param ifc_key: Interface key from topology file.
1806 :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
1807 :param osi_layer: OSI Layer type to initialize TG with.
1808 Default value "L2" sets linux interface spoof off.
1812 :type osi_layer: str
1813 :returns: Virtual Function topology interface keys.
1815 :raises RuntimeError: If a reason preventing initialization is found.
1817 # Read PCI address and driver.
1818 pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
1819 pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
1820 uio_driver = Topology.get_uio_driver(node)
1821 kernel_driver = Topology.get_interface_driver(node, ifc_key)
# Colon must be escaped for the sysfs path lookup in get_pci_dev_driver.
1822 current_driver = DUTSetup.get_pci_dev_driver(
1823 node, pf_pci_addr.replace(u":", r"\:"))
# Shell glob resolved on the DUT: netdev name of the PF device.
1824 pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"
1826 VPPUtil.stop_vpp_service(node)
1827 if current_driver != kernel_driver:
1828 # PCI device must be re-bound to kernel driver before creating VFs.
1829 DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
1830 # Stop VPP to prevent deadlock.
1831 # Unbind from current driver if bound.
1833 DUTSetup.pci_driver_unbind(node, pf_pci_addr)
1834 # Bind to kernel driver.
1835 DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
1837 # Initialize PCI VFs.
1838 DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
# L2 testing needs the PF in promiscuous mode so VF traffic with
# arbitrary destinations is not dropped.
1841 if osi_layer == u"L2":
1842 InterfaceUtil.set_linux_interface_promisc(node, pf_dev)
1845 # Set MAC address and bind each virtual function to uio driver.
1846 for vf_id in range(numvfs):
# Derive a per-VF MAC from the PF MAC; the last octet encodes vf_id.
# NOTE(review): octet pf_mac_addr[1] is skipped here — looks
# intentional (vendor-byte rewrite) but confirm against the original.
1847 vf_mac_addr = u":".join(
1848 [pf_mac_addr[0], pf_mac_addr[2], pf_mac_addr[3], pf_mac_addr[4],
1849 pf_mac_addr[5], f"{vf_id:02x}"
1853 InterfaceUtil.set_linux_interface_trust_on(
1854 node, pf_dev, vf_id=vf_id
1856 if osi_layer == u"L2":
1857 InterfaceUtil.set_linux_interface_spoof_off(
1858 node, pf_dev, vf_id=vf_id
1860 InterfaceUtil.set_linux_interface_mac(
1861 node, pf_dev, vf_mac_addr, vf_id=vf_id
1863 InterfaceUtil.set_linux_interface_state(
1864 node, pf_dev, state=u"up"
# Rebind the VF from its kernel driver to the uio driver for VPP use.
1867 DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
1868 DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
1870 # Add newly created ports into topology file
1871 vf_ifc_name = f"{ifc_key}_vif"
1872 vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
1873 vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
1874 Topology.update_interface_name(
1875 node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
1877 Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
1878 Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
# VF inherits the NUMA locality of its parent PF.
1879 Topology.set_interface_numa_node(
1880 node, vf_ifc_key, Topology.get_interface_numa_node(
1884 vf_ifc_keys.append(vf_ifc_key)
1889 def vpp_sw_interface_rx_placement_dump(node):
1890 """Dump VPP interface RX placement on node.
1892 :param node: Node to run command on.
1894 :returns: Thread mapping information as a list of dictionaries.
1897 cmd = u"sw_interface_rx_placement_dump"
1898 err_msg = f"Failed to run '{cmd}' PAPI command on host {node[u'host']}!"
1899 with PapiSocketExecutor(node) as papi_exec:
# Queue one dump request per known VPP interface, then execute the
# whole batch with a single get_details call.
1900 for ifc in node[u"interfaces"].values():
1901 if ifc[u"vpp_sw_index"] is not None:
1902 papi_exec.add(cmd, sw_if_index=ifc[u"vpp_sw_index"])
1903 details = papi_exec.get_details(err_msg)
# Stable ordering by sw_if_index for deterministic round-robin use.
1904 return sorted(details, key=lambda k: k[u"sw_if_index"])
1907 def vpp_sw_interface_rx_placement_dump_on_all_duts(nodes):
1908 """Dump VPP interface RX placement on all given nodes.
1910 :param nodes: Nodes to run command on.
1912 :returns: Thread mapping information as a list of dictionaries.
# Dump is triggered per DUT; non-DUT nodes are skipped.
1915 for node in nodes.values():
1916 if node[u"type"] == NodeType.DUT:
1917 InterfaceUtil.vpp_sw_interface_rx_placement_dump(node)
1920 def vpp_sw_interface_set_rx_placement(
1921 node, sw_if_index, queue_id, worker_id):
1922 """Set interface RX placement to worker on node.
1924 :param node: Node to run command on.
1925 :param sw_if_index: VPP SW interface index.
1926 :param queue_id: VPP interface queue ID.
1927 :param worker_id: VPP worker ID (indexing from 0).
1929 :type sw_if_index: int
1931 :type worker_id: int
1932 :raises RuntimeError: If failed to run command on host or if no API
1935 cmd = u"sw_interface_set_rx_placement"
1936 err_msg = f"Failed to set interface RX placement to worker " \
1937 f"on host {node[u'host']}!"
1939 sw_if_index=sw_if_index,
1941 worker_id=worker_id,
1944 with PapiSocketExecutor(node) as papi_exec:
1945 papi_exec.add(cmd, **args).get_reply(err_msg)
1948 def vpp_round_robin_rx_placement(
1949 node, prefix, workers=None):
1950 """Set Round Robin interface RX placement on all worker threads
1953 If specified, workers limits the number of physical cores used
1954 for data plane I/O work. Other cores are presumed to do something else,
1955 e.g. asynchronous crypto processing.
1956 None means all workers are used for data plane work.
1958 :param node: Topology nodes.
1959 :param prefix: Interface name prefix.
1960 :param workers: Comma separated worker index numbers intended for
1966 thread_data = VPPUtil.vpp_show_threads(node)
# One thread is the main thread; the rest are candidate workers.
1967 worker_cnt = len(thread_data) - 1
# Build the list of eligible worker thread ids: either those whose
# cpu_id is listed in `workers`, or every non-main thread.
1972 for item in thread_data:
1973 if str(item.cpu_id) in workers.split(u","):
1974 worker_ids.append(item.id)
1976 for item in thread_data:
1977 if u"vpp_main" not in item.name:
1978 worker_ids.append(item.id)
# Assign each matching (interface, queue) pair to the next worker in
# round-robin order; `- 1` converts thread id to 0-based worker id.
1981 for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
1982 for interface in node[u"interfaces"].values():
1983 if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
1984 and prefix in interface[u"name"]:
1985 InterfaceUtil.vpp_sw_interface_set_rx_placement(
1986 node, placement[u"sw_if_index"], placement[u"queue_id"],
1987 worker_ids[worker_idx % len(worker_ids)] - 1
1992 def vpp_round_robin_rx_placement_on_all_duts(
1993 nodes, prefix, workers=None):
1994 """Set Round Robin interface RX placement on worker threads
1997 If specified, workers limits the number of physical cores used
1998 for data plane I/O work. Other cores are presumed to do something else,
1999 e.g. asynchronous crypto processing.
2000 None means all cores are used for data plane work.
2002 :param nodes: Topology nodes.
2003 :param prefix: Interface name prefix.
2004 :param workers: Comma separated worker index numbers intended for
2010 for node in nodes.values():
2011 if node[u"type"] == NodeType.DUT:
2012 InterfaceUtil.vpp_round_robin_rx_placement(
2013 node, prefix, workers