1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Interface util library."""
16 from time import sleep
17 from enum import IntEnum
19 from ipaddress import ip_address
20 from robot.api import logger
22 from resources.libraries.python.Constants import Constants
23 from resources.libraries.python.DUTSetup import DUTSetup
24 from resources.libraries.python.IPAddress import IPAddress
25 from resources.libraries.python.L2Util import L2Util
26 from resources.libraries.python.PapiExecutor import PapiSocketExecutor
27 from resources.libraries.python.parsers.JsonParser import JsonParser
28 from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
29 from resources.libraries.python.topology import NodeType, Topology
30 from resources.libraries.python.VPPUtil import VPPUtil
class InterfaceStatusFlags(IntEnum):
    """Interface status flags.

    Combinable bitmask values mirroring the VPP binary-API
    ``if_status_flags`` enum.
    """
    # Administrative (configured) state is up.
    IF_STATUS_API_FLAG_ADMIN_UP = 1
    # Physical link state is up.
    IF_STATUS_API_FLAG_LINK_UP = 2
class MtuProto(IntEnum):
    """MTU protocol selector, mirroring the VPP API ``mtu_proto`` enum."""
    # NOTE(review): only the MPLS member is visible in this view; the
    # remaining protocol members appear to be elided here.
    MTU_PROTO_API_MPLS = 3
class LinkDuplex(IntEnum):
    """Link duplex mode, mirroring the VPP API ``link_duplex`` enum."""
    LINK_DUPLEX_API_UNKNOWN = 0
    LINK_DUPLEX_API_HALF = 1
    LINK_DUPLEX_API_FULL = 2
class SubInterfaceFlags(IntEnum):
    """Sub-interface flags.

    Combinable bitmask values mirroring the VPP API ``sub_if_flags`` enum;
    power-of-two members may be OR-ed together when creating sub-interfaces.
    """
    SUB_IF_API_FLAG_NO_TAGS = 1
    SUB_IF_API_FLAG_ONE_TAG = 2
    SUB_IF_API_FLAG_TWO_TAGS = 4
    SUB_IF_API_FLAG_DOT1AD = 8
    SUB_IF_API_FLAG_EXACT_MATCH = 16
    SUB_IF_API_FLAG_DEFAULT = 32
    SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY = 64
    SUB_IF_API_FLAG_INNER_VLAN_ID_ANY = 128
    SUB_IF_API_FLAG_DOT1AH = 256
class RxMode(IntEnum):
    """Receive mode, mirroring the VPP API ``rx_mode`` enum."""
    RX_MODE_API_UNKNOWN = 0
    RX_MODE_API_POLLING = 1
    RX_MODE_API_INTERRUPT = 2
    RX_MODE_API_ADAPTIVE = 3
    RX_MODE_API_DEFAULT = 4
class IfType(IntEnum):
    """Interface type, mirroring the VPP API ``if_type`` enum."""
    # NOTE(review): only the hardware member is visible in this view; the
    # sub/p2p/pipe members appear to be elided here.
    IF_API_TYPE_HARDWARE = 0
class LinkBondLoadBalanceAlgo(IntEnum):
    """Link bonding load balance algorithm.

    Mirrors the VPP API ``bond_lb_algo`` enum; member suffixes select the
    hash input (L2 / L3+L4 / L2+L3) or mode (round-robin, broadcast,
    active-backup).
    """
    BOND_API_LB_ALGO_L2 = 0
    BOND_API_LB_ALGO_L34 = 1
    BOND_API_LB_ALGO_L23 = 2
    BOND_API_LB_ALGO_RR = 3
    BOND_API_LB_ALGO_BC = 4
    BOND_API_LB_ALGO_AB = 5
class LinkBondMode(IntEnum):
    """Link bonding mode, mirroring the VPP API ``bond_mode`` enum."""
    BOND_API_MODE_ROUND_ROBIN = 1
    BOND_API_MODE_ACTIVE_BACKUP = 2
    BOND_API_MODE_XOR = 3
    BOND_API_MODE_BROADCAST = 4
    BOND_API_MODE_LACP = 5
class RdmaMode(IntEnum):
    """RDMA interface mode, mirroring the VPP API ``rdma_mode`` enum."""
    RDMA_API_MODE_AUTO = 0
    RDMA_API_MODE_IBV = 1
class AfXdpMode(IntEnum):
    """AF_XDP interface mode, mirroring the VPP API ``af_xdp_mode`` enum."""
    AF_XDP_API_MODE_AUTO = 0
    AF_XDP_API_MODE_COPY = 1
    AF_XDP_API_MODE_ZERO_COPY = 2
121 """General utilities for managing interfaces"""
124 def pci_to_int(pci_str):
125 """Convert PCI address from string format (0000:18:0a.0) to
126 integer representation (169345024).
128 :param pci_str: PCI address in string representation.
130 :returns: Integer representation of PCI address.
133 pci = list(pci_str.split(u":")[0:2])
134 pci.extend(pci_str.split(u":")[2].split(u"."))
136 return (int(pci[0], 16) | int(pci[1], 16) << 16 |
137 int(pci[2], 16) << 24 | int(pci[3], 16) << 29)
    def pci_to_eth(node, pci_str):
        """Convert PCI address on DUT to Linux ethernet name.

        :param node: DUT node
        :param pci_str: PCI address.
        :type node: dict
        :type pci_str: str
        :returns: Ethernet name.
        :rtype: str
        """
        # The glob expands to the single netdev directory sysfs exposes
        # for the NIC, so basename yields the Linux interface name.
        cmd = f"basename /sys/bus/pci/devices/{pci_str}/net/*"
        # NOTE(review): the try/except wrapper around this call appears to
        # be elided from this view; the raise below presumably lives in the
        # except branch.
        stdout, _ = exec_cmd_no_error(node, cmd)
        raise RuntimeError(f"Cannot convert {pci_str} to ethernet name!")
        return stdout.strip()
    def get_interface_index(node, interface):
        """Get interface sw_if_index from topology file.

        :param node: Node where the interface is.
        :param interface: Numeric index or name string of a specific interface.
        :type node: dict
        :type interface: str or int
        :returns: SW interface index.
        :rtype: int
        :raises TypeError: If interface is neither a valid index, key nor name.
        """
        # NOTE(review): the try/except scaffolding of this fallback chain is
        # elided from this view. Resolution order appears to be: literal
        # numeric index, then topology key, then interface name.
        sw_if_index = int(interface)
        sw_if_index = Topology.get_interface_sw_index(node, interface)
        if sw_if_index is None:
            Topology.get_interface_sw_index_by_name(node, interface)
        except TypeError as err:
            raise TypeError(f"Wrong interface format {interface}") from err
    def set_interface_state(node, interface, state, if_type=u"key"):
        """Set interface state on a node.

        Function can be used for DUTs as well as for TGs.

        :param node: Node where the interface is.
        :param interface: Interface key or sw_if_index or name.
        :param state: One of 'up' or 'down'.
        :param if_type: Interface type
        :type node: dict
        :type interface: str or int
        :type state: str
        :type if_type: str
        :raises ValueError: If the interface type is unknown.
        :raises ValueError: If the state of interface is unexpected.
        :raises ValueError: If the node has an unknown node type.
        """
        # NOTE(review): several lines (else branches, args=dict(...) wrapper,
        # closing parens) appear elided from this view; comments below mark
        # the apparent structure.
        # Resolve interface to both sw_if_index (VPP path) and Linux name
        # (TG/VM path), depending on how the caller identified it.
        if if_type == u"key":
            if isinstance(interface, str):
                sw_if_index = Topology.get_interface_sw_index(node, interface)
                iface_name = Topology.get_interface_name(node, interface)
                sw_if_index = interface
        elif if_type == u"name":
            iface_key = Topology.get_interface_by_name(node, interface)
            if iface_key is not None:
                sw_if_index = Topology.get_interface_sw_index(node, iface_key)
            iface_name = interface
            raise ValueError(f"Unknown if_type: {if_type}")
        # DUTs are driven through the VPP binary API; TGs/VMs via "ip link".
        if node[u"type"] == NodeType.DUT:
            flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
        elif state == u"down":
            raise ValueError(f"Unexpected interface state: {state}")
            cmd = u"sw_interface_set_flags"
            err_msg = f"Failed to set interface state on host {node[u'host']}"
                sw_if_index=int(sw_if_index),
            with PapiSocketExecutor(node) as papi_exec:
                papi_exec.add(cmd, **args).get_reply(err_msg)
        elif node[u"type"] == NodeType.TG or node[u"type"] == NodeType.VM:
            cmd = f"ip link set {iface_name} {state}"
            exec_cmd_no_error(node, cmd, sudo=True)
            f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
238 def set_interface_state_pci(
239 node, pf_pcis, namespace=None, state=u"up"):
240 """Set operational state for interface specified by PCI address.
242 :param node: Topology node.
243 :param pf_pcis: List of node's interfaces PCI addresses.
244 :param namespace: Exec command in namespace. (Optional, Default: none)
245 :param state: Up/Down. (Optional, default: up)
251 for pf_pci in pf_pcis:
252 pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
253 InterfaceUtil.set_linux_interface_state(
254 node, pf_eth, namespace=namespace, state=state
258 def set_interface_mtu(node, pf_pcis, mtu=9200):
259 """Set Ethernet MTU for specified interfaces.
261 :param node: Topology node.
262 :param pf_pcis: List of node's interfaces PCI addresses.
263 :param mtu: MTU to set. Default: 9200.
267 :raises RuntimeError: If failed to set MTU on interface.
269 for pf_pci in pf_pcis:
270 pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
271 cmd = f"ip link set {pf_eth} mtu {mtu}"
272 exec_cmd_no_error(node, cmd, sudo=True)
275 def set_interface_channels(
276 node, pf_pcis, num_queues=1, channel=u"combined"):
277 """Set interface channels for specified interfaces.
279 :param node: Topology node.
280 :param pf_pcis: List of node's interfaces PCI addresses.
281 :param num_queues: Number of channels. (Optional, Default: 1)
282 :param channel: Channel type. (Optional, Default: combined)
285 :type num_queues: int
288 for pf_pci in pf_pcis:
289 pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
290 cmd = f"ethtool --set-channels {pf_eth} {channel} {num_queues}"
291 exec_cmd_no_error(node, cmd, sudo=True)
294 def set_interface_flow_control(node, pf_pcis, rxf=u"off", txf=u"off"):
295 """Set Ethernet flow control for specified interfaces.
297 :param node: Topology node.
298 :param pf_pcis: List of node's interfaces PCI addresses.
299 :param rxf: RX flow. (Optional, Default: off).
300 :param txf: TX flow. (Optional, Default: off).
306 for pf_pci in pf_pcis:
307 pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
308 cmd = f"ethtool -A {pf_eth} rx {rxf} tx {txf}"
309 ret_code, _, _ = exec_cmd(node, cmd, sudo=True)
310 if int(ret_code) not in (0, 78):
311 raise RuntimeError("Failed to set flow control on {pf_eth}!")
314 def set_pci_parameter(node, pf_pcis, key, value):
315 """Set PCI parameter for specified interfaces.
317 :param node: Topology node.
318 :param pf_pcis: List of node's interfaces PCI addresses.
319 :param key: Key to set.
320 :param value: Value to set.
326 for pf_pci in pf_pcis:
327 cmd = f"setpci -s {pf_pci} {key}={value}"
328 exec_cmd_no_error(node, cmd, sudo=True)
    def vpp_set_interface_mtu(node, interface, mtu=9200):
        """Set Ethernet MTU on interface.

        :param node: VPP node.
        :param interface: Interface to setup MTU.
        :param mtu: Ethernet MTU size in Bytes. Default: 9200.
        :type node: dict
        :type interface: str or int
        :type mtu: int
        """
        # NOTE(review): the else branch, args=dict(...) wrapper and the try
        # line appear elided from this view.
        if isinstance(interface, str):
            sw_if_index = Topology.get_interface_sw_index(node, interface)
            sw_if_index = interface

        cmd = u"hw_interface_set_mtu"
        err_msg = f"Failed to set interface MTU on host {node[u'host']}"
            sw_if_index=sw_if_index,
        # Failure to set MTU is tolerated: some drivers reject it, so the
        # AssertionError from the PAPI reply is only logged, not re-raised.
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
        except AssertionError as err:
            logger.debug(f"Setting MTU failed.\n{err}")
359 def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
360 """Set Ethernet MTU on all interfaces.
362 :param node: VPP node.
363 :param mtu: Ethernet MTU size in Bytes. Default: 9200.
367 for interface in node[u"interfaces"]:
368 InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
371 def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
372 """Set Ethernet MTU on all interfaces on all DUTs.
374 :param nodes: VPP nodes.
375 :param mtu: Ethernet MTU size in Bytes. Default: 9200.
379 for node in nodes.values():
380 if node[u"type"] == NodeType.DUT:
381 InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
    def vpp_node_interfaces_ready_wait(node, retries=15):
        """Wait until all interfaces with admin-up are in link-up state.

        :param node: Node to wait on.
        :param retries: Number of retries to check interface status (optional,
            default 15).
        :type node: dict
        :type retries: int
        :raises RuntimeError: If any interface is not in link-up state after
            defined number of retries.
        """
        # NOTE(review): the not_ready initialisation, sleep/retry and early
        # exit lines appear elided from this view.
        for _ in range(0, retries):
            out = InterfaceUtil.vpp_get_interface_data(node)
            for interface in out:
                # flags == 1 means ADMIN_UP only, i.e. link not yet up.
                if interface.get(u"flags") == 1:
                    not_ready.append(interface.get(u"interface_name"))
                f"Interfaces still not in link-up state:\n{not_ready}"
        # not_ready only exists if at least one check iteration ran.
        err = f"Timeout, interfaces not up:\n{not_ready}" \
            if u"not_ready" in locals() else u"No check executed!"
        raise RuntimeError(err)
415 def all_vpp_interfaces_ready_wait(nodes, retries=15):
416 """Wait until all interfaces with admin-up are in link-up state for all
417 nodes in the topology.
419 :param nodes: Nodes in the topology.
420 :param retries: Number of retries to check interface status (optional,
426 for node in nodes.values():
427 if node[u"type"] == NodeType.DUT:
428 InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)
    def vpp_get_interface_data(node, interface=None):
        """Get all interface data from a VPP node. If a name or
        sw_interface_index is provided, return only data for the matching
        interface(s).

        :param node: VPP node to get interface data from.
        :param interface: Numeric index or name string of a specific interface.
        :type node: dict
        :type interface: int or str
        :returns: List of dictionaries containing data for each interface, or a
            single dictionary for the specified interface.
        :rtype: list or dict
        :raises TypeError: if the data type of interface is neither basestring
            nor int.
        """
        def process_if_dump(if_dump):
            """Process interface dump.

            Normalizes PAPI reply objects into plain strings/ints so the
            result is JSON-friendly.

            :param if_dump: Interface dump.
            :type if_dump: dict
            :returns: Processed interface dump.
            :rtype: dict
            """
            if_dump[u"l2_address"] = str(if_dump[u"l2_address"])
            if_dump[u"b_dmac"] = str(if_dump[u"b_dmac"])
            if_dump[u"b_smac"] = str(if_dump[u"b_smac"])
            if_dump[u"flags"] = if_dump[u"flags"].value
            if_dump[u"type"] = if_dump[u"type"].value
            if_dump[u"link_duplex"] = if_dump[u"link_duplex"].value
            # sub_if_flags may come back as enum or raw int depending on
            # the VPP version; normalize to int either way.
            if_dump[u"sub_if_flags"] = if_dump[u"sub_if_flags"].value \
                if hasattr(if_dump[u"sub_if_flags"], u"value") \
                else int(if_dump[u"sub_if_flags"])

        # NOTE(review): several lines (else branch raising TypeError placement,
        # args=dict(...) wrapper, the "for dump in details" loop header and
        # the final return) appear elided from this view.
        if interface is not None:
            if isinstance(interface, str):
                param = u"interface_name"
            elif isinstance(interface, int):
                param = u"sw_if_index"
                raise TypeError(f"Wrong interface format {interface}")

        cmd = u"sw_interface_dump"
            name_filter_valid=False,
        err_msg = f"Failed to get interface dump on host {node[u'host']}"

        with PapiSocketExecutor(node) as papi_exec:
            details = papi_exec.add(cmd, **args).get_details(err_msg)
        logger.debug(f"Received data:\n{details!r}")

        data = list() if interface is None else dict()
        if interface is None:
            data.append(process_if_dump(dump))
        # Names in the dump may be NUL-padded; strip before comparing.
        elif str(dump.get(param)).rstrip(u"\x00") == str(interface):
            data = process_if_dump(dump)

        logger.debug(f"Interface data:\n{data}")
499 def vpp_get_interface_name(node, sw_if_index):
500 """Get interface name for the given SW interface index from actual
503 :param node: VPP node to get interface data from.
504 :param sw_if_index: SW interface index of the specific interface.
506 :type sw_if_index: int
507 :returns: Name of the given interface.
510 if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
511 if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
512 if_data = InterfaceUtil.vpp_get_interface_data(
513 node, if_data[u"sup_sw_if_index"]
516 return if_data.get(u"interface_name")
    def vpp_get_interface_sw_index(node, interface_name):
        """Get sw_if_index for the given interface name from actual
        interface dump.

        :param node: VPP node to get interface data from.
        :param interface_name: Interface name.
        :type node: dict
        :type interface_name: str
        :returns: sw_if_index of the given interface.
        :rtype: int
        """
        if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)

        return if_data.get(u"sw_if_index")
535 def vpp_get_interface_mac(node, interface):
536 """Get MAC address for the given interface from actual interface dump.
538 :param node: VPP node to get interface data from.
539 :param interface: Numeric index or name string of a specific interface.
541 :type interface: int or str
542 :returns: MAC address.
545 if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
546 if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
547 if_data = InterfaceUtil.vpp_get_interface_data(
548 node, if_data[u"sup_sw_if_index"])
550 return if_data.get(u"l2_address")
553 def vpp_set_interface_mac(node, interface, mac):
554 """Set MAC address for the given interface.
556 :param node: VPP node to set interface MAC.
557 :param interface: Numeric index or name string of a specific interface.
558 :param mac: Required MAC address.
560 :type interface: int or str
563 cmd = u"sw_interface_set_mac_address"
565 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
566 mac_address=L2Util.mac_to_bin(mac)
568 err_msg = f"Failed to set MAC address of interface {interface}" \
569 f"on host {node[u'host']}"
570 with PapiSocketExecutor(node) as papi_exec:
571 papi_exec.add(cmd, **args).get_reply(err_msg)
    def tg_set_interface_driver(node, pci_addr, driver):
        """Set interface driver on the TG node.

        :param node: Node to set interface driver on (must be TG node).
        :param pci_addr: PCI address of the interface.
        :param driver: Driver name.
        :type node: dict
        :type pci_addr: str
        :type driver: str
        :raises RuntimeError: If unbinding from the current driver fails.
        :raises RuntimeError: If binding to the new driver fails.
        """
        # NOTE(review): the early-return body and the SSH session setup lines
        # appear elided from this view; `ssh` is presumably an SSH() instance
        # connected to `node`.
        old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
        # Already bound to the requested driver; nothing to do.
        if old_driver == driver:

        # Unbind from current driver
        if old_driver is not None:
            cmd = f"sh -c \"echo {pci_addr} > " \
                f"/sys/bus/pci/drivers/{old_driver}/unbind\""
            ret_code, _, _ = ssh.exec_command_sudo(cmd)
            if int(ret_code) != 0:
                raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")

        # Bind to the new driver
        cmd = f"sh -c \"echo {pci_addr} > /sys/bus/pci/drivers/{driver}/bind\""
        ret_code, _, _ = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
    def tg_get_interface_driver(node, pci_addr):
        """Get interface driver from the TG node.

        :param node: Node to get interface driver on (must be TG node).
        :param pci_addr: PCI address of the interface.
        :type node: dict
        :type pci_addr: str
        :returns: Interface driver or None if not found.
        :rtype: str
        :raises RuntimeError: If PCI rescan or lspci command execution failed.
        """
        # Thin wrapper; the actual lookup lives in DUTSetup.
        return DUTSetup.get_pci_dev_driver(node, pci_addr)
622 def tg_set_interfaces_default_driver(node):
623 """Set interfaces default driver specified in topology yaml file.
625 :param node: Node to setup interfaces driver on (must be TG node).
628 for interface in node[u"interfaces"].values():
629 InterfaceUtil.tg_set_interface_driver(
630 node, interface[u"pci_address"], interface[u"driver"]
    def update_vpp_interface_data_on_node(node):
        """Update vpp generated interface data for a given node in DICT__nodes.

        Updates interface names, software if index numbers and any other details
        generated specifically by vpp that are unknown before testcase run.
        It does this by dumping interface list from all devices using python
        api, and pairing known information from topology (mac address) to state
        of the node.

        :param node: Node selected from DICT__nodes.
        :type node: dict
        """
        # NOTE(review): the logger.debug(...) wrapper lines and the else:
        # branch header appear elided from this view.
        interface_list = InterfaceUtil.vpp_get_interface_data(node)
        # Index the dump by MAC so topology entries can be matched to it.
        interface_dict = dict()
        for ifc in interface_list:
            interface_dict[ifc[u"l2_address"]] = ifc

        for if_name, if_data in node[u"interfaces"].items():
            ifc_dict = interface_dict.get(if_data[u"mac_address"])
            if ifc_dict is not None:
                if_data[u"name"] = ifc_dict[u"interface_name"]
                if_data[u"vpp_sw_index"] = ifc_dict[u"sw_if_index"]
                if_data[u"mtu"] = ifc_dict[u"mtu"][0]
                    f"Interface {if_name} found by MAC "
                    f"{if_data[u'mac_address']}"
                    f"Interface {if_name} not found by MAC "
                    f"{if_data[u'mac_address']}"
                # No VPP interface matched this topology MAC.
                if_data[u"vpp_sw_index"] = None
    def update_nic_interface_names(node):
        """Update interface names based on nic type and PCI address.

        This method updates interface names in the same format as VPP does.

        :param node: Node dictionary.
        :type node: dict
        """
        for ifc in node[u"interfaces"].values():
            if_pci = ifc[u"pci_address"].replace(u".", u":").split(u":")
            # VPP encodes bus/slot/function as lowercase hex with no
            # leading zeros, joined by slashes.
            loc = f"{int(if_pci[1], 16):x}/{int(if_pci[2], 16):x}/" \
                f"{int(if_pci[3], 16):x}"
            if ifc[u"model"] == u"Intel-XL710":
                ifc[u"name"] = f"FortyGigabitEthernet{loc}"
            elif ifc[u"model"] == u"Intel-X710":
                ifc[u"name"] = f"TenGigabitEthernet{loc}"
            elif ifc[u"model"] == u"Intel-X520-DA2":
                ifc[u"name"] = f"TenGigabitEthernet{loc}"
            elif ifc[u"model"] == u"Cisco-VIC-1385":
                ifc[u"name"] = f"FortyGigabitEthernet{loc}"
            elif ifc[u"model"] == u"Cisco-VIC-1227":
                ifc[u"name"] = f"TenGigabitEthernet{loc}"
            # NOTE(review): the final `else:` header appears elided from
            # this view; the line below is its fallback body.
                ifc[u"name"] = f"UnknownEthernet{loc}"
695 def update_nic_interface_names_on_all_duts(nodes):
696 """Update interface names based on nic type and PCI address on all DUTs.
698 This method updates interface names in the same format as VPP does.
700 :param nodes: Topology nodes.
703 for node in nodes.values():
704 if node[u"type"] == NodeType.DUT:
705 InterfaceUtil.update_nic_interface_names(node)
    def update_tg_interface_data_on_node(node):
        """Update interface name for TG/linux node in DICT__nodes.

        Example output of the remote shell loop below:

        # for dev in `ls /sys/class/net/`;
        > do echo "\"`cat /sys/class/net/$dev/address`\": \"$dev\""; done
        "52:54:00:9f:82:63": "eth0"
        "52:54:00:77:ae:a9": "eth1"
        "52:54:00:e1:8a:0f": "eth2"
        "00:00:00:00:00:00": "lo"

        :param node: Node selected from DICT__nodes.
        :type node: dict
        :raises RuntimeError: If getting of interface name and MAC fails.
        """
        # NOTE(review): the SSH session setup lines appear elided from this
        # view; `ssh` is presumably an SSH() instance connected to `node`.
        # First setup interface driver specified in yaml file
        InterfaceUtil.tg_set_interfaces_default_driver(node)

        # Get interface names
        cmd = u'for dev in `ls /sys/class/net/`; do echo "\\"`cat ' \
            u'/sys/class/net/$dev/address`\\": \\"$dev\\""; done;'
        ret_code, stdout, _ = ssh.exec_command(cmd)
        if int(ret_code) != 0:
            raise RuntimeError(u"Get interface name and MAC failed")
        # Wrap the "mac": "name" lines into one JSON object for parsing.
        tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"

        interfaces = JsonParser().parse_data(tmp)
        for interface in node[u"interfaces"].values():
            name = interfaces.get(interface[u"mac_address"])
            interface[u"name"] = name
    def iface_update_numa_node(node):
        """For all interfaces from topology file update numa node based on
        information from the node.

        :param node: Node from topology.
        :type node: dict
        :raises ValueError: If numa node is less than 0.
        :raises RuntimeError: If update of numa node failed.
        """
        # NOTE(review): the SSH setup and try/except scaffolding appear
        # elided from this view; `ssh` is presumably an SSH() instance.
        for if_key in Topology.get_node_interfaces(node):
            if_pci = Topology.get_interface_pci_addr(node, if_key)
            cmd = f"cat /sys/bus/pci/devices/{if_pci}/numa_node"
            ret, out, _ = ssh.exec_command(cmd)
            # sysfs reports -1 when NUMA is unknown; fall back to node 0.
            numa_node = 0 if int(out) < 0 else int(out)
                f"Reading numa location failed for: {if_pci}"
            Topology.set_interface_numa_node(
                node, if_key, numa_node
            raise RuntimeError(f"Update numa node failed for: {if_pci}")
    def update_all_interface_data_on_all_nodes(
            nodes, skip_tg=False, skip_vpp=False):
        """Update interface names on all nodes in DICT__nodes.

        This method updates the topology dictionary by querying interface lists
        of all nodes mentioned in the topology dictionary.

        :param nodes: Nodes in the topology.
        :param skip_tg: Skip TG node.
        :param skip_vpp: Skip VPP node.
        :type nodes: dict
        :type skip_tg: bool
        :type skip_vpp: bool
        """
        for node in nodes.values():
            if node[u"type"] == NodeType.DUT and not skip_vpp:
                InterfaceUtil.update_vpp_interface_data_on_node(node)
            elif node[u"type"] == NodeType.TG and not skip_tg:
                InterfaceUtil.update_tg_interface_data_on_node(node)
                # NOTE(review): indentation of this call is ambiguous in
                # this view; it may belong to the TG branch only.
                InterfaceUtil.iface_update_numa_node(node)
    def create_vlan_subinterface(node, interface, vlan):
        """Create VLAN sub-interface on node.

        :param node: Node to add VLAN subinterface on.
        :param interface: Interface name or index on which create VLAN
            subinterface.
        :param vlan: VLAN ID of the subinterface to be created.
        :type node: dict
        :type interface: str or int
        :type vlan: int
        :returns: Name and index of created subinterface.
        :rtype: tuple
        :raises RuntimeError: if it is unable to create VLAN subinterface on the
            node or interface cannot be converted.
        """
        sw_if_index = InterfaceUtil.get_interface_index(node, interface)

        # NOTE(review): the args=dict(...) wrapper and vlan_id line appear
        # elided from this view.
        cmd = u"create_vlan_subif"
            sw_if_index=sw_if_index,
        err_msg = f"Failed to create VLAN sub-interface on host {node[u'host']}"

        with PapiSocketExecutor(node) as papi_exec:
            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

        # Register the new sub-interface in the topology dictionary.
        if_key = Topology.add_new_port(node, u"vlan_subif")
        Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
        Topology.update_interface_name(node, if_key, ifc_name)

        return f"{interface}.{vlan}", sw_if_index
    def create_vxlan_interface(node, vni, source_ip, destination_ip):
        """Create VXLAN interface and return sw if index of created interface.

        :param node: Node where to create VXLAN interface.
        :param vni: VXLAN Network Identifier.
        :param source_ip: Source IP of a VXLAN Tunnel End Point.
        :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
        :type node: dict
        :type vni: int
        :type source_ip: str
        :type destination_ip: str
        :returns: SW IF INDEX of created interface.
        :rtype: int
        :raises RuntimeError: if it is unable to create VxLAN interface on the
            node.
        """
        # NOTE(review): the args=dict(...) wrapper, vni= line and the final
        # return appear elided from this view.
        cmd = u"vxlan_add_del_tunnel_v3"
            # BITWISE_NON_ZERO means "let VPP choose the instance number".
            instance=Constants.BITWISE_NON_ZERO,
            src_address=IPAddress.create_ip_address_object(
                ip_address(source_ip)
            dst_address=IPAddress.create_ip_address_object(
                ip_address(destination_ip)
            mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
            decap_next_index=Constants.BITWISE_NON_ZERO,
        err_msg = f"Failed to create VXLAN tunnel interface " \
            f"on host {node[u'host']}"
        with PapiSocketExecutor(node) as papi_exec:
            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

        # Register the new tunnel in the topology dictionary.
        if_key = Topology.add_new_port(node, u"vxlan_tunnel")
        Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
        Topology.update_interface_name(node, if_key, ifc_name)
    def set_vxlan_bypass(node, interface=None):
        """Add the 'ip4-vxlan-bypass' graph node for a given interface.

        By adding the IPv4 vxlan-bypass graph node to an interface, the node
        checks for and validate input vxlan packet and bypass ip4-lookup,
        ip4-local, ip4-udp-lookup nodes to speedup vxlan packet forwarding.
        This node will cause extra overhead to for non-vxlan packets which is
        kept at a minimum.

        :param node: Node where to set VXLAN bypass.
        :param interface: Numeric index or name string of a specific interface.
        :type node: dict
        :type interface: int or str
        :raises RuntimeError: if it failed to set VXLAN bypass on interface.
        """
        sw_if_index = InterfaceUtil.get_interface_index(node, interface)

        # NOTE(review): the args=dict(...) wrapper and enable= line appear
        # elided from this view.
        cmd = u"sw_interface_set_vxlan_bypass"
            sw_if_index=sw_if_index,
        err_msg = f"Failed to set VXLAN bypass on interface " \
            f"on host {node[u'host']}"
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_replies(err_msg)
    def vxlan_dump(node, interface=None):
        """Get VxLAN data for the given interface.

        :param node: VPP node to get interface data from.
        :param interface: Numeric index or name string of a specific interface.
            If None, information about all VxLAN interfaces is returned.
        :type node: dict
        :type interface: int or str
        :returns: Dictionary containing data for the given VxLAN interface or if
            interface=None, the list of dictionaries with all VxLAN interfaces.
        :rtype: dict or list
        :raises TypeError: if the data type of interface is neither basestring
            nor int.
        """
        def process_vxlan_dump(vxlan_dump):
            """Process vxlan dump.

            Converts PAPI address objects to plain strings.

            :param vxlan_dump: Vxlan interface dump.
            :type vxlan_dump: dict
            :returns: Processed vxlan interface dump.
            :rtype: dict
            """
            vxlan_dump[u"src_address"] = str(vxlan_dump[u"src_address"])
            vxlan_dump[u"dst_address"] = str(vxlan_dump[u"dst_address"])

        # NOTE(review): the else branch header, args=dict(...) wrapper, the
        # "for dump in details" loop header and the final return appear
        # elided from this view.
        if interface is not None:
            sw_if_index = InterfaceUtil.get_interface_index(node, interface)
            # BITWISE_NON_ZERO requests a dump of all VXLAN tunnels.
            sw_if_index = int(Constants.BITWISE_NON_ZERO)

        cmd = u"vxlan_tunnel_dump"
            sw_if_index=sw_if_index
        err_msg = f"Failed to get VXLAN dump on host {node[u'host']}"

        with PapiSocketExecutor(node) as papi_exec:
            details = papi_exec.add(cmd, **args).get_details(err_msg)

        data = list() if interface is None else dict()
        if interface is None:
            data.append(process_vxlan_dump(dump))
        elif dump[u"sw_if_index"] == sw_if_index:
            data = process_vxlan_dump(dump)

        logger.debug(f"VXLAN data:\n{data}")
    def create_subinterface(
            node, interface, sub_id, outer_vlan_id=None, inner_vlan_id=None,
            type_subif=None):
        """Create sub-interface on node. It is possible to set required
        sub-interface type and VLAN tag(s).

        :param node: Node to add sub-interface.
        :param interface: Interface name on which create sub-interface.
        :param sub_id: ID of the sub-interface to be created.
        :param outer_vlan_id: Optional outer VLAN ID.
        :param inner_vlan_id: Optional inner VLAN ID.
        :param type_subif: Optional type of sub-interface. Values supported by
            VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
            [default_sub]
        :type node: dict
        :type interface: str or int
        :type sub_id: int
        :type outer_vlan_id: int
        :type inner_vlan_id: int
        :type type_subif: str
        :returns: Name and index of created sub-interface.
        :rtype: tuple
        :raises RuntimeError: If it is not possible to create sub-interface.
        """
        # NOTE(review): the def line continuation, flags initialisation and
        # several args=dict(...) lines appear elided from this view.
        subif_types = type_subif.split()

        # Accumulate requested flags into a single bitmask.
        if u"no_tags" in subif_types:
            flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_NO_TAGS
        if u"one_tag" in subif_types:
            flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_ONE_TAG
        if u"two_tags" in subif_types:
            flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_TWO_TAGS
        if u"dot1ad" in subif_types:
            flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DOT1AD
        if u"exact_match" in subif_types:
            flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_EXACT_MATCH
        if u"default_sub" in subif_types:
            flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DEFAULT
        # A pure default sub-interface matches any inner/outer VLAN ID.
        if type_subif == u"default_sub":
            flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_INNER_VLAN_ID_ANY\
                    | SubInterfaceFlags.SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY

        cmd = u"create_subif"
            sw_if_index=InterfaceUtil.get_interface_index(node, interface),
            # flags may be an IntFlag object or a bare int (0).
            sub_if_flags=flags.value if hasattr(flags, u"value")
            outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
            inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0
        err_msg = f"Failed to create sub-interface on host {node[u'host']}"
        with PapiSocketExecutor(node) as papi_exec:
            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

        # Register the new sub-interface in the topology dictionary.
        if_key = Topology.add_new_port(node, u"subinterface")
        Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
        Topology.update_interface_name(node, if_key, ifc_name)

        return f"{interface}.{sub_id}", sw_if_index
    def create_gre_tunnel_interface(node, source_ip, destination_ip):
        """Create GRE tunnel interface on node.

        :param node: VPP node to add tunnel interface.
        :param source_ip: Source of the GRE tunnel.
        :param destination_ip: Destination of the GRE tunnel.
        :type node: dict
        :type source_ip: str
        :type destination_ip: str
        :returns: Name and index of created GRE tunnel interface.
        :rtype: tuple
        :raises RuntimeError: If unable to create GRE tunnel interface.
        """
        # NOTE(review): the args=dict(...) wrapper and several tunnel field
        # lines appear elided from this view.
        cmd = u"gre_tunnel_add_del"
            # BITWISE_NON_ZERO lets VPP pick the instance number.
            instance=Constants.BITWISE_NON_ZERO,
            dst=str(destination_ip),
        err_msg = f"Failed to create GRE tunnel interface " \
            f"on host {node[u'host']}"
        with PapiSocketExecutor(node) as papi_exec:
            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

        # Register the new tunnel in the topology dictionary.
        if_key = Topology.add_new_port(node, u"gre_tunnel")
        Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
        Topology.update_interface_name(node, if_key, ifc_name)

        return ifc_name, sw_if_index
    def create_gtpu_tunnel_interface(node, teid, source_ip, destination_ip):
        """Create GTPU interface and return sw if index of created interface.

        :param node: Node where to create GTPU interface.
        :param teid: GTPU Tunnel Endpoint Identifier.
        :param source_ip: Source IP of a GTPU Tunnel End Point.
        :param destination_ip: Destination IP of a GTPU Tunnel End Point.
        :type node: dict
        :type teid: int
        :type source_ip: str
        :type destination_ip: str
        :returns: SW IF INDEX of created interface.
        :rtype: int
        :raises RuntimeError: if it is unable to create GTPU interface on the
            node.
        """
        # NOTE(review): the args=dict(...) wrapper, teid= line and the final
        # return appear elided from this view.
        cmd = u"gtpu_add_del_tunnel"
            src_address=IPAddress.create_ip_address_object(
                ip_address(source_ip)
            dst_address=IPAddress.create_ip_address_object(
                ip_address(destination_ip)
            mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
        err_msg = f"Failed to create GTPU tunnel interface " \
            f"on host {node[u'host']}"
        with PapiSocketExecutor(node) as papi_exec:
            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

        # Register the new tunnel in the topology dictionary.
        if_key = Topology.add_new_port(node, u"gtpu_tunnel")
        Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
        Topology.update_interface_name(node, if_key, ifc_name)
    def vpp_create_loopback(node, mac=None):
        """Create loopback interface on VPP node.

        :param node: Node to create loopback interface on.
        :param mac: Optional MAC address for loopback interface.
        :type node: dict
        :type mac: str
        :returns: SW interface index.
        :rtype: int
        :raises RuntimeError: If it is not possible to create loopback on the
            node.
        """
        # NOTE(review): the args=dict(...) wrapper lines and the final return
        # appear elided from this view.
        cmd = u"create_loopback_instance"
            mac_address=L2Util.mac_to_bin(mac) if mac else 0,
        err_msg = f"Failed to create loopback interface on host {node[u'host']}"
        with PapiSocketExecutor(node) as papi_exec:
            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

        # Register the new loopback in the topology dictionary.
        if_key = Topology.add_new_port(node, u"loopback")
        Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
        Topology.update_interface_name(node, if_key, ifc_name)
        # When VPP auto-assigned the MAC, read it back and store it too.
        mac = InterfaceUtil.vpp_get_interface_mac(node, ifc_name)
        Topology.update_interface_mac_address(node, if_key, mac)
def vpp_create_bond_interface(
        node, mode, load_balance=None, mac=None, gso=False):
    """Create bond interface on VPP node.

    :param node: DUT node from topology.
    :param mode: Link bonding mode.
    :param load_balance: Load balance (optional, valid for xor and lacp
        modes, otherwise ignored). Default: None.
    :param mac: MAC address to assign to the bond interface (optional).
    :param gso: Enable GSO support (optional). Default: False.
    :type node: dict
    :type mode: str
    :type load_balance: str
    :type mac: str
    :type gso: bool
    :returns: Interface key (name) in topology.
    :rtype: str
    :raises RuntimeError: If it is not possible to create bond interface on
        the node.
    """
    cmd = u"bond_create2"
        # NOTE(review): all-ones id presumably requests auto-assignment of
        # the bond instance id — confirm against VPP API.
        id=int(Constants.BITWISE_NON_ZERO),
        use_custom_mac=bool(mac is not None),
        mac_address=L2Util.mac_to_bin(mac) if mac else None,
        # Map e.g. "lacp" / "round-robin" to the BOND_API_MODE_* member.
            f"BOND_API_MODE_{mode.replace(u'-', u'_').upper()}"
        # 0 when no load balance algorithm is requested.
        lb=0 if load_balance is None else getattr(
            LinkBondLoadBalanceAlgo,
            f"BOND_API_LB_ALGO_{load_balance.upper()}"
    err_msg = f"Failed to create bond interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the bond as an ethernet interface in the topology.
    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx=u"eth_bond"
    if_key = Topology.get_interface_by_sw_index(node, sw_if_index)
def add_eth_interface(
        node, ifc_name=None, sw_if_index=None, ifc_pfx=None,
    """Add ethernet interface to current topology.

    Either ``ifc_name`` or ``sw_if_index`` may be supplied; the missing
    one is looked up from VPP.

    :param node: DUT node from topology.
    :param ifc_name: Name of the interface.
    :param sw_if_index: SW interface index.
    :param ifc_pfx: Interface key prefix.
    :param host_if_key: Host interface key from topology file.
    :type node: dict
    :type ifc_name: str
    :type sw_if_index: int
    :type ifc_pfx: str
    :type host_if_key: str
    """
    if_key = Topology.add_new_port(node, ifc_pfx)

    # Resolve whichever of (name, sw_if_index) was not supplied.
    if ifc_name and sw_if_index is None:
        sw_if_index = InterfaceUtil.vpp_get_interface_sw_index(
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    if sw_if_index and ifc_name is None:
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
    ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_index)
    Topology.update_interface_mac_address(node, if_key, ifc_mac)
    # Inherit NUMA node and PCI address from the underlying host interface.
    if host_if_key is not None:
        Topology.set_interface_numa_node(
            node, if_key, Topology.get_interface_numa_node(
        Topology.update_interface_pci_address(
            node, if_key, Topology.get_interface_pci_addr(node, host_if_key)
def vpp_create_avf_interface(
        node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0):
    """Create AVF interface on VPP node.

    :param node: DUT node from topology.
    :param if_key: Interface key from topology file of interface
        to be bound to i40evf driver.
    :param num_rx_queues: Number of RX queues.
    :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
    :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
    :type node: dict
    :type if_key: str
    :type num_rx_queues: int
    :type rxq_size: int
    :type txq_size: int
    :returns: AVF interface key (name) in topology.
    :rtype: str
    :raises RuntimeError: If it is not possible to create AVF interface on
        the node.
    """
    # Verbose driver logging helps debug AVF bring-up failures.
    PapiSocketExecutor.run_cli_cmd(
        node, u"set logging class avf level debug"
    vf_pci_addr = Topology.get_interface_pci_addr(node, if_key)
        pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
        # 0 asks the API for its default queue count.
        rxq_num=int(num_rx_queues) if num_rx_queues else 0,
    err_msg = f"Failed to create AVF interface on host {node[u'host']}"
    # FIXME: Remove once the fw/driver is upgraded.
    with PapiSocketExecutor(node) as papi_exec:
            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(
        except AssertionError:
            # Log, then re-raise with the uniform error message.
            logger.error(err_msg)
            raise AssertionError(err_msg)
    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
    return Topology.get_interface_by_sw_index(node, sw_if_index)
def vpp_create_af_xdp_interface(
        node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
    """Create AF_XDP interface on VPP node.

    :param node: DUT node from topology.
    :param if_key: Physical interface key from topology file of interface
        to be bound to compatible driver.
    :param num_rx_queues: Number of RX queues. (Optional, Default: none)
    :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
    :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
    :param mode: AF_XDP interface mode. (Optional, Default: auto).
    :type node: dict
    :type if_key: str
    :type num_rx_queues: int
    :type rxq_size: int
    :type txq_size: int
    :type mode: str
    :returns: Interface key (name) in topology file.
    :rtype: str
    :raises RuntimeError: If it is not possible to create AF_XDP interface
        on the node.
    """
    # Verbose driver logging helps debug AF_XDP bring-up failures.
    PapiSocketExecutor.run_cli_cmd(
        node, u"set logging class af_xdp level debug"

    cmd = u"af_xdp_create"
    pci_addr = Topology.get_interface_pci_addr(node, if_key)
        # Host netdev name is resolved from the PCI address.
        name=InterfaceUtil.pci_to_eth(node, pci_addr),
        host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
        # 0 asks the API for its default queue count.
        rxq_num=int(num_rx_queues) if num_rx_queues else 0,
        # Translate e.g. "auto" to the AF_XDP_API_MODE_* enum value.
        mode=getattr(AfXdpMode, f"AF_XDP_API_MODE_{mode.upper()}").value
    err_msg = f"Failed to create AF_XDP interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Apply the topology MAC to the new VPP interface.
    InterfaceUtil.vpp_set_interface_mac(
        node, sw_if_index, Topology.get_interface_mac(node, if_key)
    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx=u"eth_af_xdp",
    return Topology.get_interface_by_sw_index(node, sw_if_index)
def vpp_create_rdma_interface(
        node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
    """Create RDMA interface on VPP node.

    :param node: DUT node from topology.
    :param if_key: Physical interface key from topology file of interface
        to be bound to rdma-core driver.
    :param num_rx_queues: Number of RX queues.
    :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
    :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
    :param mode: RDMA interface mode - auto/ibv/dv.
    :type node: dict
    :type if_key: str
    :type num_rx_queues: int
    :type rxq_size: int
    :type txq_size: int
    :type mode: str
    :returns: Interface key (name) in topology file.
    :rtype: str
    :raises RuntimeError: If it is not possible to create RDMA interface on
        the node.
    """
    # Verbose driver logging helps debug RDMA bring-up failures.
    PapiSocketExecutor.run_cli_cmd(
        node, u"set logging class rdma level debug"

    cmd = u"rdma_create_v3"
    pci_addr = Topology.get_interface_pci_addr(node, if_key)
        # Host netdev name is resolved from the PCI address.
        name=InterfaceUtil.pci_to_eth(node, pci_addr),
        host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
        # 0 asks the API for its default queue count.
        rxq_num=int(num_rx_queues) if num_rx_queues else 0,
        mode=getattr(RdmaMode, f"RDMA_API_MODE_{mode.upper()}").value,
        # Note: Set True for non-jumbo packets.
    # TODO: Apply desired RSS flags.
    err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Apply the topology MAC to the new VPP interface.
    InterfaceUtil.vpp_set_interface_mac(
        node, sw_if_index, Topology.get_interface_mac(node, if_key)
    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx=u"eth_rdma",
    return Topology.get_interface_by_sw_index(node, sw_if_index)
def vpp_add_bond_member(node, interface, bond_if):
    """Add member interface to bond interface on VPP node.

    :param node: DUT node from topology.
    :param interface: Physical interface key from topology file.
    :param bond_if: Bond interface key from topology file.
    :type node: dict
    :type interface: str
    :type bond_if: str
    :raises RuntimeError: If it is not possible to add member to bond
        interface on the node.
    """
    cmd = u"bond_add_member"
        sw_if_index=Topology.get_interface_sw_index(node, interface),
        bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
        is_long_timeout=False
    err_msg = f"Failed to add member {interface} to bond interface " \
        f"{bond_if} on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def vpp_show_bond_data_on_node(node, verbose=False):
    """Show (detailed) bond information on VPP node.

    :param node: DUT node from topology.
    :param verbose: If detailed information is required or not.
    :type node: dict
    :type verbose: bool
    """
    cmd = u"sw_bond_interface_dump"
    err_msg = f"Failed to get bond interface dump on host {node[u'host']}"

    data = f"Bond data on node {node[u'host']}:\n"
    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd).get_details(err_msg)
    for bond in details:
        data += f"{bond[u'interface_name']}\n"
        # Strip the API enum prefixes for human-readable output.
        data += u" mode: {m}\n".format(
            m=bond[u"mode"].name.replace(u"BOND_API_MODE_", u"").lower()
        data += u" load balance: {lb}\n".format(
            lb=bond[u"lb"].name.replace(u"BOND_API_LB_ALGO_", u"").lower()
        data += f" number of active members: {bond[u'active_members']}\n"
        # Individual member interfaces are listed from a separate dump.
        member_data = InterfaceUtil.vpp_bond_member_dump(
            node, Topology.get_interface_by_sw_index(
                node, bond[u"sw_if_index"]
        for member in member_data:
            # Only active (non-passive) members are listed here.
            if not member[u"is_passive"]:
                data += f" {member[u'interface_name']}\n"
        data += f" number of members: {bond[u'members']}\n"
        for member in member_data:
            data += f" {member[u'interface_name']}\n"
        data += f" interface id: {bond[u'id']}\n"
        data += f" sw_if_index: {bond[u'sw_if_index']}\n"
def vpp_bond_member_dump(node, interface):
    """Get bond interface slave(s) data on VPP node.

    :param node: DUT node from topology.
    :param interface: Physical interface key from topology file.
    :type node: dict
    :type interface: str
    :returns: Bond slave interface data.
    :rtype: list
    """
    cmd = u"sw_member_interface_dump"
        sw_if_index=Topology.get_interface_sw_index(node, interface)
    err_msg = f"Failed to get slave dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)

    logger.debug(f"Member data:\n{details}")
def vpp_show_bond_data_on_all_nodes(nodes, verbose=False):
    """Show (detailed) bond information on all VPP nodes in DICT__nodes.

    Iterates the topology and shows bond data on every DUT node;
    non-DUT nodes are ignored.

    :param nodes: Nodes in the topology.
    :param verbose: If detailed information is required or not.
    :type nodes: dict
    :type verbose: bool
    """
    duts = (
        node for node in nodes.values()
        if node[u"type"] == NodeType.DUT
    )
    for dut in duts:
        InterfaceUtil.vpp_show_bond_data_on_node(dut, verbose)
def vpp_enable_input_acl_interface(
        node, interface, ip_version, table_index):
    """Enable input acl on interface.

    :param node: VPP node to setup interface for input acl.
    :param interface: Interface to setup input acl.
    :param ip_version: Version of IP protocol.
    :param table_index: Classify table index.
    :type node: dict
    :type interface: str or int
    :type ip_version: str
    :type table_index: int
    """
    cmd = u"input_acl_set_interface"
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        # Only the table matching ip_version is set; the other two get
        # BITWISE_NON_ZERO (all ones), presumably the "unset" sentinel
        # — confirm against VPP API.
        ip4_table_index=table_index if ip_version == u"ip4"
        else Constants.BITWISE_NON_ZERO,
        ip6_table_index=table_index if ip_version == u"ip6"
        else Constants.BITWISE_NON_ZERO,
        l2_table_index=table_index if ip_version == u"l2"
        else Constants.BITWISE_NON_ZERO,
    err_msg = f"Failed to enable input acl on interface {interface}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def get_interface_classify_table(node, interface):
    """Get name of classify table for the given interface.

    TODO: Move to Classify.py.

    :param node: VPP node to get data from.
    :param interface: Name or sw_if_index of a specific interface.
    :type node: dict
    :type interface: str or int
    :returns: Classify table name.
    """
    # Accept either an interface name or an already-resolved index.
    if isinstance(interface, str):
        sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
        sw_if_index = interface

    cmd = u"classify_table_by_interface"
        sw_if_index=sw_if_index
    err_msg = f"Failed to get classify table name by interface {interface}"
    with PapiSocketExecutor(node) as papi_exec:
        reply = papi_exec.add(cmd, **args).get_reply(err_msg)
def get_sw_if_index(node, interface_name):
    """Get sw_if_index for the given interface from actual interface dump.

    FIXME: Delete and redirect callers to vpp_get_interface_sw_index.

    :param node: VPP node to get interface data from.
    :param interface_name: Name of the specific interface.
    :type node: dict
    :type interface_name: str
    :returns: sw_if_index of the given interface.
    """
    interface_data = InterfaceUtil.vpp_get_interface_data(
        node, interface=interface_name
    return interface_data.get(u"sw_if_index")
def vxlan_gpe_dump(node, interface_name=None):
    """Get VxLAN GPE data for the given interface.

    :param node: VPP node to get interface data from.
    :param interface_name: Name of the specific interface. If None,
        information about all VxLAN GPE interfaces is returned.
    :type node: dict
    :type interface_name: str
    :returns: Dictionary containing data for the given VxLAN GPE interface
        or if interface=None, the list of dictionaries with all VxLAN GPE
        interfaces.
    :rtype: dict or list
    """
    def process_vxlan_gpe_dump(vxlan_dump):
        """Process vxlan_gpe dump.

        :param vxlan_dump: Vxlan_gpe interface dump.
        :type vxlan_dump: dict
        :returns: Processed vxlan_gpe interface dump.
        """
        # Convert raw PAPI address bytes to ip_address objects.
        if vxlan_dump[u"is_ipv6"]:
            vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"])
            vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"])
            # IPv4 addresses occupy only the first 4 bytes of the field.
            vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"][0:4])
            vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"][0:4])

    if interface_name is not None:
        sw_if_index = InterfaceUtil.get_interface_index(
            node, interface_name
        # All-ones index dumps every VxLAN GPE tunnel.
        sw_if_index = int(Constants.BITWISE_NON_ZERO)

    cmd = u"vxlan_gpe_tunnel_dump"
        sw_if_index=sw_if_index
    err_msg = f"Failed to get VXLAN-GPE dump on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)

    # Single dict for a named interface, list of dicts otherwise.
    data = list() if interface_name is None else dict()
    for dump in details:
        if interface_name is None:
            data.append(process_vxlan_gpe_dump(dump))
        elif dump[u"sw_if_index"] == sw_if_index:
            data = process_vxlan_gpe_dump(dump)

    logger.debug(f"VXLAN-GPE data:\n{data}")
def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
    """Assign VPP interface to specific VRF/FIB table.

    :param node: VPP node where the FIB and interface are located.
    :param interface: Interface to be assigned to FIB.
    :param table_id: VRF table ID.
    :param ipv6: Assign to IPv6 table. Default False.
    :type node: dict
    :type interface: str or int
    :type table_id: int
    :type ipv6: bool
    """
    cmd = u"sw_interface_set_table"
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        vrf_id=int(table_id)
    err_msg = f"Failed to assign interface {interface} to FIB table"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def set_linux_interface_mac(
        node, interface, mac, namespace=None, vf_id=None):
    """Set MAC address for interface in linux.

    Runs ``ip link set <interface> address <mac>`` (or the
    ``vf <id> mac <mac>`` variant), optionally inside a network namespace.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param mac: MAC to be assigned to interface.
    :param namespace: Execute command in namespace. Optional
    :param vf_id: Virtual Function id. Optional
    :type node: dict
    :type interface: str
    :type mac: str
    :type namespace: str
    :type vf_id: int
    """
    # Target either the whole device or one of its virtual functions.
    if vf_id is None:
        mac_str = f"address {mac}"
    else:
        mac_str = f"vf {vf_id} mac {mac}"
    # Prefix with namespace entry when one is requested.
    if namespace:
        ns_str = f"ip netns exec {namespace}"
    else:
        ns_str = u""

    exec_cmd_no_error(
        node, f"{ns_str} ip link set {interface} {mac_str}", sudo=True
    )
def set_linux_interface_promisc(
        node, interface, namespace=None, vf_id=None, state=u"on"):
    """Set promisc state for interface in linux.

    Runs ``ip link set dev <interface> promisc <state>`` (or the
    ``vf <id> promisc <state>`` variant), optionally inside a network
    namespace.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param namespace: Exec command in namespace. (Optional, Default: None)
    :param vf_id: Virtual Function id. (Optional, Default: None)
    :param state: State of feature. (Optional, Default: on)
    :type node: dict
    :type interface: str
    :type namespace: str
    :type vf_id: int
    :type state: str
    """
    # Target either the whole device or one of its virtual functions.
    if vf_id is None:
        promisc_str = f"promisc {state}"
    else:
        promisc_str = f"vf {vf_id} promisc {state}"
    # Prefix with namespace entry when one is requested.
    if namespace:
        ns_str = f"ip netns exec {namespace}"
    else:
        ns_str = u""

    exec_cmd_no_error(
        node, f"{ns_str} ip link set dev {interface} {promisc_str}",
        sudo=True
    )
def set_linux_interface_trust_on(
        node, interface, namespace=None, vf_id=None):
    """Set trust on (promisc) for interface in linux.

    Runs ``ip link set dev <interface> trust on`` (or the
    ``vf <id> trust on`` variant), optionally inside a network namespace.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param namespace: Execute command in namespace. Optional
    :param vf_id: Virtual Function id. Optional
    :type node: dict
    :type interface: str
    :type namespace: str
    :type vf_id: int
    """
    # Target either the whole device or one of its virtual functions.
    if vf_id is None:
        trust_str = u"trust on"
    else:
        trust_str = f"vf {vf_id} trust on"
    # Prefix with namespace entry when one is requested.
    if namespace:
        ns_str = f"ip netns exec {namespace}"
    else:
        ns_str = u""

    exec_cmd_no_error(
        node, f"{ns_str} ip link set dev {interface} {trust_str}", sudo=True
    )
def set_linux_interface_spoof_off(
        node, interface, namespace=None, vf_id=None):
    """Set spoof off for interface in linux.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param namespace: Execute command in namespace. Optional
    :param vf_id: Virtual Function id. Optional
    :type node: dict
    :type interface: str
    :type namespace: str
    :type vf_id: int
    """
    # Target either the whole device or one of its virtual functions.
    spoof_str = f"vf {vf_id} spoof off" if vf_id is not None \
    ns_str = f"ip netns exec {namespace}" if namespace else u""

    cmd = f"{ns_str} ip link set dev {interface} {spoof_str}"
    exec_cmd_no_error(node, cmd, sudo=True)
def set_linux_interface_state(
        node, interface, namespace=None, state=u"up"):
    """Set operational state for interface in linux.

    Runs ``ip link set dev <interface> <state>``, optionally inside a
    network namespace.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param namespace: Execute command in namespace. Optional
    :param state: Up/Down.
    :type node: dict
    :type interface: str
    :type namespace: str
    :type state: str
    """
    # Prefix with namespace entry when one is requested.
    if namespace:
        ns_str = f"ip netns exec {namespace}"
    else:
        ns_str = u""

    exec_cmd_no_error(
        node, f"{ns_str} ip link set dev {interface} {state}", sudo=True
    )
def init_interface(node, ifc_key, driver, numvfs=0, osi_layer=u"L2"):
    """Init PCI device. Check driver compatibility and bind to proper
    drivers. Optionally create NIC VFs.

    :param node: DUT node.
    :param ifc_key: Interface key from topology file.
    :param driver: Base driver to use.
    :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
    :param osi_layer: OSI Layer type to initialize TG with.
        Default value "L2" sets linux interface spoof off.
    :type node: dict
    :type ifc_key: str
    :type driver: str
    :type numvfs: int
    :type osi_layer: str
    :returns: Virtual Function topology interface keys.
    :raises RuntimeError: If a reason preventing initialization is found.
    """
    kernel_driver = Topology.get_interface_driver(node, ifc_key)

    # Each supported VPP driver requires a compatible kernel driver;
    # incompatible combinations fail fast.
    if driver == u"avf":
        if kernel_driver not in (
                u"ice", u"iavf", u"i40e", u"i40evf"):
                f"AVF needs ice or i40e compatible driver, not "
                f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
        vf_keys = InterfaceUtil.init_generic_interface(
            node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
    elif driver == u"af_xdp":
        if kernel_driver not in (
                u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core",
                f"AF_XDP needs ice/i40e/rdma/ixgbe compatible driver, not "
                f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
        vf_keys = InterfaceUtil.init_generic_interface(
            node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
    elif driver == u"rdma-core":
        # rdma-core has no kernel driver restriction here.
        vf_keys = InterfaceUtil.init_generic_interface(
            node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
def init_generic_interface(node, ifc_key, numvfs=0, osi_layer=u"L2"):
    """Init PCI device. Bind to proper drivers. Optionally create NIC VFs.

    :param node: DUT node.
    :param ifc_key: Interface key from topology file.
    :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
    :param osi_layer: OSI Layer type to initialize TG with.
        Default value "L2" sets linux interface spoof off.
    :type node: dict
    :type ifc_key: str
    :type numvfs: int
    :type osi_layer: str
    :returns: Virtual Function topology interface keys.
    :raises RuntimeError: If a reason preventing initialization is found.
    """
    # Read PCI address and driver.
    pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
    pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
    uio_driver = Topology.get_uio_driver(node)
    kernel_driver = Topology.get_interface_driver(node, ifc_key)
    current_driver = DUTSetup.get_pci_dev_driver(
        node, pf_pci_addr.replace(u":", r"\:"))
    # Shell glob resolving the netdev name behind this PCI address;
    # expanded on the remote host, not locally.
    pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"

    VPPUtil.stop_vpp_service(node)
    if current_driver != kernel_driver:
        # PCI device must be re-bound to kernel driver before creating VFs.
        DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
        # Stop VPP to prevent deadlock.
        # Unbind from current driver if bound.
            DUTSetup.pci_driver_unbind(node, pf_pci_addr)
        # Bind to kernel driver.
        DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)

    # Initialize PCI VFs.
    DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)

    if osi_layer == u"L2":
        InterfaceUtil.set_linux_interface_promisc(node, pf_dev)

    # Set MAC address and bind each virtual function to uio driver.
    for vf_id in range(numvfs):
        # Derive a deterministic per-VF MAC from the PF MAC and vf_id
        # (note: PF MAC byte [1] is intentionally skipped here —
        # presumably to keep the address locally administered; confirm).
        vf_mac_addr = u":".join(
            [pf_mac_addr[0], pf_mac_addr[2], pf_mac_addr[3], pf_mac_addr[4],
             pf_mac_addr[5], f"{vf_id:02x}"
        InterfaceUtil.set_linux_interface_trust_on(
            node, pf_dev, vf_id=vf_id
        if osi_layer == u"L2":
            InterfaceUtil.set_linux_interface_spoof_off(
                node, pf_dev, vf_id=vf_id
        InterfaceUtil.set_linux_interface_mac(
            node, pf_dev, vf_mac_addr, vf_id=vf_id
        InterfaceUtil.set_linux_interface_state(
            node, pf_dev, state=u"up"

        # Re-bind the VF to the userspace (uio) driver for VPP use.
        DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
        DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)

        # Add newly created ports into topology file
        vf_ifc_name = f"{ifc_key}_vif"
        vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
        vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
        Topology.update_interface_name(
            node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
        Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
        Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
        # VFs share the NUMA node of their parent PF.
        Topology.set_interface_numa_node(
            node, vf_ifc_key, Topology.get_interface_numa_node(
        vf_ifc_keys.append(vf_ifc_key)
def vpp_sw_interface_rx_placement_dump(node):
    """Dump VPP interface RX placement on node.

    Queries placement for every topology interface that has a VPP
    sw_if_index assigned, in a single PAPI session.

    :param node: Node to run command on.
    :type node: dict
    :returns: Thread mapping information as a list of dictionaries.
    :rtype: list
    """
    cmd = u"sw_interface_rx_placement_dump"
    err_msg = f"Failed to run '{cmd}' PAPI command on host {node[u'host']}!"
    with PapiSocketExecutor(node) as papi_exec:
        # Collect indices first, then queue one dump request per interface.
        sw_indices = [
            ifc[u"vpp_sw_index"] for ifc in node[u"interfaces"].values()
            if ifc[u"vpp_sw_index"] is not None
        ]
        for sw_index in sw_indices:
            papi_exec.add(cmd, sw_if_index=sw_index)
        details = papi_exec.get_details(err_msg)
    # Stable order for callers iterating the placements.
    details.sort(key=lambda item: item[u"sw_if_index"])
    return details
def vpp_sw_interface_rx_placement_dump_on_all_duts(nodes):
    """Dump VPP interface RX placement on all given nodes.

    The per-node dump results are discarded here; this keyword is used
    for its side effect of querying every DUT node.

    :param nodes: Nodes to run command on.
    :type nodes: dict
    """
    for node in nodes.values():
        if node[u"type"] != NodeType.DUT:
            continue
        InterfaceUtil.vpp_sw_interface_rx_placement_dump(node)
def vpp_sw_interface_set_rx_placement(
        node, sw_if_index, queue_id, worker_id):
    """Set interface RX placement to worker on node.

    :param node: Node to run command on.
    :param sw_if_index: VPP SW interface index.
    :param queue_id: VPP interface queue ID.
    :param worker_id: VPP worker ID (indexing from 0).
    :type node: dict
    :type sw_if_index: int
    :type queue_id: int
    :type worker_id: int
    :raises RuntimeError: If failed to run command on host or if no API
        reply received.
    """
    cmd = u"sw_interface_set_rx_placement"
    err_msg = f"Failed to set interface RX placement to worker " \
        f"on host {node[u'host']}!"
        sw_if_index=sw_if_index,
        worker_id=worker_id,
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def vpp_round_robin_rx_placement(
        node, prefix, workers=None):
    """Set Round Robin interface RX placement on all worker threads
    on the node.

    If specified, workers limits the number of physical cores used
    for data plane I/O work. Other cores are presumed to do something else,
    e.g. asynchronous crypto processing.
    None means all workers are used for data plane work.

    :param node: Topology nodes.
    :param prefix: Interface name prefix.
    :param workers: Comma separated worker index numbers intended for
        dataplane work.
    :type node: dict
    :type prefix: str
    :type workers: str
    """
    thread_data = VPPUtil.vpp_show_threads(node)
    worker_cnt = len(thread_data) - 1
        # Translate the requested cpu ids to VPP internal thread ids.
        for item in thread_data:
            if str(item.cpu_id) in workers.split(u","):
                worker_ids.append(item.id)
        # No explicit selection: every non-main thread is a worker.
        for item in thread_data:
            if u"vpp_main" not in item.name:
                worker_ids.append(item.id)

    # Walk current placements and re-assign queues of matching interfaces
    # round-robin over the selected workers.
    for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
        for interface in node[u"interfaces"].values():
            if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
                    and prefix in interface[u"name"]:
                InterfaceUtil.vpp_sw_interface_set_rx_placement(
                    node, placement[u"sw_if_index"], placement[u"queue_id"],
                    # NOTE(review): thread ids appear 1-based, hence the
                    # ``- 1`` to get a 0-based worker id — confirm.
                    worker_ids[worker_idx % len(worker_ids)] - 1
1993 def vpp_round_robin_rx_placement_on_all_duts(
1994 nodes, prefix, workers=None):
1995 """Set Round Robin interface RX placement on worker threads
1998 If specified, workers limits the number of physical cores used
1999 for data plane I/O work. Other cores are presumed to do something else,
2000 e.g. asynchronous crypto processing.
2001 None means all cores are used for data plane work.
2003 :param nodes: Topology nodes.
2004 :param prefix: Interface name prefix.
2005 :param workers: Comma separated worker index numbers intended for
2011 for node in nodes.values():
2012 if node[u"type"] == NodeType.DUT:
2013 InterfaceUtil.vpp_round_robin_rx_placement(
2014 node, prefix, workers