1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Interface util library."""
16 from time import sleep
17 from enum import IntEnum
19 from ipaddress import ip_address
20 from robot.api import logger
22 from resources.libraries.python.Constants import Constants
23 from resources.libraries.python.CpuUtils import CpuUtils
24 from resources.libraries.python.DUTSetup import DUTSetup
25 from resources.libraries.python.L2Util import L2Util
26 from resources.libraries.python.PapiExecutor import PapiSocketExecutor
27 from resources.libraries.python.parsers.JsonParser import JsonParser
28 from resources.libraries.python.ssh import SSH, exec_cmd_no_error
29 from resources.libraries.python.topology import NodeType, Topology
30 from resources.libraries.python.VPPUtil import VPPUtil
class InterfaceStatusFlags(IntEnum):
    """Interface status flags.

    Bit flags as used by the VPP binary API (see ``sw_interface_set_flags``
    usage in set_interface_state and the flags check in
    vpp_node_interfaces_ready_wait).
    """
    # Administrative state is up.
    IF_STATUS_API_FLAG_ADMIN_UP = 1
    # Physical link is up.
    IF_STATUS_API_FLAG_LINK_UP = 2
class MtuProto(IntEnum):
    """MTU protocol.

    NOTE(review): the provided source appears truncated here; members
    with values 0-2 are not visible -- confirm against the original file.
    """
    MTU_PROTO_API_MPLS = 3
class LinkDuplex(IntEnum):
    """Link duplex mode, as reported in interface dumps."""
    LINK_DUPLEX_API_UNKNOWN = 0
    LINK_DUPLEX_API_HALF = 1
    LINK_DUPLEX_API_FULL = 2
class SubInterfaceFlags(IntEnum):
    """Sub-interface flags.

    Bit-flag values; they are OR-ed together when building the
    ``create_subif`` API call (see create_subinterface).
    """
    SUB_IF_API_FLAG_NO_TAGS = 1
    SUB_IF_API_FLAG_ONE_TAG = 2
    SUB_IF_API_FLAG_TWO_TAGS = 4
    SUB_IF_API_FLAG_DOT1AD = 8
    SUB_IF_API_FLAG_EXACT_MATCH = 16
    SUB_IF_API_FLAG_DEFAULT = 32
    SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY = 64
    SUB_IF_API_FLAG_INNER_VLAN_ID_ANY = 128
    SUB_IF_API_FLAG_DOT1AH = 256
class RxMode(IntEnum):
    """Interface RX mode."""
    RX_MODE_API_UNKNOWN = 0
    RX_MODE_API_POLLING = 1
    RX_MODE_API_INTERRUPT = 2
    RX_MODE_API_ADAPTIVE = 3
    RX_MODE_API_DEFAULT = 4
class IfType(IntEnum):
    """Interface type.

    NOTE(review): the provided source appears truncated here; additional
    members are not visible -- confirm against the original file.
    """
    IF_API_TYPE_HARDWARE = 0
class LinkBondLoadBalanceAlgo(IntEnum):
    """Link bonding load balance algorithm.

    Member names are resolved dynamically by string in
    vpp_create_bond_interface (``BOND_API_LB_ALGO_{name}``).
    """
    BOND_API_LB_ALGO_L2 = 0
    BOND_API_LB_ALGO_L34 = 1
    BOND_API_LB_ALGO_L23 = 2
    BOND_API_LB_ALGO_RR = 3
    BOND_API_LB_ALGO_BC = 4
    BOND_API_LB_ALGO_AB = 5
class LinkBondMode(IntEnum):
    """Link bonding mode.

    Member names are resolved dynamically by string in
    vpp_create_bond_interface (``BOND_API_MODE_{name}``).
    """
    BOND_API_MODE_ROUND_ROBIN = 1
    BOND_API_MODE_ACTIVE_BACKUP = 2
    BOND_API_MODE_XOR = 3
    BOND_API_MODE_BROADCAST = 4
    BOND_API_MODE_LACP = 5
class RdmaMode(IntEnum):
    """RDMA interface mode."""
    RDMA_API_MODE_AUTO = 0
    RDMA_API_MODE_IBV = 1
"""General utilities for managing interfaces"""

# NOTE(review): the enclosing "class InterfaceUtil:" statement is not
# visible in the provided source (likely truncated just above).
# Path of the udev rules file written by tg_set_interfaces_udev_rules().
__UDEV_IF_RULES_FILE = u"/etc/udev/rules.d/10-network.rules"
def pci_to_int(pci_str):
    """Convert PCI address from string format (0000:18:0a.0) to
    integer representation (169345024).

    :param pci_str: PCI address in string representation.
    :type pci_str: str
    :returns: Integer representation of PCI address.
    :rtype: int
    """
    # Split "dddd:bb:ss.f" into [domain, bus, slot, function] fields.
    parts = pci_str.split(u":")
    fields = parts[0:2] + parts[2].split(u".")
    # Pack as domain | bus<<16 | slot<<24 | function<<29.
    return (
        int(fields[0], 16) | int(fields[1], 16) << 16
        | int(fields[2], 16) << 24 | int(fields[3], 16) << 29
    )
def pci_to_eth(node, pci_str):
    """Convert PCI address on DUT to Linux ethernet name.

    :param node: DUT node.
    :param pci_str: PCI address.
    :type node: dict
    :type pci_str: str
    :returns: Ethernet name.
    :rtype: str
    :raises RuntimeError: If the PCI address cannot be converted.
    """
    # The kernel exposes the netdev name under the PCI device's net/ dir.
    cmd = f"basename /sys/bus/pci/devices/{pci_str}/net/*"
    # NOTE(review): the try/except wrapping that makes the RuntimeError
    # below reachable is missing from the provided source; as shown the
    # raise is unconditional -- restore from VCS.
    stdout, _ = exec_cmd_no_error(node, cmd)
    raise RuntimeError(f"Cannot convert {pci_str} to ethernet name!")
    return stdout.strip()
def get_interface_index(node, interface):
    """Get interface sw_if_index from topology file.

    :param node: Node where the interface is.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: str or int
    :returns: SW interface index.
    :rtype: int
    :raises TypeError: If the interface cannot be resolved to an index.
    """
    # NOTE(review): a "try:" opener is missing in the provided source;
    # the int() conversion is attempted first for numeric inputs.
    sw_if_index = int(interface)
    # NOTE(review): the "except ValueError:" handler opener is missing;
    # the topology lookups below are the fallback for string keys/names.
    sw_if_index = Topology.get_interface_sw_index(node, interface)
    if sw_if_index is None:
        # NOTE(review): the "sw_if_index =" assignment target and inner
        # "try:" are missing in the provided source.
        Topology.get_interface_sw_index_by_name(node, interface)
    except TypeError as err:
        raise TypeError(f"Wrong interface format {interface}") from err
    # NOTE(review): "return sw_if_index" is missing in the provided
    # source.
def set_interface_state(node, interface, state, if_type=u"key"):
    """Set interface state on a node.

    Function can be used for DUTs as well as for TGs.

    :param node: Node where the interface is.
    :param interface: Interface key or sw_if_index or name.
    :param state: One of 'up' or 'down'.
    :param if_type: Interface type, one of 'key' or 'name'.
    :type node: dict
    :type interface: str or int
    :type state: str
    :type if_type: str
    :raises ValueError: If the interface type is unknown.
    :raises ValueError: If the state of interface is unexpected.
    :raises ValueError: If the node has an unknown node type.
    """
    if if_type == u"key":
        if isinstance(interface, str):
            sw_if_index = Topology.get_interface_sw_index(node, interface)
            iface_name = Topology.get_interface_name(node, interface)
        # NOTE(review): "else:" opener missing in provided source;
        # numeric input is used as the index directly.
            sw_if_index = interface
    elif if_type == u"name":
        iface_key = Topology.get_interface_by_name(node, interface)
        if iface_key is not None:
            sw_if_index = Topology.get_interface_sw_index(node, iface_key)
        iface_name = interface
    # NOTE(review): "else:" opener missing in provided source.
        raise ValueError(f"Unknown if_type: {if_type}")

    if node[u"type"] == NodeType.DUT:
        # DUT state changes go through the VPP binary API.
        # NOTE(review): 'if state == u"up":' opener missing in provided
        # source.
        flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
        elif state == u"down":
        # NOTE(review): the "down" branch body and "else:" opener are
        # missing in provided source.
            raise ValueError(f"Unexpected interface state: {state}")
        cmd = u"sw_interface_set_flags"
        err_msg = f"Failed to set interface state on host {node[u'host']}"
        # NOTE(review): "args = dict(" opener and "flags=flags)" closer
        # missing in provided source.
            sw_if_index=int(sw_if_index),
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
    elif node[u"type"] == NodeType.TG or node[u"type"] == NodeType.VM:
        # TG/VM interfaces are plain Linux netdevs.
        cmd = f"ip link set {iface_name} {state}"
        exec_cmd_no_error(node, cmd, sudo=True)
    # NOTE(review): "else:" and "raise ValueError(" lines missing in
    # provided source.
        f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
def set_interface_ethernet_mtu(node, iface_key, mtu):
    """Set Ethernet MTU for specified interface.

    Function can be used only for TGs.

    :param node: Node where the interface is.
    :param iface_key: Interface key from topology file.
    :param mtu: MTU to set.
    :type node: dict
    :type iface_key: str
    :type mtu: int
    :raises ValueError: If the node type is "DUT".
    :raises ValueError: If the node has an unknown node type.
    """
    if node[u"type"] == NodeType.DUT:
        msg = f"Node {node[u'host']}: Setting Ethernet MTU for interface " \
            f"on DUT nodes not supported"
    elif node[u"type"] != NodeType.TG:
        msg = f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
    # NOTE(review): "else:" opener missing in provided source; the next
    # three lines are the TG success path.
        iface_name = Topology.get_interface_name(node, iface_key)
        cmd = f"ip link set {iface_name} mtu {mtu}"
        exec_cmd_no_error(node, cmd, sudo=True)
    # NOTE(review): a "return" before the raise is missing in the
    # provided source; as shown, msg would be unbound on the success
    # path -- restore from VCS.
    raise ValueError(msg)
def set_default_ethernet_mtu_on_all_interfaces_on_node(node):
    """Set the default Ethernet MTU (1500 B) on every interface of a node.

    Function can be used only for TGs.

    :param node: Node where to set default MTU.
    :type node: dict
    """
    for iface_key in node[u"interfaces"]:
        InterfaceUtil.set_interface_ethernet_mtu(node, iface_key, 1500)
def vpp_set_interface_mtu(node, interface, mtu=9200):
    """Set Ethernet MTU on interface.

    :param node: VPP node.
    :param interface: Interface to setup MTU.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type interface: str or int
    :type mtu: int
    """
    if isinstance(interface, str):
        sw_if_index = Topology.get_interface_sw_index(node, interface)
    # NOTE(review): "else:" opener missing in provided source.
        sw_if_index = interface

    cmd = u"hw_interface_set_mtu"
    err_msg = f"Failed to set interface MTU on host {node[u'host']}"
    # NOTE(review): "args = dict(" opener and the mtu entry are missing
    # in provided source.
        sw_if_index=sw_if_index,
    # NOTE(review): "try:" opener missing in provided source.
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_reply(err_msg)
    except AssertionError as err:
        # TODO: Make failure tolerance optional.
        # MTU setting failures are tolerated here and only logged.
        logger.debug(f"Setting MTU failed. Expected?\n{err}")
def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
    """Set Ethernet MTU on all interfaces of a single VPP node.

    :param node: VPP node.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type mtu: int
    """
    for iface_key in node[u"interfaces"]:
        InterfaceUtil.vpp_set_interface_mtu(node, iface_key, mtu)
def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
    """Set Ethernet MTU on all interfaces on all DUTs.

    :param nodes: VPP nodes.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type nodes: dict
    :type mtu: int
    """
    # Only DUT nodes run VPP; other node types are skipped.
    duts = (n for n in nodes.values() if n[u"type"] == NodeType.DUT)
    for dut in duts:
        InterfaceUtil.vpp_set_interfaces_mtu_on_node(dut, mtu)
def vpp_node_interfaces_ready_wait(node, retries=15):
    """Wait until all interfaces with admin-up are in link-up state.

    :param node: Node to wait on.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type node: dict
    :type retries: int
    :raises RuntimeError: If any interface is not in link-up state after
        defined number of retries.
    """
    for _ in range(0, retries):
        # NOTE(review): "not_ready = list()" initializer missing in
        # provided source.
        out = InterfaceUtil.vpp_get_interface_data(node)
        for interface in out:
            # flags == 1 means ADMIN_UP set but LINK_UP not yet set
            # (see InterfaceStatusFlags).
            if interface.get(u"flags") == 1:
                not_ready.append(interface.get(u"interface_name"))
        # NOTE(review): the early-exit when not_ready is empty, the
        # logger call opener for the message below, and the sleep
        # between retries are missing in provided source.
            f"Interfaces still not in link-up state:\n{not_ready}"
    # NOTE(review): the for-else structure around the lines below is
    # missing in provided source; this error fires when retries are
    # exhausted.
    err = f"Timeout, interfaces not up:\n{not_ready}" \
        if u"not_ready" in locals() else u"No check executed!"
    raise RuntimeError(err)
def all_vpp_interfaces_ready_wait(nodes, retries=15):
    """Wait until all interfaces with admin-up are in link-up state for all
    nodes in the topology.

    :param nodes: Nodes in the topology.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type nodes: dict
    :type retries: int
    """
    for node in nodes.values():
        # Only DUT nodes run VPP, skip everything else.
        if node[u"type"] != NodeType.DUT:
            continue
        InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)
def vpp_get_interface_data(node, interface=None):
    """Get all interface data from a VPP node. If a name or
    sw_interface_index is provided, return only data for the matching
    interface(s).

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: List of dictionaries containing data for each interface, or a
        single dictionary for the specified interface.
    :rtype: list or dict
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    def process_if_dump(if_dump):
        """Process interface dump.

        Stringifies MAC fields and flattens API enum objects to plain
        ints so the result is easy to compare and log.

        :param if_dump: Interface dump.
        :type if_dump: dict
        :returns: Processed interface dump.
        :rtype: dict
        """
        if_dump[u"l2_address"] = str(if_dump[u"l2_address"])
        if_dump[u"b_dmac"] = str(if_dump[u"b_dmac"])
        if_dump[u"b_smac"] = str(if_dump[u"b_smac"])
        if_dump[u"flags"] = if_dump[u"flags"].value
        if_dump[u"type"] = if_dump[u"type"].value
        if_dump[u"link_duplex"] = if_dump[u"link_duplex"].value
        if_dump[u"sub_if_flags"] = if_dump[u"sub_if_flags"].value \
            if hasattr(if_dump[u"sub_if_flags"], u"value") \
            else int(if_dump[u"sub_if_flags"])
        # NOTE(review): the helper's return statement is missing in the
        # provided source.

    if interface is not None:
        # Pick the dump field to match against based on input type.
        if isinstance(interface, str):
            param = u"interface_name"
        elif isinstance(interface, int):
            param = u"sw_if_index"
        # NOTE(review): "else:" opener missing in provided source.
            raise TypeError(f"Wrong interface format {interface}")

    cmd = u"sw_interface_dump"
    # NOTE(review): "args = dict(" opener and its closer are missing in
    # provided source.
        name_filter_valid=False,
    err_msg = f"Failed to get interface dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)
    logger.debug(f"Received data:\n{details!r}")

    data = list() if interface is None else dict()
    # NOTE(review): "for dump in details:" loop opener missing in
    # provided source.
        if interface is None:
            data.append(process_if_dump(dump))
        # Names from the API may be NUL-padded, strip before comparing.
        elif str(dump.get(param)).rstrip(u"\x00") == str(interface):
            data = process_if_dump(dump)

    logger.debug(f"Interface data:\n{data}")
    # NOTE(review): "return data" missing in provided source.
def vpp_get_interface_name(node, sw_if_index):
    """Get interface name for the given SW interface index from actual
    interface dump.

    :param node: VPP node to get interface data from.
    :param sw_if_index: SW interface index of the specific interface.
    :type node: dict
    :type sw_if_index: int
    :returns: Name of the given interface.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
    sup_index = if_data[u"sup_sw_if_index"]
    # When the dump points at a different super-interface, re-dump that
    # one and report its name instead.
    if sup_index != if_data[u"sw_if_index"]:
        if_data = InterfaceUtil.vpp_get_interface_data(node, sup_index)

    return if_data.get(u"interface_name")
def vpp_get_interface_sw_index(node, interface_name):
    """Get sw_if_index for the given interface name from actual
    interface dump.

    :param node: VPP node to get interface data from.
    :param interface_name: Interface name.
    :type node: dict
    :type interface_name: str
    :returns: sw_if_index of the given interface.
    :rtype: int
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)

    return if_data.get(u"sw_if_index")
def vpp_get_interface_mac(node, interface):
    """Get MAC address for the given interface from actual interface dump.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: MAC address.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
    sup_index = if_data[u"sup_sw_if_index"]
    # When the dump points at a different super-interface, read the MAC
    # from the super-interface's dump instead.
    if sup_index != if_data[u"sw_if_index"]:
        if_data = InterfaceUtil.vpp_get_interface_data(node, sup_index)

    return if_data.get(u"l2_address")
def tg_set_interface_driver(node, pci_addr, driver):
    """Set interface driver on the TG node.

    :param node: Node to set interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :param driver: Driver name.
    :type node: dict
    :type pci_addr: str
    :type driver: str
    :raises RuntimeError: If unbinding from the current driver fails.
    :raises RuntimeError: If binding to the new driver fails.
    """
    old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
    if old_driver == driver:
        # NOTE(review): the early "return" and the SSH session setup
        # ("ssh = SSH(); ssh.connect(node)") are missing in the provided
        # source; "ssh" below is otherwise unbound -- restore from VCS.

    # Unbind from current driver
    if old_driver is not None:
        cmd = f"sh -c \"echo {pci_addr} > " \
            f"/sys/bus/pci/drivers/{old_driver}/unbind\""
        ret_code, _, _ = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")

    # Bind to the new driver
    cmd = f"sh -c \"echo {pci_addr} > /sys/bus/pci/drivers/{driver}/bind\""
    ret_code, _, _ = ssh.exec_command_sudo(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
def tg_get_interface_driver(node, pci_addr):
    """Get interface driver from the TG node.

    :param node: Node to get interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :type node: dict
    :type pci_addr: str
    :returns: Interface driver or None if not found.
    :rtype: str
    :raises RuntimeError: If PCI rescan or lspci command execution failed.
    """
    # The lookup logic is shared with DUT nodes, so delegate to DUTSetup.
    driver = DUTSetup.get_pci_dev_driver(node, pci_addr)
    return driver
def tg_set_interfaces_udev_rules(node):
    """Set udev rules for interfaces.

    Create udev rules file in /etc/udev/rules.d where are rules for each
    interface used by TG node, based on MAC interface has specific name.
    So after unbind and bind again to kernel driver interface has same
    name as before. This must be called after TG has set name for each
    port in topology dictionary.

    Example of a generated rule:
    SUBSYSTEM=="net", ACTION=="add", ATTR{address}=="52:54:00:e1:8a:0f",
    NAME="eth2"

    :param node: Node to set udev rules on (must be TG node).
    :type node: dict
    :raises RuntimeError: If setting of udev rules fails.
    """
    # NOTE(review): the SSH session setup ("ssh = SSH();
    # ssh.connect(node)") is missing in the provided source -- restore
    # from VCS.
    # Start from a clean rules file.
    cmd = f"rm -f {InterfaceUtil.__UDEV_IF_RULES_FILE}"
    ret_code, _, _ = ssh.exec_command_sudo(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")

    for interface in node[u"interfaces"].values():
        # One MAC -> name rule per topology interface.
        rule = u'SUBSYSTEM==\\"net\\", ACTION==\\"add\\", ATTR{address}' + \
            u'==\\"' + interface[u"mac_address"] + u'\\", NAME=\\"' + \
            interface[u"name"] + u'\\"'
        # NOTE(review): the trailing single quote after the file name
        # looks unbalanced relative to the opening of the command --
        # confirm the intended shell quoting against the original file.
        cmd = f"sh -c \"echo '{rule}'\" >> " \
            f"{InterfaceUtil.__UDEV_IF_RULES_FILE}'"

        ret_code, _, _ = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")

    # Restart udev so the new rules take effect; failure here is not
    # checked.
    cmd = u"/etc/init.d/udev restart"
    ssh.exec_command_sudo(cmd)
def tg_set_interfaces_default_driver(node):
    """Set interfaces default driver specified in topology yaml file.

    :param node: Node to setup interfaces driver on (must be TG node).
    :type node: dict
    """
    for iface in node[u"interfaces"].values():
        InterfaceUtil.tg_set_interface_driver(
            node, iface[u"pci_address"], iface[u"driver"]
        )
def update_vpp_interface_data_on_node(node):
    """Update vpp generated interface data for a given node in DICT__nodes.

    Updates interface names, software if index numbers and any other details
    generated specifically by vpp that are unknown before testcase run.
    It does this by dumping interface list from all devices using python
    api, and pairing known information from topology (mac address) to state
    of the node.

    :param node: Node selected from DICT__nodes.
    :type node: dict
    """
    interface_list = InterfaceUtil.vpp_get_interface_data(node)
    # Index the dumped interfaces by MAC for pairing with topology.
    interface_dict = dict()
    for ifc in interface_list:
        interface_dict[ifc[u"l2_address"]] = ifc

    for if_name, if_data in node[u"interfaces"].items():
        ifc_dict = interface_dict.get(if_data[u"mac_address"])
        if ifc_dict is not None:
            if_data[u"name"] = ifc_dict[u"interface_name"]
            if_data[u"vpp_sw_index"] = ifc_dict[u"sw_if_index"]
            # mtu is a list in the dump; element 0 is the L3 value used.
            if_data[u"mtu"] = ifc_dict[u"mtu"][0]
            # NOTE(review): the "logger.debug(" opener for the message
            # below is missing in the provided source.
                f"Interface {if_name} found by MAC "
                f"{if_data[u'mac_address']}"
        # NOTE(review): the "else:" opener and its logger call opener
        # are missing in the provided source.
                f"Interface {if_name} not found by MAC "
                f"{if_data[u'mac_address']}"
            # Unmatched interfaces are marked as not present in VPP.
            if_data[u"vpp_sw_index"] = None
def update_nic_interface_names(node):
    """Update interface names based on nic type and PCI address.

    This method updates interface names in the same format as VPP does.

    :param node: Node dictionary.
    :type node: dict
    """
    # Known NIC models mapped to the VPP-style name prefix.
    prefix_by_model = {
        u"Intel-XL710": u"FortyGigabitEthernet",
        u"Intel-X710": u"TenGigabitEthernet",
        u"Intel-X520-DA2": u"TenGigabitEthernet",
        u"Cisco-VIC-1385": u"FortyGigabitEthernet",
        u"Cisco-VIC-1227": u"TenGigabitEthernet",
    }
    for ifc in node[u"interfaces"].values():
        # "dddd:bb:ss.f" -> ["dddd", "bb", "ss", "f"]
        pci_fields = ifc[u"pci_address"].replace(u".", u":").split(u":")
        # VPP encodes bus/slot/function as lowercase hex joined by "/".
        loc = f"{int(pci_fields[1], 16):x}/{int(pci_fields[2], 16):x}/" \
            f"{int(pci_fields[3], 16):x}"
        prefix = prefix_by_model.get(ifc[u"model"], u"UnknownEthernet")
        ifc[u"name"] = prefix + loc
def update_nic_interface_names_on_all_duts(nodes):
    """Update interface names based on nic type and PCI address on all DUTs.

    This method updates interface names in the same format as VPP does.

    :param nodes: Topology nodes.
    :type nodes: dict
    """
    for node in nodes.values():
        if node[u"type"] != NodeType.DUT:
            continue
        InterfaceUtil.update_nic_interface_names(node)
def update_tg_interface_data_on_node(node, skip_tg_udev=False):
    """Update interface name for TG/linux node in DICT__nodes.

    Example of the MAC -> device-name mapping produced by the shell
    command below:
    # for dev in `ls /sys/class/net/`;
    > do echo "\"`cat /sys/class/net/$dev/address`\": \"$dev\""; done
    "52:54:00:9f:82:63": "eth0"
    "52:54:00:77:ae:a9": "eth1"
    "52:54:00:e1:8a:0f": "eth2"
    "00:00:00:00:00:00": "lo"

    :param node: Node selected from DICT__nodes.
    :param skip_tg_udev: Skip udev rename on TG node.
    :type node: dict
    :type skip_tg_udev: bool
    :raises RuntimeError: If getting of interface name and MAC fails.
    """
    # First setup interface driver specified in yaml file
    InterfaceUtil.tg_set_interfaces_default_driver(node)

    # Get interface names
    # NOTE(review): the SSH session setup is missing in the provided
    # source; "ssh" below is otherwise unbound -- restore from VCS.
    cmd = u'for dev in `ls /sys/class/net/`; do echo "\\"`cat ' \
        u'/sys/class/net/$dev/address`\\": \\"$dev\\""; done;'

    ret_code, stdout, _ = ssh.exec_command(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(u"Get interface name and MAC failed")
    # Wrap the per-line '"mac": "dev"' output into one JSON object.
    tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"

    interfaces = JsonParser().parse_data(tmp)
    for interface in node[u"interfaces"].values():
        name = interfaces.get(interface[u"mac_address"])
        # NOTE(review): a guard for name being None appears to be
        # missing in the provided source -- confirm against VCS.
        interface[u"name"] = name

    # Set udev rules for interfaces
    # NOTE(review): the "if not skip_tg_udev:" guard is missing in the
    # provided source.
        InterfaceUtil.tg_set_interfaces_udev_rules(node)
def iface_update_numa_node(node):
    """For all interfaces from topology file update numa node based on
    information from the node.

    :param node: Node from topology.
    :type node: dict
    :raises ValueError: If numa node is less than 0.
    :raises RuntimeError: If update of numa node failed.
    """
    def check_cpu_node_count(node_n, val):
        # Helper validating/normalizing the numa value read from sysfs.
        # NOTE(review): most of this helper's body is missing in the
        # provided source -- restore from VCS.
        if CpuUtils.cpu_node_count(node_n) == 1:
    # NOTE(review): the SSH session setup is missing in the provided
    # source; "ssh" below is otherwise unbound.
    for if_key in Topology.get_node_interfaces(node):
        if_pci = Topology.get_interface_pci_addr(node, if_key)
        # NUMA locality of a PCI device is exposed by the kernel here.
        cmd = f"cat /sys/bus/pci/devices/{if_pci}/numa_node"
        # NOTE(review): the retry loop, success check, and the error
        # handling around the next lines are missing in the provided
        # source.
        ret, out, _ = ssh.exec_command(cmd)
            numa_node = check_cpu_node_count(node, out)
            f"Reading numa location failed for: {if_pci}"
        Topology.set_interface_numa_node(
            node, if_key, numa_node
        )
        raise RuntimeError(f"Update numa node failed for: {if_pci}")
def update_all_numa_nodes(nodes, skip_tg=False):
    """For all nodes and all their interfaces from topology file update numa
    node information based on information from the node.

    :param nodes: Nodes in the topology.
    :param skip_tg: Skip TG node.
    :type nodes: dict
    :type skip_tg: bool
    """
    for node in nodes.values():
        node_type = node[u"type"]
        # DUTs are always updated; TGs only unless explicitly skipped.
        if node_type == NodeType.DUT \
                or (node_type == NodeType.TG and not skip_tg):
            InterfaceUtil.iface_update_numa_node(node)
def update_all_interface_data_on_all_nodes(
        nodes, skip_tg=False, skip_tg_udev=False, numa_node=False):
    """Update interface names on all nodes in DICT__nodes.

    This method updates the topology dictionary by querying interface lists
    of all nodes mentioned in the topology dictionary.

    :param nodes: Nodes in the topology.
    :param skip_tg: Skip TG node.
    :param skip_tg_udev: Skip udev rename on TG node.
    :param numa_node: Retrieve numa_node location.
    :type nodes: dict
    :type skip_tg: bool
    :type skip_tg_udev: bool
    :type numa_node: bool
    """
    for node_data in nodes.values():
        if node_data[u"type"] == NodeType.DUT:
            InterfaceUtil.update_vpp_interface_data_on_node(node_data)
        elif node_data[u"type"] == NodeType.TG and not skip_tg:
            InterfaceUtil.update_tg_interface_data_on_node(
                node_data, skip_tg_udev)
    # NOTE(review): the "if numa_node:" guard and the second loop opener
    # over nodes.values() are missing in the provided source -- restore
    # from VCS.
        if node_data[u"type"] == NodeType.DUT:
            InterfaceUtil.iface_update_numa_node(node_data)
        elif node_data[u"type"] == NodeType.TG and not skip_tg:
            InterfaceUtil.iface_update_numa_node(node_data)
def create_vlan_subinterface(node, interface, vlan):
    """Create VLAN sub-interface on node.

    :param node: Node to add VLAN subinterface on.
    :param interface: Interface name or index on which create VLAN
        subinterface.
    :param vlan: VLAN ID of the subinterface to be created.
    :type node: dict
    :type interface: str or int
    :type vlan: int
    :returns: Name and index of created subinterface.
    :rtype: tuple
    :raises RuntimeError: if it is unable to create VLAN subinterface on the
        node or interface cannot be converted.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"create_vlan_subif"
    # NOTE(review): the "args = dict(" opener and the vlan_id entry are
    # missing in the provided source.
        sw_if_index=sw_if_index,
    err_msg = f"Failed to create VLAN sub-interface on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new sub-interface in the topology dictionary.
    if_key = Topology.add_new_port(node, u"vlan_subif")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{vlan}", sw_if_index
def create_vxlan_interface(node, vni, source_ip, destination_ip):
    """Create VXLAN interface and return sw if index of created interface.

    :param node: Node where to create VXLAN interface.
    :param vni: VXLAN Network Identifier.
    :param source_ip: Source IP of a VXLAN Tunnel End Point.
    :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
    :type node: dict
    :type vni: int
    :type source_ip: str
    :type destination_ip: str
    :returns: SW IF INDEX of created interface.
    :rtype: int
    :raises RuntimeError: if it is unable to create VxLAN interface on the
        node.
    """
    src_address = ip_address(source_ip)
    dst_address = ip_address(destination_ip)

    cmd = u"vxlan_add_del_tunnel"
    # NOTE(review): the "args = dict(" opener and the is_add/vni entries
    # are missing in the provided source.
        # Address family follows the source address version.
        is_ipv6=1 if src_address.version == 6 else 0,
        instance=Constants.BITWISE_NON_ZERO,
        src_address=src_address.packed,
        dst_address=dst_address.packed,
        mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
        decap_next_index=Constants.BITWISE_NON_ZERO,
    err_msg = f"Failed to create VXLAN tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new tunnel in the topology dictionary.
    if_key = Topology.add_new_port(node, u"vxlan_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
    # NOTE(review): "return sw_if_index" is missing in the provided
    # source.
def set_vxlan_bypass(node, interface=None):
    """Add the 'ip4-vxlan-bypass' graph node for a given interface.

    By adding the IPv4 vxlan-bypass graph node to an interface, the node
    checks for and validate input vxlan packet and bypass ip4-lookup,
    ip4-local, ip4-udp-lookup nodes to speedup vxlan packet forwarding.
    This node will cause a small extra overhead for non-vxlan packets.

    :param node: Node where to set VXLAN bypass.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :raises RuntimeError: if it failed to set VXLAN bypass on interface.
    """
    sw_if_index = InterfaceUtil.get_interface_index(node, interface)

    cmd = u"sw_interface_set_vxlan_bypass"
    # NOTE(review): the "args = dict(" opener and its remaining entries
    # are missing in the provided source.
        sw_if_index=sw_if_index,
    err_msg = f"Failed to set VXLAN bypass on interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_replies(err_msg)
def vxlan_dump(node, interface=None):
    """Get VxLAN data for the given interface.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
        If None, information about all VxLAN interfaces is returned.
    :type node: dict
    :type interface: int or str
    :returns: Dictionary containing data for the given VxLAN interface or if
        interface=None, the list of dictionaries with all VxLAN interfaces.
    :rtype: dict or list
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    def process_vxlan_dump(vxlan_dump):
        """Process vxlan dump.

        Converts raw address bytes into ip_address objects.

        :param vxlan_dump: Vxlan interface dump.
        :type vxlan_dump: dict
        :returns: Processed vxlan interface dump.
        :rtype: dict
        """
        if vxlan_dump[u"is_ipv6"]:
            vxlan_dump[u"src_address"] = \
                ip_address(vxlan_dump[u"src_address"])
            vxlan_dump[u"dst_address"] = \
                ip_address(vxlan_dump[u"dst_address"])
        # NOTE(review): "else:" opener missing in the provided source;
        # for IPv4 only the first 4 bytes of the field are meaningful.
            vxlan_dump[u"src_address"] = \
                ip_address(vxlan_dump[u"src_address"][0:4])
            vxlan_dump[u"dst_address"] = \
                ip_address(vxlan_dump[u"dst_address"][0:4])
        # NOTE(review): "return vxlan_dump" missing in the provided
        # source.

    if interface is not None:
        sw_if_index = InterfaceUtil.get_interface_index(node, interface)
    # NOTE(review): "else:" opener missing in the provided source; the
    # all-ones index requests a dump of every tunnel.
        sw_if_index = int(Constants.BITWISE_NON_ZERO)

    cmd = u"vxlan_tunnel_dump"
    # NOTE(review): "args = dict(" opener and closer missing in the
    # provided source.
        sw_if_index=sw_if_index
    err_msg = f"Failed to get VXLAN dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)

    data = list() if interface is None else dict()
    # NOTE(review): "for dump in details:" loop opener missing in the
    # provided source.
        if interface is None:
            data.append(process_vxlan_dump(dump))
        elif dump[u"sw_if_index"] == sw_if_index:
            data = process_vxlan_dump(dump)

    logger.debug(f"VXLAN data:\n{data}")
    # NOTE(review): "return data" missing in the provided source.
def create_subinterface(
        node, interface, sub_id, outer_vlan_id=None, inner_vlan_id=None,
    # NOTE(review): the final parameter line (type_subif with its
    # default) is missing in the provided source -- restore from VCS.
    """Create sub-interface on node. It is possible to set required
    sub-interface type and VLAN tag(s).

    :param node: Node to add sub-interface.
    :param interface: Interface name on which create sub-interface.
    :param sub_id: ID of the sub-interface to be created.
    :param outer_vlan_id: Optional outer VLAN ID.
    :param inner_vlan_id: Optional inner VLAN ID.
    :param type_subif: Optional type of sub-interface. Values supported by
        VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
        [default_sub]
    :type node: dict
    :type interface: str or int
    :type sub_id: int
    :type outer_vlan_id: int
    :type inner_vlan_id: int
    :type type_subif: str
    :returns: Name and index of created sub-interface.
    :rtype: tuple
    :raises RuntimeError: If it is not possible to create sub-interface.
    """
    subif_types = type_subif.split()

    # Translate the space-separated type keywords into API flag bits.
    # NOTE(review): the "flags = 0" initializer is missing in the
    # provided source.
    if u"no_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_NO_TAGS
    if u"one_tag" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_ONE_TAG
    if u"two_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_TWO_TAGS
    if u"dot1ad" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DOT1AD
    if u"exact_match" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_EXACT_MATCH
    if u"default_sub" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DEFAULT
    # A pure default sub-interface matches any outer/inner VLAN ID.
    if type_subif == u"default_sub":
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_INNER_VLAN_ID_ANY\
            | SubInterfaceFlags.SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY

    cmd = u"create_subif"
    # NOTE(review): the "args = dict(" opener, the sub_id entry, the
    # "else int(flags)," continuation and the closing ")" are missing in
    # the provided source.
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        sub_if_flags=flags.value if hasattr(flags, u"value")
        outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
        inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0
    err_msg = f"Failed to create sub-interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new sub-interface in the topology dictionary.
    if_key = Topology.add_new_port(node, u"subinterface")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{sub_id}", sw_if_index
def create_gre_tunnel_interface(node, source_ip, destination_ip):
    """Create GRE tunnel interface on node.

    :param node: VPP node to add tunnel interface.
    :param source_ip: Source of the GRE tunnel.
    :param destination_ip: Destination of the GRE tunnel.
    :type node: dict
    :type source_ip: str
    :type destination_ip: str
    :returns: Name and index of created GRE tunnel interface.
    :rtype: tuple
    :raises RuntimeError: If unable to create GRE tunnel interface.
    """
    cmd = u"gre_tunnel_add_del"
    # NOTE(review): the "args = dict(" opener and several tunnel fields
    # (is_add, src, and the dict closer among them) are missing in the
    # provided source -- restore from VCS.
        instance=Constants.BITWISE_NON_ZERO,
        dst=str(destination_ip),
    err_msg = f"Failed to create GRE tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new tunnel in the topology dictionary.
    if_key = Topology.add_new_port(node, u"gre_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return ifc_name, sw_if_index
def vpp_create_loopback(node, mac=None):
    """Create loopback interface on VPP node.

    :param node: Node to create loopback interface on.
    :param mac: Optional MAC address for loopback interface.
    :type node: dict
    :type mac: str
    :returns: SW interface index.
    :rtype: int
    :raises RuntimeError: If it is not possible to create loopback on the
        node.
    """
    cmd = u"create_loopback"
    # NOTE(review): the "args = dict(" opener and closer are missing in
    # the provided source.
        mac_address=L2Util.mac_to_bin(mac) if mac else 0
    err_msg = f"Failed to create loopback interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new loopback in the topology dictionary.
    if_key = Topology.add_new_port(node, u"loopback")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
    # Read back the effective MAC and store it in the topology too.
    mac = InterfaceUtil.vpp_get_interface_mac(node, ifc_name)
    Topology.update_interface_mac_address(node, if_key, mac)
    # NOTE(review): "return sw_if_index" is missing in the provided
    # source.
def vpp_create_bond_interface(node, mode, load_balance=None, mac=None):
    """Create bond interface on VPP node.

    :param node: DUT node from topology.
    :param mode: Link bonding mode.
    :param load_balance: Load balance (optional, valid for xor and lacp
        modes, otherwise ignored).
    :param mac: MAC address to assign to the bond interface (optional).
    :type node: dict
    :type mode: str
    :type load_balance: str
    :type mac: str
    :returns: Interface key (name) in topology.
    :rtype: str
    :raises RuntimeError: If it is not possible to create bond interface on
        the node.
    """
    cmd = u"bond_create"
    # NOTE(review): the "args = dict(" opener is missing in the provided
    # source.
        id=int(Constants.BITWISE_NON_ZERO),
        use_custom_mac=bool(mac is not None),
        mac_address=L2Util.mac_to_bin(mac) if mac else None,
        # The textual mode (e.g. "lacp", "active-backup") is mapped onto
        # a LinkBondMode member by name.
        # NOTE(review): the "mode=getattr(LinkBondMode," opener for the
        # next line is missing in the provided source.
            f"BOND_API_MODE_{mode.replace(u'-', u'_').upper()}"
        # NOTE(review): the ").value," closer is missing here.
        lb=0 if load_balance is None else getattr(
            LinkBondLoadBalanceAlgo,
            f"BOND_API_LB_ALGO_{load_balance.upper()}"
        # NOTE(review): the ").value" closer and the dict(...) closing
        # ")" are missing in the provided source.
    err_msg = f"Failed to create bond interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the bond in the topology via the generic eth helper.
    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx=u"eth_bond"
    )
    if_key = Topology.get_interface_by_sw_index(node, sw_if_index)
    # NOTE(review): "return if_key" is missing in the provided source.
def add_eth_interface(
        node, ifc_name=None, sw_if_index=None, ifc_pfx=None,
        host_if_key=None):
    """Add ethernet interface to current topology.

    Either ifc_name or sw_if_index must be given; the missing one is
    resolved via VPP before the topology entry is updated.

    :param node: DUT node from topology.
    :param ifc_name: Name of the interface.
    :param sw_if_index: SW interface index.
    :param ifc_pfx: Interface key prefix.
    :param host_if_key: Host interface key from topology file.
    :type node: dict
    :type ifc_name: str
    :type sw_if_index: int
    :type ifc_pfx: str
    :type host_if_key: str
    """
    if_key = Topology.add_new_port(node, ifc_pfx)

    if ifc_name and sw_if_index is None:
        sw_if_index = InterfaceUtil.vpp_get_interface_sw_index(
            node, ifc_name
        )
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    if sw_if_index and ifc_name is None:
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
    ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_index)
    Topology.update_interface_mac_address(node, if_key, ifc_mac)
    if host_if_key is not None:
        # Inherit NUMA placement and PCI address from the host interface.
        Topology.set_interface_numa_node(
            node, if_key, Topology.get_interface_numa_node(
                node, host_if_key
            )
        )
        Topology.update_interface_pci_address(
            node, if_key, Topology.get_interface_pci_addr(node, host_if_key)
        )
def vpp_create_avf_interface(node, if_key, num_rx_queues=None):
    """Create AVF interface on VPP node.

    :param node: DUT node from topology.
    :param if_key: Interface key from topology file of interface
        to be bound to i40evf driver.
    :param num_rx_queues: Number of RX queues.
    :type node: dict
    :type if_key: str
    :type num_rx_queues: int
    :returns: Interface key (name) in topology.
    :rtype: str
    :raises RuntimeError: If it is not possible to create AVF interface on
        the node.
    """
    # Enable verbose AVF logging to aid debugging of driver issues.
    PapiSocketExecutor.run_cli_cmd(
        node, u"set logging class avf level debug"
    )

    cmd = u"avf_create"
    vf_pci_addr = Topology.get_interface_pci_addr(node, if_key)
    args = dict(
        pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
        enable_elog=0,
        # Zero means driver defaults for queue count and sizes.
        rxq_num=int(num_rx_queues) if num_rx_queues else 0,
        rxq_size=0,
        txq_size=0
    )
    err_msg = f"Failed to create AVF interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
        host_if_key=if_key
    )
    if_key = Topology.get_interface_by_sw_index(node, sw_if_index)

    return if_key
def vpp_create_rdma_interface(
        node, if_key, num_rx_queues=None, mode=u"auto"):
    """Create RDMA interface on VPP node.

    :param node: DUT node from topology.
    :param if_key: Physical interface key from topology file of interface
        to be bound to rdma-core driver.
    :param num_rx_queues: Number of RX queues.
    :param mode: RDMA interface mode - auto/ibv/dv.
    :type node: dict
    :type if_key: str
    :type num_rx_queues: int
    :type mode: str
    :returns: Interface key (name) in topology file.
    :rtype: str
    :raises RuntimeError: If it is not possible to create RDMA interface on
        the node.
    """
    cmd = u"rdma_create"
    pci_addr = Topology.get_interface_pci_addr(node, if_key)
    args = dict(
        name=InterfaceUtil.pci_to_eth(node, pci_addr),
        host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
        # Zero means driver default queue count.
        rxq_num=int(num_rx_queues) if num_rx_queues else 0,
        rxq_size=1024,
        txq_size=1024,
        mode=getattr(RdmaMode, f"RDMA_API_MODE_{mode.upper()}").value,
    )
    err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx=u"eth_rdma",
        host_if_key=if_key
    )

    return Topology.get_interface_by_sw_index(node, sw_if_index)
def vpp_enslave_physical_interface(node, interface, bond_if):
    """Enslave physical interface to bond interface on VPP node.

    :param node: DUT node from topology.
    :param interface: Physical interface key from topology file.
    :param bond_if: Bond interface key from topology file.
    :type node: dict
    :type interface: str
    :type bond_if: str
    :raises RuntimeError: If it is not possible to enslave physical
        interface to bond interface on the node.
    """
    cmd = u"bond_enslave"
    args = dict(
        sw_if_index=Topology.get_interface_sw_index(node, interface),
        bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
        is_passive=False,
        is_long_timeout=False
    )
    err_msg = f"Failed to enslave physical interface {interface} to bond " \
        f"interface {bond_if} on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def vpp_show_bond_data_on_node(node, verbose=False):
    """Show (detailed) bond information on VPP node.

    :param node: DUT node from topology.
    :param verbose: If detailed information is required or not.
    :type node: dict
    :type verbose: bool
    """
    cmd = u"sw_interface_bond_dump"
    err_msg = f"Failed to get bond interface dump on host {node[u'host']}"

    data = f"Bond data on node {node[u'host']}:\n"
    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd).get_details(err_msg)

    for bond in details:
        data += f"{bond[u'interface_name']}\n"
        # Strip the API enum prefixes for human-readable output.
        data += u" mode: {m}\n".format(
            m=bond[u"mode"].name.replace(u"BOND_API_MODE_", u"").lower()
        )
        data += u" load balance: {lb}\n".format(
            lb=bond[u"lb"].name.replace(u"BOND_API_LB_ALGO_", u"").lower()
        )
        data += f" number of active slaves: {bond[u'active_slaves']}\n"
        if verbose:
            slave_data = InterfaceUtil.vpp_bond_slave_dump(
                node, Topology.get_interface_by_sw_index(
                    node, bond[u"sw_if_index"]
                )
            )
            # Active (non-passive) slaves first.
            for slave in slave_data:
                if not slave[u"is_passive"]:
                    data += f" {slave[u'interface_name']}\n"
        data += f" number of slaves: {bond[u'slaves']}\n"
        if verbose:
            for slave in slave_data:
                data += f" {slave[u'interface_name']}\n"
        data += f" interface id: {bond[u'id']}\n"
        data += f" sw_if_index: {bond[u'sw_if_index']}\n"
    logger.info(data)
def vpp_bond_slave_dump(node, interface):
    """Get bond interface slave(s) data on VPP node.

    :param node: DUT node from topology.
    :param interface: Physical interface key from topology file.
    :type node: dict
    :type interface: str
    :returns: Bond slave interface data.
    :rtype: dict
    """
    cmd = u"sw_interface_slave_dump"
    args = dict(
        sw_if_index=Topology.get_interface_sw_index(node, interface)
    )
    err_msg = f"Failed to get slave dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)

    logger.debug(f"Slave data:\n{details}")
    return details
def vpp_show_bond_data_on_all_nodes(nodes, verbose=False):
    """Show (detailed) bond information on all VPP nodes in DICT__nodes.

    :param nodes: Nodes in the topology.
    :param verbose: If detailed information is required or not.
    :type nodes: dict
    :type verbose: bool
    """
    # Only DUT nodes run VPP; skip TG and other node types.
    for node_data in nodes.values():
        if node_data[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_show_bond_data_on_node(node_data, verbose)
def vpp_enable_input_acl_interface(
        node, interface, ip_version, table_index):
    """Enable input acl on interface.

    :param node: VPP node to setup interface for input acl.
    :param interface: Interface to setup input acl.
    :param ip_version: Version of IP protocol.
    :param table_index: Classify table index.
    :type node: dict
    :type interface: str or int
    :type ip_version: str
    :type table_index: int
    """
    cmd = u"input_acl_set_interface"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        # Only the table matching ip_version is set; the others are
        # disabled via the all-ones sentinel value.
        ip4_table_index=table_index if ip_version == u"ip4"
        else Constants.BITWISE_NON_ZERO,
        ip6_table_index=table_index if ip_version == u"ip6"
        else Constants.BITWISE_NON_ZERO,
        l2_table_index=table_index if ip_version == u"l2"
        else Constants.BITWISE_NON_ZERO,
        is_add=1
    )
    err_msg = f"Failed to enable input acl on interface {interface}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def get_interface_classify_table(node, interface):
    """Get name of classify table for the given interface.

    TODO: Move to Classify.py.

    :param node: VPP node to get data from.
    :param interface: Name or sw_if_index of a specific interface.
    :type node: dict
    :type interface: str or int
    :returns: Classify table name.
    :rtype: str
    """
    if isinstance(interface, str):
        sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
    else:
        sw_if_index = interface

    cmd = u"classify_table_by_interface"
    args = dict(
        sw_if_index=sw_if_index
    )
    err_msg = f"Failed to get classify table name by interface {interface}"
    with PapiSocketExecutor(node) as papi_exec:
        reply = papi_exec.add(cmd, **args).get_reply(err_msg)

    return reply
def get_sw_if_index(node, interface_name):
    """Get sw_if_index for the given interface from actual interface dump.

    :param node: VPP node to get interface data from.
    :param interface_name: Name of the specific interface.
    :type node: dict
    :type interface_name: str
    :returns: sw_if_index of the given interface.
    :rtype: str
    """
    interface_data = InterfaceUtil.vpp_get_interface_data(
        node, interface=interface_name
    )
    return interface_data.get(u"sw_if_index")
def vxlan_gpe_dump(node, interface_name=None):
    """Get VxLAN GPE data for the given interface.

    :param node: VPP node to get interface data from.
    :param interface_name: Name of the specific interface. If None,
        information about all VxLAN GPE interfaces is returned.
    :type node: dict
    :type interface_name: str
    :returns: Dictionary containing data for the given VxLAN GPE interface
        or if interface=None, the list of dictionaries with all VxLAN GPE
        interfaces.
    :rtype: dict or list
    """
    def process_vxlan_gpe_dump(vxlan_dump):
        """Process vxlan_gpe dump.

        :param vxlan_dump: Vxlan_gpe interface dump.
        :type vxlan_dump: dict
        :returns: Processed vxlan_gpe interface dump.
        :rtype: dict
        """
        if vxlan_dump[u"is_ipv6"]:
            vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"])
            vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"])
        else:
            # IPv4 addresses occupy only the first 4 bytes of the field.
            vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"][0:4])
            vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"][0:4])
        return vxlan_dump

    if interface_name is not None:
        sw_if_index = InterfaceUtil.get_interface_index(
            node, interface_name
        )
    else:
        # All-ones sentinel asks VPP to dump every tunnel.
        sw_if_index = int(Constants.BITWISE_NON_ZERO)

    cmd = u"vxlan_gpe_tunnel_dump"
    args = dict(
        sw_if_index=sw_if_index
    )
    err_msg = f"Failed to get VXLAN-GPE dump on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)

    data = list() if interface_name is None else dict()
    for dump in details:
        if interface_name is None:
            data.append(process_vxlan_gpe_dump(dump))
        elif dump[u"sw_if_index"] == sw_if_index:
            data = process_vxlan_gpe_dump(dump)
            break

    logger.debug(f"VXLAN-GPE data:\n{data}")
    return data
def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
    """Assign VPP interface to specific VRF/FIB table.

    :param node: VPP node where the FIB and interface are located.
    :param interface: Interface to be assigned to FIB.
    :param table_id: VRF table ID.
    :param ipv6: Assign to IPv6 table. Default False.
    :type node: dict
    :type interface: str or int
    :type table_id: int
    :type ipv6: bool
    """
    cmd = u"sw_interface_set_table"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        # Without this flag the assignment would silently target the
        # IPv4 table regardless of the ipv6 argument.
        is_ipv6=ipv6,
        vrf_id=int(table_id)
    )
    err_msg = f"Failed to assign interface {interface} to FIB table"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def set_linux_interface_mac(
        node, interface, mac, namespace=None, vf_id=None):
    """Set MAC address for interface in linux.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param mac: MAC to be assigned to interface.
    :param namespace: Execute command in namespace. Optional
    :param vf_id: Virtual Function id. Optional
    :type node: dict
    :type interface: str
    :type mac: str
    :type namespace: str
    :type vf_id: int
    """
    # VF MACs are set on the PF device via "vf N mac", otherwise
    # plain "address" is used.
    mac_str = f"vf {vf_id} mac {mac}" if vf_id is not None \
        else f"address {mac}"
    ns_str = f"ip netns exec {namespace}" if namespace else u""

    cmd = f"{ns_str} ip link set {interface} {mac_str}"
    exec_cmd_no_error(node, cmd, sudo=True)
def set_linux_interface_trust_on(
        node, interface, namespace=None, vf_id=None):
    """Set trust on (promisc) for interface in linux.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param namespace: Execute command in namespace. Optional
    :param vf_id: Virtual Function id. Optional
    :type node: dict
    :type interface: str
    :type namespace: str
    :type vf_id: int
    """
    trust_str = f"vf {vf_id} trust on" if vf_id is not None else u"trust on"
    ns_str = f"ip netns exec {namespace}" if namespace else u""

    cmd = f"{ns_str} ip link set dev {interface} {trust_str}"
    exec_cmd_no_error(node, cmd, sudo=True)
def set_linux_interface_spoof_off(
        node, interface, namespace=None, vf_id=None):
    """Set spoof off for interface in linux.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param namespace: Execute command in namespace. Optional
    :param vf_id: Virtual Function id. Optional
    :type node: dict
    :type interface: str
    :type namespace: str
    :type vf_id: int
    """
    spoof_str = f"vf {vf_id} spoof off" if vf_id is not None \
        else u"spoof off"
    ns_str = f"ip netns exec {namespace}" if namespace else u""

    cmd = f"{ns_str} ip link set dev {interface} {spoof_str}"
    exec_cmd_no_error(node, cmd, sudo=True)
def init_avf_interface(node, ifc_key, numvfs=1, osi_layer=u"L2"):
    """Init PCI device by creating VIFs and bind them to vfio-pci for AVF
    driver testing on DUT.

    :param node: DUT node.
    :param ifc_key: Interface key from topology file.
    :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
    :param osi_layer: OSI Layer type to initialize TG with.
        Default value "L2" sets linux interface spoof off.
    :type node: dict
    :type ifc_key: str
    :type numvfs: int
    :type osi_layer: str
    :returns: Virtual Function topology interface keys.
    :rtype: list
    :raises RuntimeError: If a reason preventing initialization is found.
    """
    # Read PCI address and driver.
    pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
    pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
    uio_driver = Topology.get_uio_driver(node)
    kernel_driver = Topology.get_interface_driver(node, ifc_key)
    if kernel_driver not in (u"i40e", u"i40evf"):
        raise RuntimeError(
            f"AVF needs i40e-compatible driver, not {kernel_driver} "
            f"at node {node[u'host']} ifc {ifc_key}"
        )
    current_driver = DUTSetup.get_pci_dev_driver(
        node, pf_pci_addr.replace(u":", r"\:"))

    # Stop VPP to prevent deadlock.
    VPPUtil.stop_vpp_service(node)
    if current_driver != kernel_driver:
        # PCI device must be re-bound to kernel driver before creating VFs.
        DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
        # Unbind from current driver.
        DUTSetup.pci_driver_unbind(node, pf_pci_addr)
        # Bind to kernel driver.
        DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)

    # Initialize PCI VFs.
    DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)

    vf_ifc_keys = list()
    # Set MAC address and bind each virtual function to uio driver.
    for vf_id in range(numvfs):
        # Derive a unique VF MAC from the PF MAC; the last octet encodes
        # the VF id.
        vf_mac_addr = u":".join(
            [pf_mac_addr[0], pf_mac_addr[2], pf_mac_addr[3], pf_mac_addr[4],
             pf_mac_addr[5], f"{vf_id:02x}"
             ]
        )
        # Shell glob resolves the PF netdev name on the remote node.
        pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"
        InterfaceUtil.set_linux_interface_trust_on(
            node, pf_dev, vf_id=vf_id
        )
        if osi_layer == u"L2":
            InterfaceUtil.set_linux_interface_spoof_off(
                node, pf_dev, vf_id=vf_id
            )
        InterfaceUtil.set_linux_interface_mac(
            node, pf_dev, vf_mac_addr, vf_id=vf_id
        )

        DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
        DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)

        # Add newly created ports into topology file
        vf_ifc_name = f"{ifc_key}_vif"
        vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
        vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
        Topology.update_interface_name(
            node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
        )
        Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
        Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
        Topology.set_interface_numa_node(
            node, vf_ifc_key, Topology.get_interface_numa_node(
                node, ifc_key
            )
        )
        vf_ifc_keys.append(vf_ifc_key)

    return vf_ifc_keys
def vpp_sw_interface_rx_placement_dump(node):
    """Dump VPP interface RX placement on node.

    :param node: Node to run command on.
    :type node: dict
    :returns: Thread mapping information as a list of dictionaries.
    :rtype: list
    """
    cmd = u"sw_interface_rx_placement_dump"
    err_msg = f"Failed to run '{cmd}' PAPI command on host {node[u'host']}!"
    with PapiSocketExecutor(node) as papi_exec:
        # Batch one dump request per known interface, execute all at once.
        for ifc in node[u"interfaces"].values():
            if ifc[u"vpp_sw_index"] is not None:
                papi_exec.add(cmd, sw_if_index=ifc[u"vpp_sw_index"])
        details = papi_exec.get_details(err_msg)
    return sorted(details, key=lambda k: k[u"sw_if_index"])
def vpp_sw_interface_set_rx_placement(
        node, sw_if_index, queue_id, worker_id):
    """Set interface RX placement to worker on node.

    :param node: Node to run command on.
    :param sw_if_index: VPP SW interface index.
    :param queue_id: VPP interface queue ID.
    :param worker_id: VPP worker ID (indexing from 0).
    :type node: dict
    :type sw_if_index: int
    :type queue_id: int
    :type worker_id: int
    :raises RuntimeError: If failed to run command on host or if no API
        reply received.
    """
    cmd = u"sw_interface_set_rx_placement"
    err_msg = f"Failed to set interface RX placement to worker " \
        f"on host {node[u'host']}!"
    args = dict(
        sw_if_index=sw_if_index,
        queue_id=queue_id,
        worker_id=worker_id,
        is_main=False
    )
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def vpp_round_robin_rx_placement(node, prefix):
    """Set Round Robin interface RX placement on all worker threads
    on node.

    :param node: Topology nodes.
    :param prefix: Interface name prefix.
    :type node: dict
    :type prefix: str
    """
    worker_id = 0
    # First thread is the main thread, it does not process RX.
    worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
    if not worker_cnt:
        # Nothing to distribute over when VPP runs without workers.
        return
    for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
        for interface in node[u"interfaces"].values():
            if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
                    and prefix in interface[u"name"]:
                InterfaceUtil.vpp_sw_interface_set_rx_placement(
                    node, placement[u"sw_if_index"], placement[u"queue_id"],
                    worker_id % worker_cnt
                )
                worker_id += 1
def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix):
    """Set Round Robin interface RX placement on all worker threads
    on all DUTs.

    :param nodes: Topology nodes.
    :param prefix: Interface name prefix.
    :type nodes: dict
    :type prefix: str
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)