1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Interface util library."""
16 from time import sleep
18 from enum import IntEnum
19 from ipaddress import ip_address
20 from robot.api import logger
22 from resources.libraries.python.Constants import Constants
23 from resources.libraries.python.CpuUtils import CpuUtils
24 from resources.libraries.python.DUTSetup import DUTSetup
25 from resources.libraries.python.L2Util import L2Util
26 from resources.libraries.python.PapiExecutor import PapiSocketExecutor
27 from resources.libraries.python.parsers.JsonParser import JsonParser
28 from resources.libraries.python.ssh import SSH, exec_cmd_no_error
29 from resources.libraries.python.topology import NodeType, Topology
30 from resources.libraries.python.VPPUtil import VPPUtil
class InterfaceStatusFlags(IntEnum):
    """Interface status flags.

    Bitmask values used with the sw_interface_set_flags API call
    (see set_interface_state) and reported in the 'flags' field of
    interface dumps.
    """
    # Administrative state is up.
    IF_STATUS_API_FLAG_ADMIN_UP = 1
    # Physical link is up.
    IF_STATUS_API_FLAG_LINK_UP = 2
class MtuProto(IntEnum):
    """MTU protocol."""
    MTU_PROTO_API_MPLS = 3
class LinkDuplex(IntEnum):
    """Link duplex."""
    LINK_DUPLEX_API_UNKNOWN = 0
    LINK_DUPLEX_API_HALF = 1
    LINK_DUPLEX_API_FULL = 2
class SubInterfaceFlags(IntEnum):
    """Sub-interface flags.

    Bitmask values; combined with bitwise OR when building the
    sub_if_flags argument of create_subif (see create_subinterface).
    """
    SUB_IF_API_FLAG_NO_TAGS = 1
    SUB_IF_API_FLAG_ONE_TAG = 2
    SUB_IF_API_FLAG_TWO_TAGS = 4
    SUB_IF_API_FLAG_DOT1AD = 8
    SUB_IF_API_FLAG_EXACT_MATCH = 16
    SUB_IF_API_FLAG_DEFAULT = 32
    SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY = 64
    SUB_IF_API_FLAG_INNER_VLAN_ID_ANY = 128
    SUB_IF_API_FLAG_DOT1AH = 256
class RxMode(IntEnum):
    """Rx mode."""
    RX_MODE_API_UNKNOWN = 0
    RX_MODE_API_POLLING = 1
    RX_MODE_API_INTERRUPT = 2
    RX_MODE_API_ADAPTIVE = 3
    RX_MODE_API_DEFAULT = 4
class IfType(IntEnum):
    """Interface type."""
    IF_API_TYPE_HARDWARE = 0
class LinkBondLoadBalanceAlgo(IntEnum):
    """Link bonding load balance algorithm.

    Member names are resolved dynamically from the user-supplied
    load_balance string in vpp_create_bond_interface
    ('l34' -> BOND_API_LB_ALGO_L34, ...).
    """
    BOND_API_LB_ALGO_L2 = 0
    BOND_API_LB_ALGO_L34 = 1
    BOND_API_LB_ALGO_L23 = 2
    BOND_API_LB_ALGO_RR = 3
    BOND_API_LB_ALGO_BC = 4
    BOND_API_LB_ALGO_AB = 5
class LinkBondMode(IntEnum):
    """Link bonding mode.

    Member names are resolved dynamically from the user-supplied mode
    string in vpp_create_bond_interface
    ('round-robin' -> BOND_API_MODE_ROUND_ROBIN, ...).
    """
    BOND_API_MODE_ROUND_ROBIN = 1
    BOND_API_MODE_ACTIVE_BACKUP = 2
    BOND_API_MODE_XOR = 3
    BOND_API_MODE_BROADCAST = 4
    BOND_API_MODE_LACP = 5
106 class InterfaceUtil(object):
107 """General utilities for managing interfaces"""
109 __UDEV_IF_RULES_FILE = '/etc/udev/rules.d/10-network.rules'
112 def pci_to_int(pci_str):
113 """Convert PCI address from string format (0000:18:0a.0) to
114 integer representation (169345024).
116 :param pci_str: PCI address in string representation.
118 :returns: Integer representation of PCI address.
121 pci = list(pci_str.split(':')[0:2])
122 pci.extend(pci_str.split(':')[2].split('.'))
124 return (int(pci[0], 16) | int(pci[1], 16) << 16 |
125 int(pci[2], 16) << 24 | int(pci[3], 16) << 29)
128 def get_interface_index(node, interface):
129 """Get interface sw_if_index from topology file.
131 :param node: Node where the interface is.
132 :param interface: Numeric index or name string of a specific interface.
134 :type interface: str or int
135 :returns: SW interface index.
139 sw_if_index = int(interface)
141 sw_if_index = Topology.get_interface_sw_index(node, interface)
142 if sw_if_index is None:
144 Topology.get_interface_sw_index_by_name(node, interface)
145 except TypeError as err:
146 raise TypeError('Wrong interface format {ifc}: {err}'.format(
147 ifc=interface, err=err.message))
152 def set_interface_state(node, interface, state, if_type='key'):
153 """Set interface state on a node.
155 Function can be used for DUTs as well as for TGs.
157 :param node: Node where the interface is.
158 :param interface: Interface key or sw_if_index or name.
159 :param state: One of 'up' or 'down'.
160 :param if_type: Interface type
162 :type interface: str or int
166 :raises ValueError: If the interface type is unknown.
167 :raises ValueError: If the state of interface is unexpected.
168 :raises ValueError: If the node has an unknown node type.
171 if isinstance(interface, basestring):
172 sw_if_index = Topology.get_interface_sw_index(node, interface)
173 iface_name = Topology.get_interface_name(node, interface)
175 sw_if_index = interface
176 elif if_type == 'name':
177 iface_key = Topology.get_interface_by_name(node, interface)
178 if iface_key is not None:
179 sw_if_index = Topology.get_interface_sw_index(node, iface_key)
180 iface_name = interface
182 raise ValueError('Unknown if_type: {type}'.format(type=if_type))
184 if node['type'] == NodeType.DUT:
186 flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
187 elif state == 'down':
190 raise ValueError('Unexpected interface state: {state}'.format(
192 cmd = 'sw_interface_set_flags'
193 err_msg = 'Failed to set interface state on host {host}'.format(
196 sw_if_index=sw_if_index,
198 with PapiSocketExecutor(node) as papi_exec:
199 papi_exec.add(cmd, **args).get_reply(err_msg)
200 elif node['type'] == NodeType.TG or node['type'] == NodeType.VM:
201 cmd = 'ip link set {ifc} {state}'.format(
202 ifc=iface_name, state=state)
203 exec_cmd_no_error(node, cmd, sudo=True)
205 raise ValueError('Node {} has unknown NodeType: "{}"'
206 .format(node['host'], node['type']))
209 def set_interface_ethernet_mtu(node, iface_key, mtu):
210 """Set Ethernet MTU for specified interface.
212 Function can be used only for TGs.
214 :param node: Node where the interface is.
215 :param iface_key: Interface key from topology file.
216 :param mtu: MTU to set.
221 :raises ValueError: If the node type is "DUT".
222 :raises ValueError: If the node has an unknown node type.
224 if node['type'] == NodeType.DUT:
225 raise ValueError('Node {}: Setting Ethernet MTU for interface '
226 'on DUT nodes not supported', node['host'])
227 elif node['type'] == NodeType.TG:
228 iface_name = Topology.get_interface_name(node, iface_key)
229 cmd = 'ip link set {} mtu {}'.format(iface_name, mtu)
230 exec_cmd_no_error(node, cmd, sudo=True)
232 raise ValueError('Node {} has unknown NodeType: "{}"'
233 .format(node['host'], node['type']))
236 def set_default_ethernet_mtu_on_all_interfaces_on_node(node):
237 """Set default Ethernet MTU on all interfaces on node.
239 Function can be used only for TGs.
241 :param node: Node where to set default MTU.
245 for ifc in node['interfaces']:
246 InterfaceUtil.set_interface_ethernet_mtu(node, ifc, 1500)
249 def vpp_set_interface_mtu(node, interface, mtu=9200):
250 """Set Ethernet MTU on interface.
252 :param node: VPP node.
253 :param interface: Interface to setup MTU. Default: 9200.
254 :param mtu: Ethernet MTU size in Bytes.
256 :type interface: str or int
259 if isinstance(interface, basestring):
260 sw_if_index = Topology.get_interface_sw_index(node, interface)
262 sw_if_index = interface
264 cmd = 'hw_interface_set_mtu'
265 err_msg = 'Failed to set interface MTU on host {host}'.format(
267 args = dict(sw_if_index=sw_if_index,
270 with PapiSocketExecutor(node) as papi_exec:
271 papi_exec.add(cmd, **args).get_reply(err_msg)
272 except AssertionError as err:
273 # TODO: Make failure tolerance optional.
274 logger.debug("Setting MTU failed. Expected?\n{err}".format(
278 def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
279 """Set Ethernet MTU on all interfaces.
281 :param node: VPP node.
282 :param mtu: Ethernet MTU size in Bytes. Default: 9200.
286 for interface in node['interfaces']:
287 InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
290 def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
291 """Set Ethernet MTU on all interfaces on all DUTs.
293 :param nodes: VPP nodes.
294 :param mtu: Ethernet MTU size in Bytes. Default: 9200.
298 for node in nodes.values():
299 if node['type'] == NodeType.DUT:
300 InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
303 def vpp_node_interfaces_ready_wait(node, retries=15):
304 """Wait until all interfaces with admin-up are in link-up state.
306 :param node: Node to wait on.
307 :param retries: Number of retries to check interface status (optional,
312 :raises RuntimeError: If any interface is not in link-up state after
313 defined number of retries.
315 for _ in xrange(0, retries):
317 out = InterfaceUtil.vpp_get_interface_data(node)
318 for interface in out:
319 if interface.get('flags') == 1:
320 not_ready.append(interface.get('interface_name'))
324 logger.debug('Interfaces still not in link-up state:\n{ifs} '
325 '\nWaiting...'.format(ifs=not_ready))
328 err = 'Timeout, interfaces not up:\n{ifs}'.format(ifs=not_ready) \
329 if 'not_ready' in locals() else 'No check executed!'
330 raise RuntimeError(err)
333 def all_vpp_interfaces_ready_wait(nodes, retries=15):
334 """Wait until all interfaces with admin-up are in link-up state for all
335 nodes in the topology.
337 :param nodes: Nodes in the topology.
338 :param retries: Number of retries to check interface status (optional,
344 for node in nodes.values():
345 if node['type'] == NodeType.DUT:
346 InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)
349 def vpp_get_interface_data(node, interface=None):
350 """Get all interface data from a VPP node. If a name or
351 sw_interface_index is provided, return only data for the matching
354 :param node: VPP node to get interface data from.
355 :param interface: Numeric index or name string of a specific interface.
357 :type interface: int or str
358 :returns: List of dictionaries containing data for each interface, or a
359 single dictionary for the specified interface.
361 :raises TypeError: if the data type of interface is neither basestring
364 def process_if_dump(if_dump):
365 """Process interface dump.
367 :param if_dump: Interface dump.
369 :returns: Processed interface dump.
372 if_dump['l2_address'] = str(if_dump['l2_address'])
373 if_dump['b_dmac'] = str(if_dump['b_dmac'])
374 if_dump['b_smac'] = str(if_dump['b_smac'])
375 if_dump['flags'] = if_dump['flags'].value
376 if_dump['type'] = if_dump['type'].value
377 if_dump['link_duplex'] = if_dump['link_duplex'].value
378 if_dump['sub_if_flags'] = if_dump['sub_if_flags'].value \
379 if hasattr(if_dump['sub_if_flags'], 'value') \
380 else int(if_dump['sub_if_flags'])
384 if interface is not None:
385 if isinstance(interface, basestring):
386 param = 'interface_name'
387 elif isinstance(interface, int):
388 param = 'sw_if_index'
390 raise TypeError('Wrong interface format {ifc}'.format(
395 cmd = 'sw_interface_dump'
397 name_filter_valid=False,
400 err_msg = 'Failed to get interface dump on host {host}'.format(
402 with PapiSocketExecutor(node) as papi_exec:
403 details = papi_exec.add(cmd, **args).get_details(err_msg)
404 logger.debug('Received data:\n{d!r}'.format(d=details))
406 data = list() if interface is None else dict()
408 if interface is None:
409 data.append(process_if_dump(dump))
410 elif str(dump.get(param)).rstrip('\x00') == str(interface):
411 data = process_if_dump(dump)
414 logger.debug('Interface data:\n{if_data}'.format(if_data=data))
418 def vpp_get_interface_name(node, sw_if_index):
419 """Get interface name for the given SW interface index from actual
422 :param node: VPP node to get interface data from.
423 :param sw_if_index: SW interface index of the specific interface.
425 :type sw_if_index: int
426 :returns: Name of the given interface.
429 if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
430 if if_data['sup_sw_if_index'] != if_data['sw_if_index']:
431 if_data = InterfaceUtil.vpp_get_interface_data(
432 node, if_data['sup_sw_if_index'])
434 return if_data.get('interface_name')
437 def vpp_get_interface_sw_index(node, interface_name):
438 """Get interface name for the given SW interface index from actual
441 :param node: VPP node to get interface data from.
442 :param interface_name: Interface name.
444 :type interface_name: str
445 :returns: Name of the given interface.
448 if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)
450 return if_data.get('sw_if_index')
453 def vpp_get_interface_mac(node, interface):
454 """Get MAC address for the given interface from actual interface dump.
456 :param node: VPP node to get interface data from.
457 :param interface: Numeric index or name string of a specific interface.
459 :type interface: int or str
460 :returns: MAC address.
463 if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
464 if if_data['sup_sw_if_index'] != if_data['sw_if_index']:
465 if_data = InterfaceUtil.vpp_get_interface_data(
466 node, if_data['sup_sw_if_index'])
468 return if_data.get('l2_address')
471 def tg_set_interface_driver(node, pci_addr, driver):
472 """Set interface driver on the TG node.
474 :param node: Node to set interface driver on (must be TG node).
475 :param pci_addr: PCI address of the interface.
476 :param driver: Driver name.
480 :raises RuntimeError: If unbinding from the current driver fails.
481 :raises RuntimeError: If binding to the new driver fails.
483 old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
484 if old_driver == driver:
490 # Unbind from current driver
491 if old_driver is not None:
492 cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/unbind"'\
493 .format(pci_addr, old_driver)
494 (ret_code, _, _) = ssh.exec_command_sudo(cmd)
495 if int(ret_code) != 0:
496 raise RuntimeError("'{0}' failed on '{1}'"
497 .format(cmd, node['host']))
499 # Bind to the new driver
500 cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/bind"'\
501 .format(pci_addr, driver)
502 (ret_code, _, _) = ssh.exec_command_sudo(cmd)
503 if int(ret_code) != 0:
504 raise RuntimeError("'{0}' failed on '{1}'"
505 .format(cmd, node['host']))
508 def tg_get_interface_driver(node, pci_addr):
509 """Get interface driver from the TG node.
511 :param node: Node to get interface driver on (must be TG node).
512 :param pci_addr: PCI address of the interface.
515 :returns: Interface driver or None if not found.
517 :raises RuntimeError: If PCI rescan or lspci command execution failed.
519 return DUTSetup.get_pci_dev_driver(node, pci_addr)
522 def tg_set_interfaces_udev_rules(node):
523 """Set udev rules for interfaces.
525 Create udev rules file in /etc/udev/rules.d where are rules for each
526 interface used by TG node, based on MAC interface has specific name.
527 So after unbind and bind again to kernel driver interface has same
528 name as before. This must be called after TG has set name for each
529 port in topology dictionary.
531 SUBSYSTEM=="net", ACTION=="add", ATTR{address}=="52:54:00:e1:8a:0f",
534 :param node: Node to set udev rules on (must be TG node).
536 :raises RuntimeError: If setting of udev rules fails.
541 cmd = 'rm -f {0}'.format(InterfaceUtil.__UDEV_IF_RULES_FILE)
542 (ret_code, _, _) = ssh.exec_command_sudo(cmd)
543 if int(ret_code) != 0:
544 raise RuntimeError("'{0}' failed on '{1}'"
545 .format(cmd, node['host']))
547 for interface in node['interfaces'].values():
548 rule = 'SUBSYSTEM==\\"net\\", ACTION==\\"add\\", ATTR{address}' + \
549 '==\\"' + interface['mac_address'] + '\\", NAME=\\"' + \
550 interface['name'] + '\\"'
551 cmd = 'sh -c "echo \'{0}\' >> {1}"'.format(
552 rule, InterfaceUtil.__UDEV_IF_RULES_FILE)
553 (ret_code, _, _) = ssh.exec_command_sudo(cmd)
554 if int(ret_code) != 0:
555 raise RuntimeError("'{0}' failed on '{1}'"
556 .format(cmd, node['host']))
558 cmd = '/etc/init.d/udev restart'
559 ssh.exec_command_sudo(cmd)
562 def tg_set_interfaces_default_driver(node):
563 """Set interfaces default driver specified in topology yaml file.
565 :param node: Node to setup interfaces driver on (must be TG node).
568 for interface in node['interfaces'].values():
569 InterfaceUtil.tg_set_interface_driver(node,
570 interface['pci_address'],
574 def update_vpp_interface_data_on_node(node):
575 """Update vpp generated interface data for a given node in DICT__nodes.
577 Updates interface names, software if index numbers and any other details
578 generated specifically by vpp that are unknown before testcase run.
579 It does this by dumping interface list from all devices using python
580 api, and pairing known information from topology (mac address) to state
583 :param node: Node selected from DICT__nodes.
586 interface_list = InterfaceUtil.vpp_get_interface_data(node)
587 interface_dict = dict()
588 for ifc in interface_list:
589 interface_dict[ifc['l2_address']] = ifc
591 for if_name, if_data in node['interfaces'].items():
592 ifc_dict = interface_dict.get(if_data['mac_address'])
593 if ifc_dict is not None:
594 if_data['name'] = ifc_dict['interface_name']
595 if_data['vpp_sw_index'] = ifc_dict['sw_if_index']
596 if_data['mtu'] = ifc_dict['mtu'][0]
597 logger.trace('Interface {ifc} found by MAC {mac}'.format(
598 ifc=if_name, mac=if_data['mac_address']))
600 logger.trace('Interface {ifc} not found by MAC {mac}'.format(
601 ifc=if_name, mac=if_data['mac_address']))
602 if_data['vpp_sw_index'] = None
605 def update_nic_interface_names(node):
606 """Update interface names based on nic type and PCI address.
608 This method updates interface names in the same format as VPP does.
610 :param node: Node dictionary.
613 for ifc in node['interfaces'].values():
614 if_pci = ifc['pci_address'].replace('.', ':').split(':')
615 bus = '{:x}'.format(int(if_pci[1], 16))
616 dev = '{:x}'.format(int(if_pci[2], 16))
617 fun = '{:x}'.format(int(if_pci[3], 16))
618 loc = '{bus}/{dev}/{fun}'.format(bus=bus, dev=dev, fun=fun)
619 if ifc['model'] == 'Intel-XL710':
620 ifc['name'] = 'FortyGigabitEthernet{loc}'.format(loc=loc)
621 elif ifc['model'] == 'Intel-X710':
622 ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
623 elif ifc['model'] == 'Intel-X520-DA2':
624 ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
625 elif ifc['model'] == 'Cisco-VIC-1385':
626 ifc['name'] = 'FortyGigabitEthernet{loc}'.format(loc=loc)
627 elif ifc['model'] == 'Cisco-VIC-1227':
628 ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
630 ifc['name'] = 'UnknownEthernet{loc}'.format(loc=loc)
633 def update_nic_interface_names_on_all_duts(nodes):
634 """Update interface names based on nic type and PCI address on all DUTs.
636 This method updates interface names in the same format as VPP does.
638 :param nodes: Topology nodes.
641 for node in nodes.values():
642 if node['type'] == NodeType.DUT:
643 InterfaceUtil.update_nic_interface_names(node)
646 def update_tg_interface_data_on_node(node, skip_tg_udev=False):
647 """Update interface name for TG/linux node in DICT__nodes.
650 # for dev in `ls /sys/class/net/`;
651 > do echo "\"`cat /sys/class/net/$dev/address`\": \"$dev\""; done
652 "52:54:00:9f:82:63": "eth0"
653 "52:54:00:77:ae:a9": "eth1"
654 "52:54:00:e1:8a:0f": "eth2"
655 "00:00:00:00:00:00": "lo"
657 :param node: Node selected from DICT__nodes.
658 :param skip_tg_udev: Skip udev rename on TG node.
660 :type skip_tg_udev: bool
661 :raises RuntimeError: If getting of interface name and MAC fails.
663 # First setup interface driver specified in yaml file
664 InterfaceUtil.tg_set_interfaces_default_driver(node)
666 # Get interface names
670 cmd = ('for dev in `ls /sys/class/net/`; do echo "\\"`cat '
671 '/sys/class/net/$dev/address`\\": \\"$dev\\""; done;')
673 (ret_code, stdout, _) = ssh.exec_command(cmd)
674 if int(ret_code) != 0:
675 raise RuntimeError('Get interface name and MAC failed')
676 tmp = "{" + stdout.rstrip().replace('\n', ',') + "}"
677 interfaces = JsonParser().parse_data(tmp)
678 for interface in node['interfaces'].values():
679 name = interfaces.get(interface['mac_address'])
682 interface['name'] = name
684 # Set udev rules for interfaces
686 InterfaceUtil.tg_set_interfaces_udev_rules(node)
689 def iface_update_numa_node(node):
690 """For all interfaces from topology file update numa node based on
691 information from the node.
693 :param node: Node from topology.
696 :raises ValueError: If numa node ia less than 0.
697 :raises RuntimeError: If update of numa node failes.
700 for if_key in Topology.get_node_interfaces(node):
701 if_pci = Topology.get_interface_pci_addr(node, if_key)
703 cmd = "cat /sys/bus/pci/devices/{}/numa_node".format(if_pci)
705 (ret, out, _) = ssh.exec_command(cmd)
710 if CpuUtils.cpu_node_count(node) == 1:
715 logger.trace('Reading numa location failed for: {0}'
718 Topology.set_interface_numa_node(node, if_key,
722 raise RuntimeError('Update numa node failed for: {0}'
726 def update_all_numa_nodes(nodes, skip_tg=False):
727 """For all nodes and all their interfaces from topology file update numa
728 node information based on information from the node.
730 :param nodes: Nodes in the topology.
731 :param skip_tg: Skip TG node
736 for node in nodes.values():
737 if node['type'] == NodeType.DUT:
738 InterfaceUtil.iface_update_numa_node(node)
739 elif node['type'] == NodeType.TG and not skip_tg:
740 InterfaceUtil.iface_update_numa_node(node)
743 def update_all_interface_data_on_all_nodes(nodes, skip_tg=False,
746 """Update interface names on all nodes in DICT__nodes.
748 This method updates the topology dictionary by querying interface lists
749 of all nodes mentioned in the topology dictionary.
751 :param nodes: Nodes in the topology.
752 :param skip_tg: Skip TG node.
753 :param skip_tg_udev: Skip udev rename on TG node.
754 :param numa_node: Retrieve numa_node location.
757 :type skip_tg_udev: bool
758 :type numa_node: bool
760 for node_data in nodes.values():
761 if node_data['type'] == NodeType.DUT:
762 InterfaceUtil.update_vpp_interface_data_on_node(node_data)
763 elif node_data['type'] == NodeType.TG and not skip_tg:
764 InterfaceUtil.update_tg_interface_data_on_node(
765 node_data, skip_tg_udev)
768 if node_data['type'] == NodeType.DUT:
769 InterfaceUtil.iface_update_numa_node(node_data)
770 elif node_data['type'] == NodeType.TG and not skip_tg:
771 InterfaceUtil.iface_update_numa_node(node_data)
774 def create_vlan_subinterface(node, interface, vlan):
775 """Create VLAN sub-interface on node.
777 :param node: Node to add VLAN subinterface on.
778 :param interface: Interface name or index on which create VLAN
780 :param vlan: VLAN ID of the subinterface to be created.
782 :type interface: str on int
784 :returns: Name and index of created subinterface.
786 :raises RuntimeError: if it is unable to create VLAN subinterface on the
787 node or interface cannot be converted.
789 sw_if_index = InterfaceUtil.get_interface_index(node, interface)
791 cmd = 'create_vlan_subif'
793 sw_if_index=sw_if_index,
796 err_msg = 'Failed to create VLAN sub-interface on host {host}'.format(
798 with PapiSocketExecutor(node) as papi_exec:
799 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
801 if_key = Topology.add_new_port(node, 'vlan_subif')
802 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
803 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
804 Topology.update_interface_name(node, if_key, ifc_name)
806 return '{ifc}.{vlan}'.format(ifc=interface, vlan=vlan), sw_if_index
809 def create_vxlan_interface(node, vni, source_ip, destination_ip):
810 """Create VXLAN interface and return sw if index of created interface.
812 :param node: Node where to create VXLAN interface.
813 :param vni: VXLAN Network Identifier.
814 :param source_ip: Source IP of a VXLAN Tunnel End Point.
815 :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
819 :type destination_ip: str
820 :returns: SW IF INDEX of created interface.
822 :raises RuntimeError: if it is unable to create VxLAN interface on the
825 src_address = ip_address(unicode(source_ip))
826 dst_address = ip_address(unicode(destination_ip))
828 cmd = 'vxlan_add_del_tunnel'
829 args = dict(is_add=1,
830 is_ipv6=1 if src_address.version == 6 else 0,
831 instance=Constants.BITWISE_NON_ZERO,
832 src_address=src_address.packed,
833 dst_address=dst_address.packed,
834 mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
836 decap_next_index=Constants.BITWISE_NON_ZERO,
838 err_msg = 'Failed to create VXLAN tunnel interface on host {host}'.\
839 format(host=node['host'])
840 with PapiSocketExecutor(node) as papi_exec:
841 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
843 if_key = Topology.add_new_port(node, 'vxlan_tunnel')
844 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
845 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
846 Topology.update_interface_name(node, if_key, ifc_name)
851 def set_vxlan_bypass(node, interface=None):
852 """Add the 'ip4-vxlan-bypass' graph node for a given interface.
854 By adding the IPv4 vxlan-bypass graph node to an interface, the node
855 checks for and validate input vxlan packet and bypass ip4-lookup,
856 ip4-local, ip4-udp-lookup nodes to speedup vxlan packet forwarding.
857 This node will cause extra overhead to for non-vxlan packets which is
860 :param node: Node where to set VXLAN bypass.
861 :param interface: Numeric index or name string of a specific interface.
863 :type interface: int or str
864 :raises RuntimeError: if it failed to set VXLAN bypass on interface.
866 sw_if_index = InterfaceUtil.get_interface_index(node, interface)
868 cmd = 'sw_interface_set_vxlan_bypass'
869 args = dict(is_ipv6=0,
870 sw_if_index=sw_if_index,
872 err_msg = 'Failed to set VXLAN bypass on interface on host {host}'.\
873 format(host=node['host'])
874 with PapiSocketExecutor(node) as papi_exec:
875 papi_exec.add(cmd, **args).get_replies(err_msg)
878 def vxlan_dump(node, interface=None):
879 """Get VxLAN data for the given interface.
881 :param node: VPP node to get interface data from.
882 :param interface: Numeric index or name string of a specific interface.
883 If None, information about all VxLAN interfaces is returned.
885 :type interface: int or str
886 :returns: Dictionary containing data for the given VxLAN interface or if
887 interface=None, the list of dictionaries with all VxLAN interfaces.
889 :raises TypeError: if the data type of interface is neither basestring
892 def process_vxlan_dump(vxlan_dump):
893 """Process vxlan dump.
895 :param vxlan_dump: Vxlan interface dump.
896 :type vxlan_dump: dict
897 :returns: Processed vxlan interface dump.
900 if vxlan_dump['is_ipv6']:
901 vxlan_dump['src_address'] = \
902 ip_address(unicode(vxlan_dump['src_address']))
903 vxlan_dump['dst_address'] = \
904 ip_address(unicode(vxlan_dump['dst_address']))
906 vxlan_dump['src_address'] = \
907 ip_address(unicode(vxlan_dump['src_address'][0:4]))
908 vxlan_dump['dst_address'] = \
909 ip_address(unicode(vxlan_dump['dst_address'][0:4]))
912 if interface is not None:
913 sw_if_index = InterfaceUtil.get_interface_index(node, interface)
915 sw_if_index = int(Constants.BITWISE_NON_ZERO)
917 cmd = 'vxlan_tunnel_dump'
918 args = dict(sw_if_index=sw_if_index)
919 err_msg = 'Failed to get VXLAN dump on host {host}'.format(
921 with PapiSocketExecutor(node) as papi_exec:
922 details = papi_exec.add(cmd, **args).get_details(err_msg)
924 data = list() if interface is None else dict()
926 if interface is None:
927 data.append(process_vxlan_dump(dump))
928 elif dump['sw_if_index'] == sw_if_index:
929 data = process_vxlan_dump(dump)
932 logger.debug('VXLAN data:\n{vxlan_data}'.format(vxlan_data=data))
936 def vhost_user_dump(node):
937 """Get vhost-user data for the given node.
939 TODO: Move to VhostUser.py
941 :param node: VPP node to get interface data from.
943 :returns: List of dictionaries with all vhost-user interfaces.
946 def process_vhost_dump(vhost_dump):
947 """Process vhost dump.
949 :param vhost_dump: Vhost interface dump.
950 :type vhost_dump: dict
951 :returns: Processed vhost interface dump.
954 vhost_dump['interface_name'] = \
955 vhost_dump['interface_name'].rstrip('\x00')
956 vhost_dump['sock_filename'] = \
957 vhost_dump['sock_filename'].rstrip('\x00')
960 cmd = 'sw_interface_vhost_user_dump'
961 err_msg = 'Failed to get vhost-user dump on host {host}'.format(
963 with PapiSocketExecutor(node) as papi_exec:
964 details = papi_exec.add(cmd).get_details(err_msg)
968 process_vhost_dump(dump)
970 logger.debug('Vhost-user details:\n{vhost_details}'.format(
971 vhost_details=details))
975 def create_subinterface(node, interface, sub_id, outer_vlan_id=None,
976 inner_vlan_id=None, type_subif=None):
977 """Create sub-interface on node. It is possible to set required
978 sub-interface type and VLAN tag(s).
980 :param node: Node to add sub-interface.
981 :param interface: Interface name on which create sub-interface.
982 :param sub_id: ID of the sub-interface to be created.
983 :param outer_vlan_id: Optional outer VLAN ID.
984 :param inner_vlan_id: Optional inner VLAN ID.
985 :param type_subif: Optional type of sub-interface. Values supported by
986 VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
989 :type interface: str or int
991 :type outer_vlan_id: int
992 :type inner_vlan_id: int
993 :type type_subif: str
994 :returns: Name and index of created sub-interface.
996 :raises RuntimeError: If it is not possible to create sub-interface.
998 subif_types = type_subif.split()
1001 if 'no_tags' in subif_types:
1002 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_NO_TAGS
1003 if 'one_tag' in subif_types:
1004 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_ONE_TAG
1005 if 'two_tags' in subif_types:
1006 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_TWO_TAGS
1007 if 'dot1ad' in subif_types:
1008 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DOT1AD
1009 if 'exact_match' in subif_types:
1010 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_EXACT_MATCH
1011 if 'default_sub' in subif_types:
1012 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DEFAULT
1013 if type_subif == 'default_sub':
1014 flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_INNER_VLAN_ID_ANY\
1015 | SubInterfaceFlags.SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY
1017 cmd = 'create_subif'
1019 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1021 sub_if_flags=flags.value if hasattr(flags, 'value') else int(flags),
1022 outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
1023 inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0
1025 err_msg = 'Failed to create sub-interface on host {host}'.format(
1027 with PapiSocketExecutor(node) as papi_exec:
1028 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1030 if_key = Topology.add_new_port(node, 'subinterface')
1031 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
1032 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
1033 Topology.update_interface_name(node, if_key, ifc_name)
1035 return '{ifc}.{s_id}'.format(ifc=interface, s_id=sub_id), sw_if_index
1038 def create_gre_tunnel_interface(node, source_ip, destination_ip):
1039 """Create GRE tunnel interface on node.
1041 :param node: VPP node to add tunnel interface.
1042 :param source_ip: Source of the GRE tunnel.
1043 :param destination_ip: Destination of the GRE tunnel.
1045 :type source_ip: str
1046 :type destination_ip: str
1047 :returns: Name and index of created GRE tunnel interface.
1049 :raises RuntimeError: If unable to create GRE tunnel interface.
1051 cmd = 'gre_tunnel_add_del'
1052 tunnel = dict(type=0,
1053 instance=Constants.BITWISE_NON_ZERO,
1055 dst=str(destination_ip),
1058 args = dict(is_add=1,
1060 err_msg = 'Failed to create GRE tunnel interface on host {host}'.format(
1062 with PapiSocketExecutor(node) as papi_exec:
1063 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1065 if_key = Topology.add_new_port(node, 'gre_tunnel')
1066 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
1067 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
1068 Topology.update_interface_name(node, if_key, ifc_name)
1070 return ifc_name, sw_if_index
def vpp_create_loopback(node, mac=None):
    """Create loopback interface on VPP node.

    :param node: Node to create loopback interface on.
    :param mac: Optional MAC address for loopback interface.
    :type node: dict
    :type mac: str
    :returns: SW interface index.
    :rtype: int
    :raises RuntimeError: If it is not possible to create loopback on the
        node.
    """
    cmd = 'create_loopback'
    # When no MAC is requested, 0 tells VPP to generate one itself.
    args = dict(mac_address=L2Util.mac_to_bin(mac) if mac else 0)
    err_msg = 'Failed to create loopback interface on host {host}'.format(
        host=node['host'])
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the loopback in the in-memory topology.
    if_key = Topology.add_new_port(node, 'loopback')
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
    if mac:
        # Read the MAC back from VPP so topology reflects actual state.
        mac = InterfaceUtil.vpp_get_interface_mac(node, ifc_name)
        Topology.update_interface_mac_address(node, if_key, mac)

    return sw_if_index
def vpp_create_bond_interface(node, mode, load_balance=None, mac=None):
    """Create bond interface on VPP node.

    :param node: DUT node from topology.
    :param mode: Link bonding mode.
    :param load_balance: Load balance (optional, valid for xor and lacp
        modes, otherwise ignored).
    :param mac: MAC address to assign to the bond interface (optional).
    :type node: dict
    :type mode: str
    :type load_balance: str
    :type mac: str
    :returns: Interface key (name) in topology.
    :rtype: str
    :raises RuntimeError: If it is not possible to create bond interface on
        the node.
    """
    cmd = 'bond_create'
    # Mode/LB strings from the test layer (e.g. 'lacp', 'l34') are mapped
    # onto the corresponding API enum members by name.
    args = dict(
        id=int(Constants.BITWISE_NON_ZERO),
        use_custom_mac=False if mac is None else True,
        mac_address=L2Util.mac_to_bin(mac) if mac else None,
        mode=getattr(LinkBondMode, 'BOND_API_MODE_{md}'.format(
            md=mode.replace('-', '_').upper())).value,
        lb=0 if load_balance is None else getattr(
            LinkBondLoadBalanceAlgo, 'BOND_API_LB_ALGO_{lb}'.format(
                lb=load_balance.upper())).value,
    )
    err_msg = 'Failed to create bond interface on host {host}'.format(
        host=node['host'])
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the bond in the topology and return its key.
    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx='eth_bond')
    if_key = Topology.get_interface_by_sw_index(node, sw_if_index)

    return if_key
def add_eth_interface(node, ifc_name=None, sw_if_index=None, ifc_pfx=None):
    """Add ethernet interface to current topology.

    Exactly one of ifc_name / sw_if_index may be omitted; the missing
    value is resolved from VPP via the other one.

    :param node: DUT node from topology.
    :param ifc_name: Name of the interface.
    :param sw_if_index: SW interface index.
    :param ifc_pfx: Interface key prefix.
    :type node: dict
    :type ifc_name: str
    :type sw_if_index: int
    :type ifc_pfx: str
    """
    if_key = Topology.add_new_port(node, ifc_pfx)

    # Resolve whichever identifier was not supplied.
    if ifc_name and sw_if_index is None:
        sw_if_index = InterfaceUtil.vpp_get_interface_sw_index(
            node, ifc_name)
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    if sw_if_index and ifc_name is None:
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_index)
    Topology.update_interface_mac_address(node, if_key, ifc_mac)
def vpp_create_avf_interface(node, vf_pci_addr, num_rx_queues=None):
    """Create AVF interface on VPP node.

    :param node: DUT node from topology.
    :param vf_pci_addr: Virtual Function PCI address.
    :param num_rx_queues: Number of RX queues.
    :type node: dict
    :type vf_pci_addr: str
    :type num_rx_queues: int
    :returns: Interface key (name) in topology.
    :rtype: str
    :raises RuntimeError: If it is not possible to create AVF interface on
        the node.
    """
    cmd = 'avf_create'
    # rxq_num=0 lets the AVF plugin pick its default queue count.
    args = dict(
        pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
        rxq_num=int(num_rx_queues) if num_rx_queues else 0,
    )
    err_msg = 'Failed to create AVF interface on host {host}'.format(
        host=node['host'])
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the AVF port in the topology and return its key.
    InterfaceUtil.add_eth_interface(node, sw_if_index=sw_if_index,
                                    ifc_pfx='eth_avf')
    if_key = Topology.get_interface_by_sw_index(node, sw_if_index)

    return if_key
def vpp_enslave_physical_interface(node, interface, bond_if):
    """Enslave physical interface to bond interface on VPP node.

    :param node: DUT node from topology.
    :param interface: Physical interface key from topology file.
    :param bond_if: Load balance
    :type node: dict
    :type interface: str
    :type bond_if: str
    :raises RuntimeError: If it is not possible to enslave physical
        interface to bond interface on the node.
    """
    cmd = 'bond_enslave'
    args = dict(
        sw_if_index=Topology.get_interface_sw_index(node, interface),
        bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
        is_long_timeout=False,
    )
    err_msg = 'Failed to enslave physical interface {ifc} to bond ' \
              'interface {bond} on host {host}'.format(ifc=interface,
                                                       bond=bond_if,
                                                       host=node['host'])
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def vpp_show_bond_data_on_node(node, verbose=False):
    """Show (detailed) bond information on VPP node.

    :param node: DUT node from topology.
    :param verbose: If detailed information is required or not.
    :type node: dict
    :type verbose: bool
    """
    cmd = 'sw_interface_bond_dump'
    err_msg = 'Failed to get bond interface dump on host {host}'.format(
        host=node['host'])

    data = ('Bond data on node {host}:\n'.format(host=node['host']))
    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd).get_details(err_msg)

    for bond in details:
        data += ('{b}\n'.format(b=bond['interface_name']))
        # Enum members come back from PAPI; strip the API prefix for
        # human-readable output.
        data += ('  mode: {m}\n'.format(
            m=bond['mode'].name.replace('BOND_API_MODE_', '').lower()))
        data += ('  load balance: {lb}\n'.format(
            lb=bond['lb'].name.replace('BOND_API_LB_ALGO_', '').lower()))
        data += ('  number of active slaves: {n}\n'.format(
            n=bond['active_slaves']))
        if verbose:
            # List active (non-passive) slaves under the active count.
            slave_data = InterfaceUtil.vpp_bond_slave_dump(
                node, Topology.get_interface_by_sw_index(
                    node, bond['sw_if_index']))
            for slave in slave_data:
                if not slave['is_passive']:
                    data += ('    {s}\n'.format(s=slave['interface_name']))
        data += ('  number of slaves: {n}\n'.format(n=bond['slaves']))
        if verbose:
            for slave in slave_data:
                data += ('    {s}\n'.format(s=slave['interface_name']))
        data += ('  interface id: {i}\n'.format(i=bond['id']))
        data += ('  sw_if_index: {i}\n'.format(i=bond['sw_if_index']))
    logger.info(data)
def vpp_bond_slave_dump(node, interface):
    """Get bond interface slave(s) data on VPP node.

    :param node: DUT node from topology.
    :param interface: Physical interface key from topology file.
    :type node: dict
    :type interface: str
    :returns: Bond slave interface data.
    :rtype: dict
    """
    cmd = 'sw_interface_slave_dump'
    args = dict(sw_if_index=Topology.get_interface_sw_index(
        node, interface))
    err_msg = 'Failed to get slave dump on host {host}'.format(
        host=node['host'])

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)

    logger.debug('Slave data:\n{slave_data}'.format(slave_data=details))
    return details
def vpp_show_bond_data_on_all_nodes(nodes, verbose=False):
    """Show (detailed) bond information on all VPP nodes in DICT__nodes.

    :param nodes: Nodes in the topology.
    :param verbose: If detailed information is required or not.
    :type nodes: dict
    :type verbose: bool
    """
    # Only DUT nodes run VPP; skip TG and other node types.
    for node_data in nodes.values():
        if node_data['type'] == NodeType.DUT:
            InterfaceUtil.vpp_show_bond_data_on_node(node_data, verbose)
def vpp_enable_input_acl_interface(node, interface, ip_version,
                                   table_index):
    """Enable input acl on interface.

    :param node: VPP node to setup interface for input acl.
    :param interface: Interface to setup input acl.
    :param ip_version: Version of IP protocol.
    :param table_index: Classify table index.
    :type node: dict
    :type interface: str or int
    :type ip_version: str
    :type table_index: int
    """
    cmd = 'input_acl_set_interface'
    # Only the table matching ip_version is set; the other two are
    # disabled via the all-ones sentinel value.
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        ip4_table_index=table_index if ip_version == 'ip4'
        else Constants.BITWISE_NON_ZERO,
        ip6_table_index=table_index if ip_version == 'ip6'
        else Constants.BITWISE_NON_ZERO,
        l2_table_index=table_index if ip_version == 'l2'
        else Constants.BITWISE_NON_ZERO,
    )
    err_msg = 'Failed to enable input acl on interface {ifc}'.format(
        ifc=interface)
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def get_interface_classify_table(node, interface):
    """Get name of classify table for the given interface.

    TODO: Move to Classify.py.

    :param node: VPP node to get data from.
    :param interface: Name or sw_if_index of a specific interface.
    :type node: dict
    :type interface: str or int
    :returns: Classify table name.
    :rtype: str
    """
    # NOTE(review): basestring is Python 2 only; needs replacing with str
    # when this library is migrated to Python 3.
    if isinstance(interface, basestring):
        sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
    else:
        sw_if_index = interface

    cmd = 'classify_table_by_interface'
    args = dict(sw_if_index=sw_if_index)
    err_msg = 'Failed to get classify table name by interface {ifc}'.format(
        ifc=interface)
    with PapiSocketExecutor(node) as papi_exec:
        reply = papi_exec.add(cmd, **args).get_reply(err_msg)

    return reply
def get_sw_if_index(node, interface_name):
    """Get sw_if_index for the given interface from actual interface dump.

    :param node: VPP node to get interface data from.
    :param interface_name: Name of the specific interface.
    :type node: dict
    :type interface_name: str
    :returns: sw_if_index of the given interface.
    :rtype: str
    """
    dump = InterfaceUtil.vpp_get_interface_data(
        node, interface=interface_name)
    return dump.get('sw_if_index')
def vxlan_gpe_dump(node, interface_name=None):
    """Get VxLAN GPE data for the given interface.

    :param node: VPP node to get interface data from.
    :param interface_name: Name of the specific interface. If None,
        information about all VxLAN GPE interfaces is returned.
    :type node: dict
    :type interface_name: str
    :returns: Dictionary containing data for the given VxLAN GPE interface
        or if interface=None, the list of dictionaries with all VxLAN GPE
        interfaces.
    :rtype: dict or list
    """
    def process_vxlan_gpe_dump(vxlan_dump):
        """Process vxlan_gpe dump.

        :param vxlan_dump: Vxlan_gpe nterface dump.
        :type vxlan_dump: dict
        :returns: Processed vxlan_gpe interface dump.
        :rtype: dict
        """
        # NOTE(review): unicode() is Python 2 only; use str() when this
        # library moves to Python 3.
        if vxlan_dump['is_ipv6']:
            vxlan_dump['local'] = \
                ip_address(unicode(vxlan_dump['local']))
            vxlan_dump['remote'] = \
                ip_address(unicode(vxlan_dump['remote']))
        else:
            # IPv4 address occupies the first 4 bytes of the field.
            vxlan_dump['local'] = \
                ip_address(unicode(vxlan_dump['local'][0:4]))
            vxlan_dump['remote'] = \
                ip_address(unicode(vxlan_dump['remote'][0:4]))
        return vxlan_dump

    # All-ones sw_if_index asks VPP to dump every VxLAN-GPE tunnel.
    if interface_name is not None:
        sw_if_index = InterfaceUtil.get_interface_index(
            node, interface_name)
    else:
        sw_if_index = int(Constants.BITWISE_NON_ZERO)

    cmd = 'vxlan_gpe_tunnel_dump'
    args = dict(sw_if_index=sw_if_index)
    err_msg = 'Failed to get VXLAN-GPE dump on host {host}'.format(
        host=node['host'])
    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)

    data = list() if interface_name is None else dict()
    for dump in details:
        if interface_name is None:
            data.append(process_vxlan_gpe_dump(dump))
        elif dump['sw_if_index'] == sw_if_index:
            data = process_vxlan_gpe_dump(dump)
            break

    logger.debug('VXLAN-GPE data:\n{vxlan_gpe_data}'.format(
        vxlan_gpe_data=data))
    return data
def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
    """Assign VPP interface to specific VRF/FIB table.

    :param node: VPP node where the FIB and interface are located.
    :param interface: Interface to be assigned to FIB.
    :param table_id: VRF table ID.
    :param ipv6: Assign to IPv6 table. Default False.
    :type node: dict
    :type interface: str or int
    :type table_id: int
    :type ipv6: bool
    """
    cmd = 'sw_interface_set_table'
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        is_ipv6=ipv6,
        vrf_id=int(table_id))
    err_msg = 'Failed to assign interface {ifc} to FIB table'.format(
        ifc=interface)
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def set_linux_interface_mac(node, interface, mac, namespace=None,
                            vf_id=None):
    """Set MAC address for interface in linux.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param mac: MAC to be assigned to interface.
    :param namespace: Execute command in namespace. Optional
    :param vf_id: Virtual Function id. Optional
    :type node: dict
    :type interface: str
    :type mac: str
    :type namespace: str
    :type vf_id: int
    """
    # With a VF id the MAC is set on the PF's virtual function,
    # otherwise directly on the interface itself.
    mac_str = 'vf {vf_id} mac {mac}'.format(vf_id=vf_id, mac=mac) \
        if vf_id is not None else 'address {mac}'.format(mac=mac)
    ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''

    cmd = ('{ns} ip link set {interface} {mac}'.
           format(ns=ns_str, interface=interface, mac=mac_str))
    exec_cmd_no_error(node, cmd, sudo=True)
def set_linux_interface_trust_on(node, interface, namespace=None,
                                 vf_id=None):
    """Set trust on (promisc) for interface in linux.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param namespace: Execute command in namespace. Optional
    :param vf_id: Virtual Function id. Optional
    :type node: dict
    :type interface: str
    :type namespace: str
    :type vf_id: int
    """
    # 'trust on' is applied per-VF when a VF id is given.
    trust_str = 'vf {vf_id} trust on'.format(vf_id=vf_id) \
        if vf_id is not None else 'trust on'
    ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''

    cmd = ('{ns} ip link set dev {interface} {trust}'.
           format(ns=ns_str, interface=interface, trust=trust_str))
    exec_cmd_no_error(node, cmd, sudo=True)
def set_linux_interface_spoof_off(node, interface, namespace=None,
                                  vf_id=None):
    """Set spoof off for interface in linux.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param namespace: Execute command in namespace. Optional
    :param vf_id: Virtual Function id. Optional
    :type node: dict
    :type interface: str
    :type namespace: str
    :type vf_id: int
    """
    # Spoof checking is disabled per-VF when a VF id is given.
    spoof_str = 'vf {vf_id} spoof off'.format(vf_id=vf_id) \
        if vf_id is not None else 'spoof off'
    ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''

    cmd = ('{ns} ip link set dev {interface} {spoof}'.
           format(ns=ns_str, interface=interface, spoof=spoof_str))
    exec_cmd_no_error(node, cmd, sudo=True)
def init_avf_interface(node, ifc_key, numvfs=1, osi_layer='L2'):
    """Init PCI device by creating VIFs and bind them to vfio-pci for AVF
    driver testing on DUT.

    :param node: DUT node.
    :param ifc_key: Interface key from topology file.
    :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
    :param osi_layer: OSI Layer type to initialize TG with.
        Default value "L2" sets linux interface spoof off.
    :type node: dict
    :type ifc_key: str
    :type numvfs: int
    :type osi_layer: str
    :returns: Virtual Function topology interface keys.
    :rtype: list
    :raises RuntimeError: If a reason preventing initialization is found.
    """
    # Read PCI address and driver.
    pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
    pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
    uio_driver = Topology.get_uio_driver(node)
    kernel_driver = Topology.get_interface_driver(node, ifc_key)
    if kernel_driver not in ("i40e", "i40evf"):
        raise RuntimeError(
            "AVF needs i40e-compatible driver, not {driver} at node {host}"
            " ifc {ifc}".format(
                driver=kernel_driver, host=node["host"], ifc=ifc_key))
    current_driver = DUTSetup.get_pci_dev_driver(
        node, pf_pci_addr.replace(':', r'\:'))

    # Stop VPP to prevent deadlock.
    VPPUtil.stop_vpp_service(node)
    if current_driver != kernel_driver:
        # PCI device must be re-bound to kernel driver before creating VFs.
        DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
        # Unbind from current driver.
        DUTSetup.pci_driver_unbind(node, pf_pci_addr)
        # Bind to kernel driver.
        DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)

    # Initialize PCI VFs.
    DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)

    vf_ifc_keys = []
    # Set MAC address and bind each virtual function to uio driver.
    for vf_id in range(numvfs):
        # Derive a per-VF MAC from the PF MAC; last octet is the VF id.
        vf_mac_addr = ":".join([pf_mac_addr[0], pf_mac_addr[2],
                                pf_mac_addr[3], pf_mac_addr[4],
                                pf_mac_addr[5], "{:02x}".format(vf_id)])

        # Resolve the PF netdev name on the remote host at command time.
        pf_dev = '`basename /sys/bus/pci/devices/{pci}/net/*`'.\
            format(pci=pf_pci_addr)
        InterfaceUtil.set_linux_interface_trust_on(node, pf_dev,
                                                   vf_id=vf_id)
        if osi_layer == 'L2':
            InterfaceUtil.set_linux_interface_spoof_off(node, pf_dev,
                                                        vf_id=vf_id)
        InterfaceUtil.set_linux_interface_mac(node, pf_dev, vf_mac_addr,
                                              vf_id=vf_id)

        DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
        DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)

        # Add newly created ports into topology file
        vf_ifc_name = '{pf_if_key}_vif'.format(pf_if_key=ifc_key)
        vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
        vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
        Topology.update_interface_name(node, vf_ifc_key,
                                       vf_ifc_name+str(vf_id+1))
        Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
        Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
        vf_ifc_keys.append(vf_ifc_key)

    return vf_ifc_keys
def vpp_sw_interface_rx_placement_dump(node):
    """Dump VPP interface RX placement on node.

    :param node: Node to run command on.
    :type node: dict
    :returns: Thread mapping information as a list of dictionaries.
    :rtype: list
    """
    cmd = 'sw_interface_rx_placement_dump'
    err_msg = "Failed to run '{cmd}' PAPI command on host {host}!".format(
        cmd=cmd, host=node['host'])
    with PapiSocketExecutor(node) as papi_exec:
        # Queue one dump per known interface; interfaces without a VPP
        # sw index (not created in VPP) are skipped.
        for ifc in node['interfaces'].values():
            if ifc['vpp_sw_index'] is not None:
                papi_exec.add(cmd, sw_if_index=ifc['vpp_sw_index'])
        details = papi_exec.get_details(err_msg)
    return sorted(details, key=lambda k: k['sw_if_index'])
def vpp_sw_interface_set_rx_placement(node, sw_if_index, queue_id,
                                      worker_id):
    """Set interface RX placement to worker on node.

    :param node: Node to run command on.
    :param sw_if_index: VPP SW interface index.
    :param queue_id: VPP interface queue ID.
    :param worker_id: VPP worker ID (indexing from 0).
    :type node: dict
    :type sw_if_index: int
    :type queue_id: int
    :type worker_id: int
    :raises RuntimeError: If failed to run command on host or if no API
        reply received.
    """
    cmd = 'sw_interface_set_rx_placement'
    err_msg = "Failed to set interface RX placement to worker on host " \
              "{host}!".format(host=node['host'])
    args = dict(
        sw_if_index=sw_if_index,
        queue_id=queue_id,
        worker_id=worker_id,
    )
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_reply(err_msg)
def vpp_round_robin_rx_placement(node, prefix):
    """Set Round Robin interface RX placement on all worker threads
    on node.

    :param node: Topology nodes.
    :param prefix: Interface name prefix.
    :type node: dict
    :type prefix: str
    """
    worker_id = 0
    # First thread is the main thread; the rest are workers.
    worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1

    for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
        for interface in node['interfaces'].values():
            # Match dump entries to topology interfaces whose name
            # carries the requested prefix.
            if placement['sw_if_index'] == interface['vpp_sw_index'] \
                and prefix in interface['name']:
                InterfaceUtil.vpp_sw_interface_set_rx_placement(
                    node, placement['sw_if_index'], placement['queue_id'],
                    worker_id % worker_cnt)
                worker_id += 1
def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix):
    """Set Round Robin interface RX placement on all worker threads
    on all DUTs.

    :param nodes: Topology nodes.
    :param prefix: Interface name prefix.
    :type nodes: dict
    :type prefix: str
    """
    # Apply only to DUT nodes; other node types do not run VPP.
    for node in nodes.values():
        if node['type'] == NodeType.DUT:
            InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)