1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Interface util library."""
16 from socket import AF_INET, AF_INET6, inet_ntop
17 from time import sleep
19 from enum import IntEnum
20 from ipaddress import ip_address
21 from robot.api import logger
23 from resources.libraries.python.Constants import Constants
24 from resources.libraries.python.CpuUtils import CpuUtils
25 from resources.libraries.python.DUTSetup import DUTSetup
26 from resources.libraries.python.L2Util import L2Util
27 from resources.libraries.python.PapiExecutor import PapiExecutor
28 from resources.libraries.python.parsers.JsonParser import JsonParser
29 from resources.libraries.python.ssh import SSH, exec_cmd_no_error
30 from resources.libraries.python.topology import NodeType, Topology
31 from resources.libraries.python.VPPUtil import VPPUtil
class LinkBondLoadBalance(IntEnum):
    """Link bonding load balance.

    Numeric values match the VPP ``bond_create`` API ``lb`` argument.
    NOTE(review): members appear to have been dropped from this copy of the
    file; restored from upstream CSIT — verify against the VPP API in use.
    """
    L2 = 0
    L34 = 1
    L23 = 2
class LinkBondMode(IntEnum):
    """Link bonding mode.

    Numeric values match the VPP ``bond_create`` API ``mode`` argument.
    NOTE(review): docstring previously said "load balance" (copy-paste) and
    members appear to have been dropped; restored from upstream CSIT.
    """
    ROUND_ROBIN = 1
    ACTIVE_BACKUP = 2
    XOR = 3
    BROADCAST = 4
    LACP = 5
50 class InterfaceUtil(object):
51 """General utilities for managing interfaces"""
53 __UDEV_IF_RULES_FILE = '/etc/udev/rules.d/10-network.rules'
56 def pci_to_int(pci_str):
57 """Convert PCI address from string format (0000:18:0a.0) to
58 integer representation (169345024).
60 :param pci_str: PCI address in string representation.
62 :returns: Integer representation of PCI address.
65 pci = list(pci_str.split(':')[0:2])
66 pci.extend(pci_str.split(':')[2].split('.'))
68 return (int(pci[0], 16) | int(pci[1], 16) << 16 |
69 int(pci[2], 16) << 24 | int(pci[3], 16) << 29)
72 def get_interface_index(node, interface):
73 """Get interface sw_if_index from topology file.
75 :param node: Node where the interface is.
76 :param interface: Numeric index or name string of a specific interface.
78 :type interface: str or int
79 :returns: SW interface index.
83 sw_if_index = int(interface)
85 sw_if_index = Topology.get_interface_sw_index(node, interface)
86 if sw_if_index is None:
88 Topology.get_interface_sw_index_by_name(node, interface)
89 except TypeError as err:
90 raise TypeError('Wrong interface format {ifc}: {err}'.format(
91 ifc=interface, err=err.message))
96 def set_interface_state(node, interface, state, if_type='key'):
97 """Set interface state on a node.
99 Function can be used for DUTs as well as for TGs.
101 :param node: Node where the interface is.
102 :param interface: Interface key or sw_if_index or name.
103 :param state: One of 'up' or 'down'.
104 :param if_type: Interface type
106 :type interface: str or int
110 :raises ValueError: If the interface type is unknown.
111 :raises ValueError: If the state of interface is unexpected.
112 :raises ValueError: If the node has an unknown node type.
115 if isinstance(interface, basestring):
116 sw_if_index = Topology.get_interface_sw_index(node, interface)
117 iface_name = Topology.get_interface_name(node, interface)
119 sw_if_index = interface
120 elif if_type == 'name':
121 iface_key = Topology.get_interface_by_name(node, interface)
122 if iface_key is not None:
123 sw_if_index = Topology.get_interface_sw_index(node, iface_key)
124 iface_name = interface
126 raise ValueError('Unknown if_type: {type}'.format(type=if_type))
128 if node['type'] == NodeType.DUT:
131 elif state == 'down':
134 raise ValueError('Unexpected interface state: {state}'.format(
136 cmd = 'sw_interface_set_flags'
137 err_msg = 'Failed to set interface state on host {host}'.format(
139 args = dict(sw_if_index=sw_if_index,
140 admin_up_down=admin_up_down)
141 with PapiExecutor(node) as papi_exec:
142 papi_exec.add(cmd, **args).get_reply(err_msg)
143 elif node['type'] == NodeType.TG or node['type'] == NodeType.VM:
144 cmd = 'ip link set {ifc} {state}'.format(
145 ifc=iface_name, state=state)
146 exec_cmd_no_error(node, cmd, sudo=True)
148 raise ValueError('Node {} has unknown NodeType: "{}"'
149 .format(node['host'], node['type']))
152 def set_interface_ethernet_mtu(node, iface_key, mtu):
153 """Set Ethernet MTU for specified interface.
155 Function can be used only for TGs.
157 :param node: Node where the interface is.
158 :param iface_key: Interface key from topology file.
159 :param mtu: MTU to set.
164 :raises ValueError: If the node type is "DUT".
165 :raises ValueError: If the node has an unknown node type.
167 if node['type'] == NodeType.DUT:
168 raise ValueError('Node {}: Setting Ethernet MTU for interface '
169 'on DUT nodes not supported', node['host'])
170 elif node['type'] == NodeType.TG:
171 iface_name = Topology.get_interface_name(node, iface_key)
172 cmd = 'ip link set {} mtu {}'.format(iface_name, mtu)
173 exec_cmd_no_error(node, cmd, sudo=True)
175 raise ValueError('Node {} has unknown NodeType: "{}"'
176 .format(node['host'], node['type']))
    def set_default_ethernet_mtu_on_all_interfaces_on_node(node):
        """Set default Ethernet MTU (1500 B) on all interfaces on node.

        Function can be used only for TGs.

        :param node: Node where to set default MTU.
        :type node: dict
        :returns: Nothing.
        """
        for ifc in node['interfaces']:
            InterfaceUtil.set_interface_ethernet_mtu(node, ifc, 1500)
192 def vpp_set_interface_mtu(node, interface, mtu=9200):
193 """Set Ethernet MTU on interface.
195 :param node: VPP node.
196 :param interface: Interface to setup MTU. Default: 9200.
197 :param mtu: Ethernet MTU size in Bytes.
199 :type interface: str or int
202 if isinstance(interface, basestring):
203 sw_if_index = Topology.get_interface_sw_index(node, interface)
205 sw_if_index = interface
207 cmd = 'hw_interface_set_mtu'
208 err_msg = 'Failed to set interface MTU on host {host}'.format(
210 args = dict(sw_if_index=sw_if_index,
213 with PapiExecutor(node) as papi_exec:
214 papi_exec.add(cmd, **args).get_reply(err_msg)
215 except AssertionError as err:
216 # TODO: Make failure tolerance optional.
217 logger.debug("Setting MTU failed. Expected?\n{err}".format(
    def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
        """Set Ethernet MTU on all interfaces of a single VPP node.

        :param node: VPP node.
        :param mtu: Ethernet MTU size in Bytes. Default: 9200.
        :type node: dict
        :type mtu: int
        :returns: Nothing.
        """
        for interface in node['interfaces']:
            InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
    def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
        """Set Ethernet MTU on all interfaces on all DUTs.

        TG/VM nodes are skipped; only nodes of type DUT are touched.

        :param nodes: VPP nodes.
        :param mtu: Ethernet MTU size in Bytes. Default: 9200.
        :type nodes: dict
        :type mtu: int
        :returns: Nothing.
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
246 def vpp_node_interfaces_ready_wait(node, retries=15):
247 """Wait until all interfaces with admin-up are in link-up state.
249 :param node: Node to wait on.
250 :param retries: Number of retries to check interface status (optional,
255 :raises RuntimeError: If any interface is not in link-up state after
256 defined number of retries.
258 for _ in xrange(0, retries):
260 out = InterfaceUtil.vpp_get_interface_data(node)
261 for interface in out:
262 if interface.get('admin_up_down') == 1:
263 if interface.get('link_up_down') != 1:
264 not_ready.append(interface.get('interface_name'))
268 logger.debug('Interfaces still in link-down state:\n{ifs} '
269 '\nWaiting...'.format(ifs=not_ready))
272 err = 'Timeout, interfaces not up:\n{ifs}'.format(ifs=not_ready) \
273 if 'not_ready' in locals() else 'No check executed!'
274 raise RuntimeError(err)
    def all_vpp_interfaces_ready_wait(nodes, retries=15):
        """Wait until all interfaces with admin-up are in link-up state for all
        nodes in the topology.

        Only nodes of type DUT are checked.

        :param nodes: Nodes in the topology.
        :param retries: Number of retries to check interface status (optional,
            default 15).
        :type nodes: dict
        :type retries: int
        :returns: Nothing.
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)
293 def vpp_get_interface_data(node, interface=None):
294 """Get all interface data from a VPP node. If a name or
295 sw_interface_index is provided, return only data for the matching
298 :param node: VPP node to get interface data from.
299 :param interface: Numeric index or name string of a specific interface.
301 :type interface: int or str
302 :returns: List of dictionaries containing data for each interface, or a
303 single dictionary for the specified interface.
305 :raises TypeError: if the data type of interface is neither basestring
308 if interface is not None:
309 if isinstance(interface, basestring):
310 param = 'interface_name'
311 elif isinstance(interface, int):
312 param = 'sw_if_index'
314 raise TypeError('Wrong interface format {ifc}'.format(
319 cmd = 'sw_interface_dump'
320 args = dict(name_filter_valid=0,
322 err_msg = 'Failed to get interface dump on host {host}'.format(
324 with PapiExecutor(node) as papi_exec:
325 details = papi_exec.add(cmd, **args).get_details(err_msg)
327 def process_if_dump(if_dump):
328 """Process interface dump.
330 :param if_dump: Interface dump.
332 :returns: Processed interface dump.
335 if_dump['interface_name'] = if_dump['interface_name'].rstrip('\x00')
336 if_dump['tag'] = if_dump['tag'].rstrip('\x00')
337 if_dump['l2_address'] = L2Util.bin_to_mac(if_dump['l2_address'])
338 if_dump['b_dmac'] = L2Util.bin_to_mac(if_dump['b_dmac'])
339 if_dump['b_smac'] = L2Util.bin_to_mac(if_dump['b_smac'])
342 data = list() if interface is None else dict()
343 for if_dump in details:
344 if interface is None:
345 data.append(process_if_dump(if_dump))
346 elif str(if_dump.get(param)).rstrip('\x00') == str(interface):
347 data = process_if_dump(if_dump)
350 logger.debug('Interface data:\n{if_data}'.format(if_data=data))
    def vpp_get_interface_name(node, sw_if_index):
        """Get interface name for the given SW interface index from actual
        interface dump.

        :param node: VPP node to get interface data from.
        :param sw_if_index: SW interface index of the specific interface.
        :type node: dict
        :type sw_if_index: int
        :returns: Name of the given interface.
        :rtype: str
        """
        if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
        if if_data['sup_sw_if_index'] != if_data['sw_if_index']:
            # Sub-interface: re-fetch the supporting (parent) interface
            # and report its data instead.
            if_data = InterfaceUtil.vpp_get_interface_data(
                node, if_data['sup_sw_if_index'])

        return if_data.get('interface_name')
    def vpp_get_interface_sw_index(node, interface_name):
        """Get SW interface index for the given interface name from actual
        interface dump.

        :param node: VPP node to get interface data from.
        :param interface_name: Interface name.
        :type node: dict
        :type interface_name: str
        :returns: SW interface index of the given interface.
        :rtype: int
        """
        if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)

        return if_data.get('sw_if_index')
    def vpp_get_interface_mac(node, interface):
        """Get MAC address for the given interface from actual interface dump.

        :param node: VPP node to get interface data from.
        :param interface: Numeric index or name string of a specific interface.
        :type node: dict
        :type interface: int or str
        :returns: MAC address.
        :rtype: str
        """
        if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
        if if_data['sup_sw_if_index'] != if_data['sw_if_index']:
            # Sub-interface: the MAC lives on the supporting (parent)
            # interface, so re-fetch that one.
            if_data = InterfaceUtil.vpp_get_interface_data(
                node, if_data['sup_sw_if_index'])

        return if_data.get('l2_address')
407 def tg_set_interface_driver(node, pci_addr, driver):
408 """Set interface driver on the TG node.
410 :param node: Node to set interface driver on (must be TG node).
411 :param pci_addr: PCI address of the interface.
412 :param driver: Driver name.
416 :raises RuntimeError: If unbinding from the current driver fails.
417 :raises RuntimeError: If binding to the new driver fails.
419 old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
420 if old_driver == driver:
426 # Unbind from current driver
427 if old_driver is not None:
428 cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/unbind"'\
429 .format(pci_addr, old_driver)
430 (ret_code, _, _) = ssh.exec_command_sudo(cmd)
431 if int(ret_code) != 0:
432 raise RuntimeError("'{0}' failed on '{1}'"
433 .format(cmd, node['host']))
435 # Bind to the new driver
436 cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/bind"'\
437 .format(pci_addr, driver)
438 (ret_code, _, _) = ssh.exec_command_sudo(cmd)
439 if int(ret_code) != 0:
440 raise RuntimeError("'{0}' failed on '{1}'"
441 .format(cmd, node['host']))
    def tg_get_interface_driver(node, pci_addr):
        """Get interface driver from the TG node.

        Thin wrapper around DUTSetup.get_pci_dev_driver.

        :param node: Node to get interface driver on (must be TG node).
        :param pci_addr: PCI address of the interface.
        :type node: dict
        :type pci_addr: str
        :returns: Interface driver or None if not found.
        :rtype: str
        :raises RuntimeError: If PCI rescan or lspci command execution failed.
        """
        return DUTSetup.get_pci_dev_driver(node, pci_addr)
458 def tg_set_interfaces_udev_rules(node):
459 """Set udev rules for interfaces.
461 Create udev rules file in /etc/udev/rules.d where are rules for each
462 interface used by TG node, based on MAC interface has specific name.
463 So after unbind and bind again to kernel driver interface has same
464 name as before. This must be called after TG has set name for each
465 port in topology dictionary.
467 SUBSYSTEM=="net", ACTION=="add", ATTR{address}=="52:54:00:e1:8a:0f",
470 :param node: Node to set udev rules on (must be TG node).
472 :raises RuntimeError: If setting of udev rules fails.
477 cmd = 'rm -f {0}'.format(InterfaceUtil.__UDEV_IF_RULES_FILE)
478 (ret_code, _, _) = ssh.exec_command_sudo(cmd)
479 if int(ret_code) != 0:
480 raise RuntimeError("'{0}' failed on '{1}'"
481 .format(cmd, node['host']))
483 for interface in node['interfaces'].values():
484 rule = 'SUBSYSTEM==\\"net\\", ACTION==\\"add\\", ATTR{address}' + \
485 '==\\"' + interface['mac_address'] + '\\", NAME=\\"' + \
486 interface['name'] + '\\"'
487 cmd = 'sh -c "echo \'{0}\' >> {1}"'.format(
488 rule, InterfaceUtil.__UDEV_IF_RULES_FILE)
489 (ret_code, _, _) = ssh.exec_command_sudo(cmd)
490 if int(ret_code) != 0:
491 raise RuntimeError("'{0}' failed on '{1}'"
492 .format(cmd, node['host']))
494 cmd = '/etc/init.d/udev restart'
495 ssh.exec_command_sudo(cmd)
498 def tg_set_interfaces_default_driver(node):
499 """Set interfaces default driver specified in topology yaml file.
501 :param node: Node to setup interfaces driver on (must be TG node).
504 for interface in node['interfaces'].values():
505 InterfaceUtil.tg_set_interface_driver(node,
506 interface['pci_address'],
510 def update_vpp_interface_data_on_node(node):
511 """Update vpp generated interface data for a given node in DICT__nodes.
513 Updates interface names, software if index numbers and any other details
514 generated specifically by vpp that are unknown before testcase run.
515 It does this by dumping interface list from all devices using python
516 api, and pairing known information from topology (mac address) to state
519 :param node: Node selected from DICT__nodes.
522 interface_list = InterfaceUtil.vpp_get_interface_data(node)
523 interface_dict = dict()
524 for ifc in interface_list:
525 interface_dict[ifc['l2_address']] = ifc
527 for if_name, if_data in node['interfaces'].items():
528 ifc_dict = interface_dict.get(if_data['mac_address'])
529 if ifc_dict is not None:
530 if_data['name'] = ifc_dict['interface_name']
531 if_data['vpp_sw_index'] = ifc_dict['sw_if_index']
532 if_data['mtu'] = ifc_dict['mtu'][0]
533 logger.trace('Interface {ifc} found by MAC {mac}'.format(
534 ifc=if_name, mac=if_data['mac_address']))
536 logger.trace('Interface {ifc} not found by MAC {mac}'.format(
537 ifc=if_name, mac=if_data['mac_address']))
538 if_data['vpp_sw_index'] = None
541 def update_nic_interface_names(node):
542 """Update interface names based on nic type and PCI address.
544 This method updates interface names in the same format as VPP does.
546 :param node: Node dictionary.
549 for ifc in node['interfaces'].values():
550 if_pci = ifc['pci_address'].replace('.', ':').split(':')
551 bus = '{:x}'.format(int(if_pci[1], 16))
552 dev = '{:x}'.format(int(if_pci[2], 16))
553 fun = '{:x}'.format(int(if_pci[3], 16))
554 loc = '{bus}/{dev}/{fun}'.format(bus=bus, dev=dev, fun=fun)
555 if ifc['model'] == 'Intel-XL710':
556 ifc['name'] = 'FortyGigabitEthernet{loc}'.format(loc=loc)
557 elif ifc['model'] == 'Intel-X710':
558 ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
559 elif ifc['model'] == 'Intel-X520-DA2':
560 ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
561 elif ifc['model'] == 'Cisco-VIC-1385':
562 ifc['name'] = 'FortyGigabitEthernet{loc}'.format(loc=loc)
563 elif ifc['model'] == 'Cisco-VIC-1227':
564 ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
566 ifc['name'] = 'UnknownEthernet{loc}'.format(loc=loc)
    def update_nic_interface_names_on_all_duts(nodes):
        """Update interface names based on nic type and PCI address on all DUTs.

        This method updates interface names in the same format as VPP does.
        TG nodes are skipped.

        :param nodes: Topology nodes.
        :type nodes: dict
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                InterfaceUtil.update_nic_interface_names(node)
582 def update_tg_interface_data_on_node(node, skip_tg_udev=False):
583 """Update interface name for TG/linux node in DICT__nodes.
586 # for dev in `ls /sys/class/net/`;
587 > do echo "\"`cat /sys/class/net/$dev/address`\": \"$dev\""; done
588 "52:54:00:9f:82:63": "eth0"
589 "52:54:00:77:ae:a9": "eth1"
590 "52:54:00:e1:8a:0f": "eth2"
591 "00:00:00:00:00:00": "lo"
593 :param node: Node selected from DICT__nodes.
594 :param skip_tg_udev: Skip udev rename on TG node.
596 :type skip_tg_udev: bool
597 :raises RuntimeError: If getting of interface name and MAC fails.
599 # First setup interface driver specified in yaml file
600 InterfaceUtil.tg_set_interfaces_default_driver(node)
602 # Get interface names
606 cmd = ('for dev in `ls /sys/class/net/`; do echo "\\"`cat '
607 '/sys/class/net/$dev/address`\\": \\"$dev\\""; done;')
609 (ret_code, stdout, _) = ssh.exec_command(cmd)
610 if int(ret_code) != 0:
611 raise RuntimeError('Get interface name and MAC failed')
612 tmp = "{" + stdout.rstrip().replace('\n', ',') + "}"
613 interfaces = JsonParser().parse_data(tmp)
614 for interface in node['interfaces'].values():
615 name = interfaces.get(interface['mac_address'])
618 interface['name'] = name
620 # Set udev rules for interfaces
622 InterfaceUtil.tg_set_interfaces_udev_rules(node)
625 def iface_update_numa_node(node):
626 """For all interfaces from topology file update numa node based on
627 information from the node.
629 :param node: Node from topology.
632 :raises ValueError: If numa node ia less than 0.
633 :raises RuntimeError: If update of numa node failes.
636 for if_key in Topology.get_node_interfaces(node):
637 if_pci = Topology.get_interface_pci_addr(node, if_key)
639 cmd = "cat /sys/bus/pci/devices/{}/numa_node".format(if_pci)
641 (ret, out, _) = ssh.exec_command(cmd)
646 if CpuUtils.cpu_node_count(node) == 1:
651 logger.trace('Reading numa location failed for: {0}'
654 Topology.set_interface_numa_node(node, if_key,
658 raise RuntimeError('Update numa node failed for: {0}'
    def update_all_numa_nodes(nodes, skip_tg=False):
        """For all nodes and all their interfaces from topology file update numa
        node information based on information from the node.

        :param nodes: Nodes in the topology.
        :param skip_tg: Skip TG node.
        :type nodes: dict
        :type skip_tg: bool
        :returns: Nothing.
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                InterfaceUtil.iface_update_numa_node(node)
            elif node['type'] == NodeType.TG and not skip_tg:
                InterfaceUtil.iface_update_numa_node(node)
679 def update_all_interface_data_on_all_nodes(nodes, skip_tg=False,
682 """Update interface names on all nodes in DICT__nodes.
684 This method updates the topology dictionary by querying interface lists
685 of all nodes mentioned in the topology dictionary.
687 :param nodes: Nodes in the topology.
688 :param skip_tg: Skip TG node.
689 :param skip_tg_udev: Skip udev rename on TG node.
690 :param numa_node: Retrieve numa_node location.
693 :type skip_tg_udev: bool
694 :type numa_node: bool
696 for node_data in nodes.values():
697 if node_data['type'] == NodeType.DUT:
698 InterfaceUtil.update_vpp_interface_data_on_node(node_data)
699 elif node_data['type'] == NodeType.TG and not skip_tg:
700 InterfaceUtil.update_tg_interface_data_on_node(
701 node_data, skip_tg_udev)
704 if node_data['type'] == NodeType.DUT:
705 InterfaceUtil.iface_update_numa_node(node_data)
706 elif node_data['type'] == NodeType.TG and not skip_tg:
707 InterfaceUtil.iface_update_numa_node(node_data)
710 def create_vlan_subinterface(node, interface, vlan):
711 """Create VLAN sub-interface on node.
713 :param node: Node to add VLAN subinterface on.
714 :param interface: Interface name on which create VLAN subinterface.
715 :param vlan: VLAN ID of the subinterface to be created.
719 :returns: Name and index of created subinterface.
721 :raises RuntimeError: if it is unable to create VLAN subinterface on the
724 iface_key = Topology.get_interface_by_name(node, interface)
725 sw_if_index = Topology.get_interface_sw_index(node, iface_key)
727 cmd = 'create_vlan_subif'
728 args = dict(sw_if_index=sw_if_index,
730 err_msg = 'Failed to create VLAN sub-interface on host {host}'.format(
732 with PapiExecutor(node) as papi_exec:
733 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
735 if_key = Topology.add_new_port(node, 'vlan_subif')
736 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
737 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
738 Topology.update_interface_name(node, if_key, ifc_name)
740 return '{ifc}.{vlan}'.format(ifc=interface, vlan=vlan), sw_if_index
743 def create_vxlan_interface(node, vni, source_ip, destination_ip):
744 """Create VXLAN interface and return sw if index of created interface.
746 :param node: Node where to create VXLAN interface.
747 :param vni: VXLAN Network Identifier.
748 :param source_ip: Source IP of a VXLAN Tunnel End Point.
749 :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
753 :type destination_ip: str
754 :returns: SW IF INDEX of created interface.
756 :raises RuntimeError: if it is unable to create VxLAN interface on the
759 src_address = ip_address(unicode(source_ip))
760 dst_address = ip_address(unicode(destination_ip))
762 cmd = 'vxlan_add_del_tunnel'
763 args = dict(is_add=1,
764 is_ipv6=1 if src_address.version == 6 else 0,
765 instance=Constants.BITWISE_NON_ZERO,
766 src_address=src_address.packed,
767 dst_address=dst_address.packed,
768 mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
770 decap_next_index=Constants.BITWISE_NON_ZERO,
772 err_msg = 'Failed to create VXLAN tunnel interface on host {host}'.\
773 format(host=node['host'])
774 with PapiExecutor(node) as papi_exec:
775 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
777 if_key = Topology.add_new_port(node, 'vxlan_tunnel')
778 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
779 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
780 Topology.update_interface_name(node, if_key, ifc_name)
785 def vxlan_dump(node, interface=None):
786 """Get VxLAN data for the given interface.
788 :param node: VPP node to get interface data from.
789 :param interface: Numeric index or name string of a specific interface.
790 If None, information about all VxLAN interfaces is returned.
792 :type interface: int or str
793 :returns: Dictionary containing data for the given VxLAN interface or if
794 interface=None, the list of dictionaries with all VxLAN interfaces.
796 :raises TypeError: if the data type of interface is neither basestring
799 if interface is not None:
800 sw_if_index = InterfaceUtil.get_interface_index(node, interface)
802 sw_if_index = int(Constants.BITWISE_NON_ZERO)
804 cmd = 'vxlan_tunnel_dump'
805 args = dict(sw_if_index=sw_if_index)
806 err_msg = 'Failed to get VXLAN dump on host {host}'.format(
808 with PapiExecutor(node) as papi_exec:
809 details = papi_exec.add(cmd, **args).get_details(err_msg)
811 def process_vxlan_dump(vxlan_dump):
812 """Process vxlan dump.
814 :param vxlan_dump: Vxlan interface dump.
815 :type vxlan_dump: dict
816 :returns: Processed vxlan interface dump.
819 if vxlan_dump['is_ipv6']:
820 vxlan_dump['src_address'] = \
821 inet_ntop(AF_INET6, vxlan_dump['src_address'])
822 vxlan_dump['dst_address'] = \
823 inet_ntop(AF_INET6, vxlan_dump['dst_address'])
825 vxlan_dump['src_address'] = \
826 inet_ntop(AF_INET, vxlan_dump['src_address'][0:4])
827 vxlan_dump['dst_address'] = \
828 inet_ntop(AF_INET, vxlan_dump['dst_address'][0:4])
831 data = list() if interface is None else dict()
832 for vxlan_dump in details:
833 if interface is None:
834 data.append(process_vxlan_dump(vxlan_dump))
835 elif vxlan_dump['sw_if_index'] == sw_if_index:
836 data = process_vxlan_dump(vxlan_dump)
839 logger.debug('VXLAN data:\n{vxlan_data}'.format(vxlan_data=data))
843 def vhost_user_dump(node):
844 """Get vhost-user data for the given node.
846 TODO: Move to VhostUser.py
848 :param node: VPP node to get interface data from.
850 :returns: List of dictionaries with all vhost-user interfaces.
853 cmd = 'sw_interface_vhost_user_dump'
854 err_msg = 'Failed to get vhost-user dump on host {host}'.format(
856 with PapiExecutor(node) as papi_exec:
857 details = papi_exec.add(cmd).get_details(err_msg)
859 def process_vhost_dump(vhost_dump):
860 """Process vhost dump.
862 :param vhost_dump: Vhost interface dump.
863 :type vhost_dump: dict
864 :returns: Processed vhost interface dump.
867 vhost_dump['interface_name'] = \
868 vhost_dump['interface_name'].rstrip('\x00')
869 vhost_dump['sock_filename'] = \
870 vhost_dump['sock_filename'].rstrip('\x00')
873 for vhost_dump in details:
875 process_vhost_dump(vhost_dump)
877 logger.debug('Vhost-user details:\n{vhost_details}'.format(
878 vhost_details=details))
882 def tap_dump(node, name=None):
883 """Get all TAP interface data from the given node, or data about
884 a specific TAP interface.
888 :param node: VPP node to get data from.
889 :param name: Optional name of a specific TAP interface.
892 :returns: Dictionary of information about a specific TAP interface, or
893 a List of dictionaries containing all TAP data for the given node.
896 cmd = 'sw_interface_tap_v2_dump'
897 err_msg = 'Failed to get TAP dump on host {host}'.format(
899 with PapiExecutor(node) as papi_exec:
900 details = papi_exec.add(cmd).get_details(err_msg)
902 def process_tap_dump(tap_dump):
905 :param tap_dump: Tap interface dump.
907 :returns: Processed tap interface dump.
910 tap_dump['dev_name'] = tap_dump['dev_name'].rstrip('\x00')
911 tap_dump['host_if_name'] = tap_dump['host_if_name'].rstrip('\x00')
912 tap_dump['host_namespace'] = \
913 tap_dump['host_namespace'].rstrip('\x00')
914 tap_dump['host_mac_addr'] = \
915 L2Util.bin_to_mac(tap_dump['host_mac_addr'])
916 tap_dump['host_ip4_addr'] = \
917 inet_ntop(AF_INET, tap_dump['host_ip4_addr'])
918 tap_dump['host_ip6_addr'] = \
919 inet_ntop(AF_INET6, tap_dump['host_ip6_addr'])
922 data = list() if name is None else dict()
923 for tap_dump in details:
925 data.append(process_tap_dump(tap_dump))
926 elif tap_dump.get('dev_name').rstrip('\x00') == name:
927 data = process_tap_dump(tap_dump)
930 logger.debug('TAP data:\n{tap_data}'.format(tap_data=data))
934 def create_subinterface(node, interface, sub_id, outer_vlan_id=None,
935 inner_vlan_id=None, type_subif=None):
936 """Create sub-interface on node. It is possible to set required
937 sub-interface type and VLAN tag(s).
939 :param node: Node to add sub-interface.
940 :param interface: Interface name on which create sub-interface.
941 :param sub_id: ID of the sub-interface to be created.
942 :param outer_vlan_id: Optional outer VLAN ID.
943 :param inner_vlan_id: Optional inner VLAN ID.
944 :param type_subif: Optional type of sub-interface. Values supported by
945 VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
948 :type interface: str or int
950 :type outer_vlan_id: int
951 :type inner_vlan_id: int
952 :type type_subif: str
953 :returns: Name and index of created sub-interface.
955 :raises RuntimeError: If it is not possible to create sub-interface.
957 subif_types = type_subif.split()
961 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
963 no_tags=1 if 'no_tags' in subif_types else 0,
964 one_tag=1 if 'one_tag' in subif_types else 0,
965 two_tags=1 if 'two_tags' in subif_types else 0,
966 dot1ad=1 if 'dot1ad' in subif_types else 0,
967 exact_match=1 if 'exact_match' in subif_types else 0,
968 default_sub=1 if 'default_sub' in subif_types else 0,
969 outer_vlan_id_any=1 if type_subif == 'default_sub' else 0,
970 inner_vlan_id_any=1 if type_subif == 'default_sub' else 0,
971 outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
972 inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0)
973 err_msg = 'Failed to create sub-interface on host {host}'.format(
975 with PapiExecutor(node) as papi_exec:
976 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
978 if_key = Topology.add_new_port(node, 'subinterface')
979 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
980 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
981 Topology.update_interface_name(node, if_key, ifc_name)
983 return '{ifc}.{s_id}'.format(ifc=interface, s_id=sub_id), sw_if_index
986 def create_gre_tunnel_interface(node, source_ip, destination_ip):
987 """Create GRE tunnel interface on node.
989 :param node: VPP node to add tunnel interface.
990 :param source_ip: Source of the GRE tunnel.
991 :param destination_ip: Destination of the GRE tunnel.
994 :type destination_ip: str
995 :returns: Name and index of created GRE tunnel interface.
997 :raises RuntimeError: If unable to create GRE tunnel interface.
999 cmd = 'gre_tunnel_add_del'
1000 tunnel = dict(type=0,
1001 instance=Constants.BITWISE_NON_ZERO,
1003 dst=str(destination_ip),
1006 args = dict(is_add=1,
1008 err_msg = 'Failed to create GRE tunnel interface on host {host}'.format(
1010 with PapiExecutor(node) as papi_exec:
1011 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1013 if_key = Topology.add_new_port(node, 'gre_tunnel')
1014 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
1015 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
1016 Topology.update_interface_name(node, if_key, ifc_name)
1018 return ifc_name, sw_if_index
1021 def vpp_create_loopback(node):
1022 """Create loopback interface on VPP node.
1024 :param node: Node to create loopback interface on.
1026 :returns: SW interface index.
1028 :raises RuntimeError: If it is not possible to create loopback on the
1031 cmd = 'create_loopback'
1032 args = dict(mac_address=0)
1033 err_msg = 'Failed to create loopback interface on host {host}'.format(
1035 with PapiExecutor(node) as papi_exec:
1036 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1038 if_key = Topology.add_new_port(node, 'loopback')
1039 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
1040 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
1041 Topology.update_interface_name(node, if_key, ifc_name)
1046 def vpp_create_bond_interface(node, mode, load_balance=None, mac=None):
1047 """Create bond interface on VPP node.
1049 :param node: DUT node from topology.
1050 :param mode: Link bonding mode.
1051 :param load_balance: Load balance (optional, valid for xor and lacp
1052 modes, otherwise ignored).
1053 :param mac: MAC address to assign to the bond interface (optional).
1056 :type load_balance: str
1058 :returns: Interface key (name) in topology.
1060 :raises RuntimeError: If it is not possible to create bond interface on
1064 args = dict(id=int(Constants.BITWISE_NON_ZERO),
1065 use_custom_mac=0 if mac is None else 1,
1066 mac_address=0 if mac is None else L2Util.mac_to_bin(mac),
1067 mode=getattr(LinkBondMode, '{md}'.format(
1068 md=mode.replace('-', '_').upper())).value,
1069 lb=0 if load_balance is None else getattr(
1070 LinkBondLoadBalance, '{lb}'.format(
1071 lb=load_balance.upper())).value)
1072 err_msg = 'Failed to create bond interface on host {host}'.format(
1074 with PapiExecutor(node) as papi_exec:
1075 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1077 InterfaceUtil.add_eth_interface(node, sw_if_index=sw_if_index,
1079 if_key = Topology.get_interface_by_sw_index(node, sw_if_index)
1084 def add_eth_interface(node, ifc_name=None, sw_if_index=None, ifc_pfx=None):
1085 """Add ethernet interface to current topology.
1087 :param node: DUT node from topology.
1088 :param ifc_name: Name of the interface.
1089 :param sw_if_index: SW interface index.
1090 :param ifc_pfx: Interface key prefix.
1093 :type sw_if_index: int
1096 if_key = Topology.add_new_port(node, ifc_pfx)
1098 if ifc_name and sw_if_index is None:
1099 sw_if_index = InterfaceUtil.vpp_get_interface_sw_index(
1101 Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
1102 if sw_if_index and ifc_name is None:
1103 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
1104 Topology.update_interface_name(node, if_key, ifc_name)
1105 ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_index)
1106 Topology.update_interface_mac_address(node, if_key, ifc_mac)
1109 def vpp_create_avf_interface(node, vf_pci_addr, num_rx_queues=None):
1110 """Create AVF interface on VPP node.
1112 :param node: DUT node from topology.
1113 :param vf_pci_addr: Virtual Function PCI address.
1114 :param num_rx_queues: Number of RX queues.
1116 :type vf_pci_addr: str
1117 :type num_rx_queues: int
1118 :returns: Interface key (name) in topology.
1120 :raises RuntimeError: If it is not possible to create AVF interface on
1124 args = dict(pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
1126 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1129 err_msg = 'Failed to create AVF interface on host {host}'.format(
1131 with PapiExecutor(node) as papi_exec:
1132 sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
1134 InterfaceUtil.add_eth_interface(node, sw_if_index=sw_if_index,
1136 if_key = Topology.get_interface_by_sw_index(node, sw_if_index)
1141 def vpp_enslave_physical_interface(node, interface, bond_if):
1142 """Enslave physical interface to bond interface on VPP node.
1144 :param node: DUT node from topology.
1145 :param interface: Physical interface key from topology file.
1146 :param bond_if: Load balance
1148 :type interface: str
1150 :raises RuntimeError: If it is not possible to enslave physical
1151 interface to bond interface on the node.
1153 cmd = 'bond_enslave'
1155 sw_if_index=Topology.get_interface_sw_index(node, interface),
1156 bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
1159 err_msg = 'Failed to enslave physical interface {ifc} to bond ' \
1160 'interface {bond} on host {host}'.format(ifc=interface,
1163 with PapiExecutor(node) as papi_exec:
1164 papi_exec.add(cmd, **args).get_reply(err_msg)
1167 def vpp_show_bond_data_on_node(node, verbose=False):
1168 """Show (detailed) bond information on VPP node.
1170 :param node: DUT node from topology.
1171 :param verbose: If detailed information is required or not.
1175 cmd = 'sw_interface_bond_dump'
1176 err_msg = 'Failed to get bond interface dump on host {host}'.format(
1179 data = ('Bond data on node {host}:\n'.format(host=node['host']))
1180 with PapiExecutor(node) as papi_exec:
1181 details = papi_exec.add(cmd).get_details(err_msg)
1183 for bond in details:
1184 data += ('{b}\n'.format(b=bond['interface_name'].rstrip('\x00')))
1185 data += (' mode: {m}\n'.format(m=LinkBondMode(
1186 bond['mode']).name.lower()))
1187 data += (' load balance: {lb}\n'.format(lb=LinkBondLoadBalance(
1188 bond['lb']).name.lower()))
1189 data += (' number of active slaves: {n}\n'.format(
1190 n=bond['active_slaves']))
1192 slave_data = InterfaceUtil.vpp_bond_slave_dump(
1193 node, Topology.get_interface_by_sw_index(
1194 node, bond['sw_if_index']))
1195 for slave in slave_data:
1196 if not slave['is_passive']:
1197 data += (' {s}\n'.format(s=slave['interface_name']))
1198 data += (' number of slaves: {n}\n'.format(n=bond['slaves']))
1200 for slave in slave_data:
1201 data += (' {s}\n'.format(s=slave['interface_name']))
1202 data += (' interface id: {i}\n'.format(i=bond['id']))
1203 data += (' sw_if_index: {i}\n'.format(i=bond['sw_if_index']))
1207 def vpp_bond_slave_dump(node, interface):
1208 """Get bond interface slave(s) data on VPP node.
1210 :param node: DUT node from topology.
1211 :param interface: Physical interface key from topology file.
1213 :type interface: str
1214 :returns: Bond slave interface data.
1217 cmd = 'sw_interface_slave_dump'
1218 args = dict(sw_if_index=Topology.get_interface_sw_index(
1220 err_msg = 'Failed to get slave dump on host {host}'.format(
1223 with PapiExecutor(node) as papi_exec:
1224 details = papi_exec.add(cmd, **args).get_details(err_msg)
1226 def process_slave_dump(slave_dump):
1227 """Process slave dump.
1229 :param slave_dump: Slave interface dump.
1230 :type slave_dump: dict
1231 :returns: Processed slave interface dump.
1234 slave_dump['interface_name'] = slave_dump['interface_name'].\
1238 for slave_dump in details:
1240 process_slave_dump(slave_dump)
1242 logger.debug('Slave data:\n{slave_data}'.format(slave_data=details))
1246 def vpp_show_bond_data_on_all_nodes(nodes, verbose=False):
1247 """Show (detailed) bond information on all VPP nodes in DICT__nodes.
1249 :param nodes: Nodes in the topology.
1250 :param verbose: If detailed information is required or not.
1254 for node_data in nodes.values():
1255 if node_data['type'] == NodeType.DUT:
1256 InterfaceUtil.vpp_show_bond_data_on_node(node_data, verbose)
1259 def vpp_enable_input_acl_interface(node, interface, ip_version,
1261 """Enable input acl on interface.
1263 :param node: VPP node to setup interface for input acl.
1264 :param interface: Interface to setup input acl.
1265 :param ip_version: Version of IP protocol.
1266 :param table_index: Classify table index.
1268 :type interface: str or int
1269 :type ip_version: str
1270 :type table_index: int
1272 cmd = 'input_acl_set_interface'
1274 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1275 ip4_table_index=table_index if ip_version == 'ip4'
1276 else Constants.BITWISE_NON_ZERO,
1277 ip6_table_index=table_index if ip_version == 'ip6'
1278 else Constants.BITWISE_NON_ZERO,
1279 l2_table_index=table_index if ip_version == 'l2'
1280 else Constants.BITWISE_NON_ZERO,
1282 err_msg = 'Failed to enable input acl on interface {ifc}'.format(
1284 with PapiExecutor(node) as papi_exec:
1285 papi_exec.add(cmd, **args).get_reply(err_msg)
1288 def get_interface_classify_table(node, interface):
1289 """Get name of classify table for the given interface.
1291 TODO: Move to Classify.py.
1293 :param node: VPP node to get data from.
1294 :param interface: Name or sw_if_index of a specific interface.
1296 :type interface: str or int
1297 :returns: Classify table name.
1300 if isinstance(interface, basestring):
1301 sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
1303 sw_if_index = interface
1305 cmd = 'classify_table_by_interface'
1306 args = dict(sw_if_index=sw_if_index)
1307 err_msg = 'Failed to get classify table name by interface {ifc}'.format(
1309 with PapiExecutor(node) as papi_exec:
1310 reply = papi_exec.add(cmd, **args).get_reply(err_msg)
1315 def get_sw_if_index(node, interface_name):
1316 """Get sw_if_index for the given interface from actual interface dump.
1318 :param node: VPP node to get interface data from.
1319 :param interface_name: Name of the specific interface.
1321 :type interface_name: str
1322 :returns: sw_if_index of the given interface.
1325 interface_data = InterfaceUtil.vpp_get_interface_data(
1326 node, interface=interface_name)
1327 return interface_data.get('sw_if_index')
1330 def vxlan_gpe_dump(node, interface_name=None):
1331 """Get VxLAN GPE data for the given interface.
1333 :param node: VPP node to get interface data from.
1334 :param interface_name: Name of the specific interface. If None,
1335 information about all VxLAN GPE interfaces is returned.
1337 :type interface_name: str
1338 :returns: Dictionary containing data for the given VxLAN GPE interface
1339 or if interface=None, the list of dictionaries with all VxLAN GPE
1341 :rtype: dict or list
1343 if interface_name is not None:
1344 sw_if_index = InterfaceUtil.get_interface_index(
1345 node, interface_name)
1347 sw_if_index = int(Constants.BITWISE_NON_ZERO)
1349 cmd = 'vxlan_gpe_tunnel_dump'
1350 args = dict(sw_if_index=sw_if_index)
1351 err_msg = 'Failed to get VXLAN-GPE dump on host {host}'.format(
1353 with PapiExecutor(node) as papi_exec:
1354 details = papi_exec.add(cmd, **args).get_details(err_msg)
1356 def process_vxlan_gpe_dump(vxlan_dump):
1357 """Process vxlan_gpe dump.
1359 :param vxlan_dump: Vxlan_gpe nterface dump.
1360 :type vxlan_dump: dict
1361 :returns: Processed vxlan_gpe interface dump.
1364 if vxlan_dump['is_ipv6']:
1365 vxlan_dump['local'] = \
1366 inet_ntop(AF_INET6, vxlan_dump['local'])
1367 vxlan_dump['remote'] = \
1368 inet_ntop(AF_INET6, vxlan_dump['remote'])
1370 vxlan_dump['local'] = \
1371 inet_ntop(AF_INET, vxlan_dump['local'][0:4])
1372 vxlan_dump['remote'] = \
1373 inet_ntop(AF_INET, vxlan_dump['remote'][0:4])
1376 data = list() if interface_name is None else dict()
1377 for vxlan_dump in details:
1378 if interface_name is None:
1379 data.append(process_vxlan_gpe_dump(vxlan_dump))
1380 elif vxlan_dump['sw_if_index'] == sw_if_index:
1381 data = process_vxlan_gpe_dump(vxlan_dump)
1384 logger.debug('VXLAN-GPE data:\n{vxlan_gpe_data}'.format(
1385 vxlan_gpe_data=data))
1389 def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
1390 """Assign VPP interface to specific VRF/FIB table.
1392 :param node: VPP node where the FIB and interface are located.
1393 :param interface: Interface to be assigned to FIB.
1394 :param table_id: VRF table ID.
1395 :param ipv6: Assign to IPv6 table. Default False.
1397 :type interface: str or int
1401 cmd = 'sw_interface_set_table'
1403 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1404 is_ipv6=1 if ipv6 else 0,
1405 vrf_id=int(table_id))
1406 err_msg = 'Failed to assign interface {ifc} to FIB table'.format(
1408 with PapiExecutor(node) as papi_exec:
1409 papi_exec.add(cmd, **args).get_reply(err_msg)
1412 def set_linux_interface_mac(node, interface, mac, namespace=None,
1414 """Set MAC address for interface in linux.
1416 :param node: Node where to execute command.
1417 :param interface: Interface in namespace.
1418 :param mac: MAC to be assigned to interface.
1419 :param namespace: Execute command in namespace. Optional
1420 :param vf_id: Virtual Function id. Optional
1422 :type interface: str
1424 :type namespace: str
1427 mac_str = 'vf {vf_id} mac {mac}'.format(vf_id=vf_id, mac=mac) \
1428 if vf_id is not None else 'address {mac}'.format(mac=mac)
1429 ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''
1431 cmd = ('{ns} ip link set {interface} {mac}'.
1432 format(ns=ns_str, interface=interface, mac=mac_str))
1433 exec_cmd_no_error(node, cmd, sudo=True)
1436 def set_linux_interface_trust_on(node, interface, namespace=None,
1438 """Set trust on (promisc) for interface in linux.
1440 :param node: Node where to execute command.
1441 :param interface: Interface in namespace.
1442 :param namespace: Execute command in namespace. Optional
1443 :param vf_id: Virtual Function id. Optional
1445 :type interface: str
1446 :type namespace: str
1449 trust_str = 'vf {vf_id} trust on'.format(vf_id=vf_id) \
1450 if vf_id is not None else 'trust on'
1451 ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''
1453 cmd = ('{ns} ip link set dev {interface} {trust}'.
1454 format(ns=ns_str, interface=interface, trust=trust_str))
1455 exec_cmd_no_error(node, cmd, sudo=True)
1458 def set_linux_interface_spoof_off(node, interface, namespace=None,
1460 """Set spoof off for interface in linux.
1462 :param node: Node where to execute command.
1463 :param interface: Interface in namespace.
1464 :param namespace: Execute command in namespace. Optional
1465 :param vf_id: Virtual Function id. Optional
1467 :type interface: str
1468 :type namespace: str
1471 spoof_str = 'vf {vf_id} spoof off'.format(vf_id=vf_id) \
1472 if vf_id is not None else 'spoof off'
1473 ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''
1475 cmd = ('{ns} ip link set dev {interface} {spoof}'.
1476 format(ns=ns_str, interface=interface, spoof=spoof_str))
1477 exec_cmd_no_error(node, cmd, sudo=True)
1480 def init_avf_interface(node, ifc_key, numvfs=1, osi_layer='L2'):
1481 """Init PCI device by creating VFs and bind them to vfio-pci for AVF
1482 driver testing on DUT.
1484 :param node: DUT node.
1485 :param ifc_key: Interface key from topology file.
1486 :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
1487 :param osi_layer: OSI Layer type to initialize TG with.
1488 Default value "L2" sets linux interface spoof off.
1492 :type osi_layer: str
1493 :returns: Virtual Function topology interface keys.
1495 :raises RuntimeError: If a reason preventing initialization is found.
1497 # Read PCI address and driver.
1498 pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
1499 pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
1500 uio_driver = Topology.get_uio_driver(node)
1501 kernel_driver = Topology.get_interface_driver(node, ifc_key)
1502 if kernel_driver != "i40e":
1504 "AVF needs i40e driver, not {driver} at node {host} ifc {ifc}"\
1505 .format(driver=kernel_driver, host=node["host"], ifc=ifc_key))
1506 current_driver = DUTSetup.get_pci_dev_driver(
1507 node, pf_pci_addr.replace(':', r'\:'))
1509 VPPUtil.stop_vpp_service(node)
1510 if current_driver != kernel_driver:
1511 # PCI device must be re-bound to kernel driver before creating VFs.
1512 DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
1513 # Stop VPP to prevent deadlock.
1514 # Unbind from current driver.
1515 DUTSetup.pci_driver_unbind(node, pf_pci_addr)
1516 # Bind to kernel driver.
1517 DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
1519 # Initialize PCI VFs
1520 DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
1523 # Set MAC address and bind each virtual function to uio driver.
1524 for vf_id in range(numvfs):
1525 vf_mac_addr = ":".join([pf_mac_addr[0], pf_mac_addr[2],
1526 pf_mac_addr[3], pf_mac_addr[4],
1527 pf_mac_addr[5], "{:02x}".format(vf_id)])
1529 pf_dev = '`basename /sys/bus/pci/devices/{pci}/net/*`'.\
1530 format(pci=pf_pci_addr)
1531 InterfaceUtil.set_linux_interface_trust_on(node, pf_dev,
1533 if osi_layer == 'L2':
1534 InterfaceUtil.set_linux_interface_spoof_off(node, pf_dev,
1536 InterfaceUtil.set_linux_interface_mac(node, pf_dev, vf_mac_addr,
1539 DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
1540 DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
1542 # Add newly created ports into topology file
1543 vf_ifc_name = '{pf_if_key}_vf'.format(pf_if_key=ifc_key)
1544 vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
1545 vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
1546 Topology.update_interface_name(node, vf_ifc_key,
1547 vf_ifc_name+str(vf_id+1))
1548 Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
1549 Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
1550 vf_ifc_keys.append(vf_ifc_key)
1555 def vpp_sw_interface_rx_placement_dump(node):
1556 """Dump VPP interface RX placement on node.
1558 :param node: Node to run command on.
1560 :returns: Thread mapping information as a list of dictionaries.
1563 cmd = 'sw_interface_rx_placement_dump'
1564 err_msg = "Failed to run '{cmd}' PAPI command on host {host}!".format(
1565 cmd=cmd, host=node['host'])
1566 with PapiExecutor(node) as papi_exec:
1567 for ifc in node['interfaces'].values():
1568 if ifc['vpp_sw_index'] is not None:
1569 papi_exec.add(cmd, sw_if_index=ifc['vpp_sw_index'])
1570 details = papi_exec.get_details(err_msg)
1571 return sorted(details, key=lambda k: k['sw_if_index'])
1574 def vpp_sw_interface_set_rx_placement(node, sw_if_index, queue_id,
1576 """Set interface RX placement to worker on node.
1578 :param node: Node to run command on.
1579 :param sw_if_index: VPP SW interface index.
1580 :param queue_id: VPP interface queue ID.
1581 :param worker_id: VPP worker ID (indexing from 0).
1583 :type sw_if_index: int
1585 :type worker_id: int
1586 :raises RuntimeError: If failed to run command on host or if no API
1589 cmd = 'sw_interface_set_rx_placement'
1590 err_msg = "Failed to set interface RX placement to worker on host " \
1591 "{host}!".format(host=node['host'])
1592 args = dict(sw_if_index=sw_if_index, queue_id=queue_id,
1593 worker_id=worker_id)
1594 with PapiExecutor(node) as papi_exec:
1595 papi_exec.add(cmd, **args).get_reply(err_msg)
1598 def vpp_round_robin_rx_placement(node, prefix):
1599 """Set Round Robin interface RX placement on all worker threads
1602 :param node: Topology nodes.
1603 :param prefix: Interface name prefix.
1608 worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
1611 for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
1612 for interface in node['interfaces'].values():
1613 if placement['sw_if_index'] == interface['vpp_sw_index'] \
1614 and prefix in interface['name']:
1615 InterfaceUtil.vpp_sw_interface_set_rx_placement(
1616 node, placement['sw_if_index'], placement['queue_id'],
1617 worker_id % worker_cnt)
1621 def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix):
1622 """Set Round Robin interface RX placement on all worker threads
1625 :param nodes: Topology nodes.
1626 :param prefix: Interface name prefix.
1630 for node in nodes.values():
1631 if node['type'] == NodeType.DUT:
1632 InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)