1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Interface util library."""
16 from socket import AF_INET, AF_INET6, inet_ntop
17 from time import sleep
19 from enum import IntEnum
20 from ipaddress import ip_address
21 from robot.api import logger
23 from resources.libraries.python.Constants import Constants
24 from resources.libraries.python.CpuUtils import CpuUtils
25 from resources.libraries.python.DUTSetup import DUTSetup
26 from resources.libraries.python.L2Util import L2Util
27 from resources.libraries.python.PapiExecutor import PapiExecutor
28 from resources.libraries.python.parsers.JsonParser import JsonParser
29 from resources.libraries.python.ssh import SSH, exec_cmd_no_error
30 from resources.libraries.python.topology import NodeType, Topology
31 from resources.libraries.python.VPPUtil import VPPUtil
class LinkBondLoadBalance(IntEnum):
    """Link bonding load balance.

    Numeric values match the VPP `bond_create` API `lb` argument.
    """
    # Layer-2 (MAC) based hashing.
    L2 = 0
    # Layer-3/4 based hashing.
    L34 = 1
    # Layer-2/3 based hashing.
    L23 = 2
class LinkBondMode(IntEnum):
    """Link bonding mode.

    Numeric values match the VPP `bond_create` API `mode` argument.
    """
    ROUND_ROBIN = 1
    ACTIVE_BACKUP = 2
    XOR = 3
    BROADCAST = 4
    LACP = 5
50 class InterfaceUtil(object):
51 """General utilities for managing interfaces"""
53 __UDEV_IF_RULES_FILE = '/etc/udev/rules.d/10-network.rules'
56 def pci_to_int(pci_str):
57 """Convert PCI address from string format (0000:18:0a.0) to
58 integer representation (169345024).
60 :param pci_str: PCI address in string representation.
62 :returns: Integer representation of PCI address.
65 pci = list(pci_str.split(':')[0:2])
66 pci.extend(pci_str.split(':')[2].split('.'))
68 return (int(pci[0], 16) | int(pci[1], 16) << 16 |
69 int(pci[2], 16) << 24 | int(pci[3], 16) << 29)
72 def get_interface_index(node, interface):
73 """Get interface sw_if_index from topology file.
75 :param node: Node where the interface is.
76 :param interface: Numeric index or name string of a specific interface.
78 :type interface: str or int
79 :returns: SW interface index.
83 sw_if_index = int(interface)
85 sw_if_index = Topology.get_interface_sw_index(node, interface)
86 if sw_if_index is None:
88 Topology.get_interface_sw_index_by_name(node, interface)
89 except TypeError as err:
90 raise TypeError('Wrong interface format {ifc}: {err}'.format(
91 ifc=interface, err=err.message))
96 def set_interface_state(node, interface, state, if_type='key'):
97 """Set interface state on a node.
99 Function can be used for DUTs as well as for TGs.
101 :param node: Node where the interface is.
102 :param interface: Interface key or sw_if_index or name.
103 :param state: One of 'up' or 'down'.
104 :param if_type: Interface type
106 :type interface: str or int
110 :raises ValueError: If the interface type is unknown.
111 :raises ValueError: If the state of interface is unexpected.
112 :raises ValueError: If the node has an unknown node type.
115 if isinstance(interface, basestring):
116 sw_if_index = Topology.get_interface_sw_index(node, interface)
117 iface_name = Topology.get_interface_name(node, interface)
119 sw_if_index = interface
120 elif if_type == 'name':
121 iface_key = Topology.get_interface_by_name(node, interface)
122 if iface_key is not None:
123 sw_if_index = Topology.get_interface_sw_index(node, iface_key)
124 iface_name = interface
126 raise ValueError('Unknown if_type: {type}'.format(type=if_type))
128 if node['type'] == NodeType.DUT:
131 elif state == 'down':
134 raise ValueError('Unexpected interface state: {state}'.format(
136 cmd = 'sw_interface_set_flags'
137 err_msg = 'Failed to set interface state on host {host}'.format(
139 args = dict(sw_if_index=sw_if_index,
140 admin_up_down=admin_up_down)
141 with PapiExecutor(node) as papi_exec:
142 papi_exec.add(cmd, **args).get_replies(err_msg).\
143 verify_reply(err_msg=err_msg)
144 elif node['type'] == NodeType.TG or node['type'] == NodeType.VM:
145 cmd = 'ip link set {ifc} {state}'.format(
146 ifc=iface_name, state=state)
147 exec_cmd_no_error(node, cmd, sudo=True)
149 raise ValueError('Node {} has unknown NodeType: "{}"'
150 .format(node['host'], node['type']))
153 def set_interface_ethernet_mtu(node, iface_key, mtu):
154 """Set Ethernet MTU for specified interface.
156 Function can be used only for TGs.
158 :param node: Node where the interface is.
159 :param iface_key: Interface key from topology file.
160 :param mtu: MTU to set.
165 :raises ValueError: If the node type is "DUT".
166 :raises ValueError: If the node has an unknown node type.
168 if node['type'] == NodeType.DUT:
169 raise ValueError('Node {}: Setting Ethernet MTU for interface '
170 'on DUT nodes not supported', node['host'])
171 elif node['type'] == NodeType.TG:
172 iface_name = Topology.get_interface_name(node, iface_key)
173 cmd = 'ip link set {} mtu {}'.format(iface_name, mtu)
174 exec_cmd_no_error(node, cmd, sudo=True)
176 raise ValueError('Node {} has unknown NodeType: "{}"'
177 .format(node['host'], node['type']))
180 def set_default_ethernet_mtu_on_all_interfaces_on_node(node):
181 """Set default Ethernet MTU on all interfaces on node.
183 Function can be used only for TGs.
185 :param node: Node where to set default MTU.
189 for ifc in node['interfaces']:
190 InterfaceUtil.set_interface_ethernet_mtu(node, ifc, 1500)
193 def vpp_set_interface_mtu(node, interface, mtu=9200):
194 """Set Ethernet MTU on interface.
196 :param node: VPP node.
197 :param interface: Interface to setup MTU. Default: 9200.
198 :param mtu: Ethernet MTU size in Bytes.
200 :type interface: str or int
203 if isinstance(interface, basestring):
204 sw_if_index = Topology.get_interface_sw_index(node, interface)
206 sw_if_index = interface
208 cmd = 'hw_interface_set_mtu'
209 err_msg = 'Failed to set interface MTU on host {host}'.format(
211 args = dict(sw_if_index=sw_if_index,
214 with PapiExecutor(node) as papi_exec:
215 papi_exec.add(cmd, **args).get_replies(err_msg).\
216 verify_reply(err_msg=err_msg)
217 except AssertionError as err:
218 # TODO: Make failure tolerance optional.
219 logger.debug("Setting MTU failed. Expected?\n{err}".format(
223 def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
224 """Set Ethernet MTU on all interfaces.
226 :param node: VPP node.
227 :param mtu: Ethernet MTU size in Bytes. Default: 9200.
231 for interface in node['interfaces']:
232 InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
235 def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
236 """Set Ethernet MTU on all interfaces on all DUTs.
238 :param nodes: VPP nodes.
239 :param mtu: Ethernet MTU size in Bytes. Default: 9200.
243 for node in nodes.values():
244 if node['type'] == NodeType.DUT:
245 InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
248 def vpp_node_interfaces_ready_wait(node, retries=15):
249 """Wait until all interfaces with admin-up are in link-up state.
251 :param node: Node to wait on.
252 :param retries: Number of retries to check interface status (optional,
257 :raises RuntimeError: If any interface is not in link-up state after
258 defined number of retries.
260 for _ in xrange(0, retries):
262 out = InterfaceUtil.vpp_get_interface_data(node)
263 for interface in out:
264 if interface.get('admin_up_down') == 1:
265 if interface.get('link_up_down') != 1:
266 not_ready.append(interface.get('interface_name'))
270 logger.debug('Interfaces still in link-down state:\n{ifs} '
271 '\nWaiting...'.format(ifs=not_ready))
274 err = 'Timeout, interfaces not up:\n{ifs}'.format(ifs=not_ready) \
275 if 'not_ready' in locals() else 'No check executed!'
276 raise RuntimeError(err)
279 def all_vpp_interfaces_ready_wait(nodes, retries=15):
280 """Wait until all interfaces with admin-up are in link-up state for all
281 nodes in the topology.
283 :param nodes: Nodes in the topology.
284 :param retries: Number of retries to check interface status (optional,
290 for node in nodes.values():
291 if node['type'] == NodeType.DUT:
292 InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)
295 def vpp_get_interface_data(node, interface=None):
296 """Get all interface data from a VPP node. If a name or
297 sw_interface_index is provided, return only data for the matching
300 :param node: VPP node to get interface data from.
301 :param interface: Numeric index or name string of a specific interface.
303 :type interface: int or str
304 :returns: List of dictionaries containing data for each interface, or a
305 single dictionary for the specified interface.
307 :raises TypeError: if the data type of interface is neither basestring
310 if interface is not None:
311 if isinstance(interface, basestring):
312 param = 'interface_name'
313 elif isinstance(interface, int):
314 param = 'sw_if_index'
316 raise TypeError('Wrong interface format {ifc}'.format(
321 cmd = 'sw_interface_dump'
322 cmd_reply = 'sw_interface_details'
323 args = dict(name_filter_valid=0,
325 err_msg = 'Failed to get interface dump on host {host}'.format(
327 with PapiExecutor(node) as papi_exec:
328 papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
330 papi_if_dump = papi_resp.reply[0]['api_reply']
332 def process_if_dump(if_dump):
333 """Process interface dump.
335 :param if_dump: Interface dump.
337 :returns: Processed interface dump.
340 if_dump['interface_name'] = if_dump['interface_name'].rstrip('\x00')
341 if_dump['tag'] = if_dump['tag'].rstrip('\x00')
342 if_dump['l2_address'] = L2Util.bin_to_mac(if_dump['l2_address'])
343 if_dump['b_dmac'] = L2Util.bin_to_mac(if_dump['b_dmac'])
344 if_dump['b_smac'] = L2Util.bin_to_mac(if_dump['b_smac'])
347 data = list() if interface is None else dict()
348 for item in papi_if_dump:
349 if interface is None:
350 data.append(process_if_dump(item[cmd_reply]))
351 elif str(item[cmd_reply].get(param)).rstrip('\x00') == \
353 data = process_if_dump(item[cmd_reply])
356 logger.debug('Interface data:\n{if_data}'.format(if_data=data))
360 def vpp_get_interface_name(node, sw_if_index):
361 """Get interface name for the given SW interface index from actual
364 :param node: VPP node to get interface data from.
365 :param sw_if_index: SW interface index of the specific interface.
367 :type sw_if_index: int
368 :returns: Name of the given interface.
371 if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
372 if if_data['sup_sw_if_index'] != if_data['sw_if_index']:
373 if_data = InterfaceUtil.vpp_get_interface_data(
374 node, if_data['sup_sw_if_index'])
376 return if_data.get('interface_name')
379 def vpp_get_interface_sw_index(node, interface_name):
380 """Get interface name for the given SW interface index from actual
383 :param node: VPP node to get interface data from.
384 :param interface_name: Interface name.
386 :type interface_name: str
387 :returns: Name of the given interface.
390 if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)
392 return if_data.get('sw_if_index')
395 def vpp_get_interface_mac(node, interface):
396 """Get MAC address for the given interface from actual interface dump.
398 :param node: VPP node to get interface data from.
399 :param interface: Numeric index or name string of a specific interface.
401 :type interface: int or str
402 :returns: MAC address.
405 if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
406 if if_data['sup_sw_if_index'] != if_data['sw_if_index']:
407 if_data = InterfaceUtil.vpp_get_interface_data(
408 node, if_data['sup_sw_if_index'])
410 return if_data.get('l2_address')
413 def tg_set_interface_driver(node, pci_addr, driver):
414 """Set interface driver on the TG node.
416 :param node: Node to set interface driver on (must be TG node).
417 :param pci_addr: PCI address of the interface.
418 :param driver: Driver name.
422 :raises RuntimeError: If unbinding from the current driver fails.
423 :raises RuntimeError: If binding to the new driver fails.
425 old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
426 if old_driver == driver:
432 # Unbind from current driver
433 if old_driver is not None:
434 cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/unbind"'\
435 .format(pci_addr, old_driver)
436 (ret_code, _, _) = ssh.exec_command_sudo(cmd)
437 if int(ret_code) != 0:
438 raise RuntimeError("'{0}' failed on '{1}'"
439 .format(cmd, node['host']))
441 # Bind to the new driver
442 cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/bind"'\
443 .format(pci_addr, driver)
444 (ret_code, _, _) = ssh.exec_command_sudo(cmd)
445 if int(ret_code) != 0:
446 raise RuntimeError("'{0}' failed on '{1}'"
447 .format(cmd, node['host']))
450 def tg_get_interface_driver(node, pci_addr):
451 """Get interface driver from the TG node.
453 :param node: Node to get interface driver on (must be TG node).
454 :param pci_addr: PCI address of the interface.
457 :returns: Interface driver or None if not found.
459 :raises RuntimeError: If PCI rescan or lspci command execution failed.
461 return DUTSetup.get_pci_dev_driver(node, pci_addr)
464 def tg_set_interfaces_udev_rules(node):
465 """Set udev rules for interfaces.
467 Create udev rules file in /etc/udev/rules.d where are rules for each
468 interface used by TG node, based on MAC interface has specific name.
469 So after unbind and bind again to kernel driver interface has same
470 name as before. This must be called after TG has set name for each
471 port in topology dictionary.
473 SUBSYSTEM=="net", ACTION=="add", ATTR{address}=="52:54:00:e1:8a:0f",
476 :param node: Node to set udev rules on (must be TG node).
478 :raises RuntimeError: If setting of udev rules fails.
483 cmd = 'rm -f {0}'.format(InterfaceUtil.__UDEV_IF_RULES_FILE)
484 (ret_code, _, _) = ssh.exec_command_sudo(cmd)
485 if int(ret_code) != 0:
486 raise RuntimeError("'{0}' failed on '{1}'"
487 .format(cmd, node['host']))
489 for interface in node['interfaces'].values():
490 rule = 'SUBSYSTEM==\\"net\\", ACTION==\\"add\\", ATTR{address}' + \
491 '==\\"' + interface['mac_address'] + '\\", NAME=\\"' + \
492 interface['name'] + '\\"'
493 cmd = 'sh -c "echo \'{0}\' >> {1}"'.format(
494 rule, InterfaceUtil.__UDEV_IF_RULES_FILE)
495 (ret_code, _, _) = ssh.exec_command_sudo(cmd)
496 if int(ret_code) != 0:
497 raise RuntimeError("'{0}' failed on '{1}'"
498 .format(cmd, node['host']))
500 cmd = '/etc/init.d/udev restart'
501 ssh.exec_command_sudo(cmd)
504 def tg_set_interfaces_default_driver(node):
505 """Set interfaces default driver specified in topology yaml file.
507 :param node: Node to setup interfaces driver on (must be TG node).
510 for interface in node['interfaces'].values():
511 InterfaceUtil.tg_set_interface_driver(node,
512 interface['pci_address'],
516 def update_vpp_interface_data_on_node(node):
517 """Update vpp generated interface data for a given node in DICT__nodes.
519 Updates interface names, software if index numbers and any other details
520 generated specifically by vpp that are unknown before testcase run.
521 It does this by dumping interface list from all devices using python
522 api, and pairing known information from topology (mac address) to state
525 :param node: Node selected from DICT__nodes.
528 interface_list = InterfaceUtil.vpp_get_interface_data(node)
529 interface_dict = dict()
530 for ifc in interface_list:
531 interface_dict[ifc['l2_address']] = ifc
533 for if_name, if_data in node['interfaces'].items():
534 ifc_dict = interface_dict.get(if_data['mac_address'])
535 if ifc_dict is not None:
536 if_data['name'] = ifc_dict['interface_name']
537 if_data['vpp_sw_index'] = ifc_dict['sw_if_index']
538 if_data['mtu'] = ifc_dict['mtu'][0]
539 logger.trace('Interface {ifc} found by MAC {mac}'.format(
540 ifc=if_name, mac=if_data['mac_address']))
542 logger.trace('Interface {ifc} not found by MAC {mac}'.format(
543 ifc=if_name, mac=if_data['mac_address']))
544 if_data['vpp_sw_index'] = None
547 def update_nic_interface_names(node):
548 """Update interface names based on nic type and PCI address.
550 This method updates interface names in the same format as VPP does.
552 :param node: Node dictionary.
555 for ifc in node['interfaces'].values():
556 if_pci = ifc['pci_address'].replace('.', ':').split(':')
557 bus = '{:x}'.format(int(if_pci[1], 16))
558 dev = '{:x}'.format(int(if_pci[2], 16))
559 fun = '{:x}'.format(int(if_pci[3], 16))
560 loc = '{bus}/{dev}/{fun}'.format(bus=bus, dev=dev, fun=fun)
561 if ifc['model'] == 'Intel-XL710':
562 ifc['name'] = 'FortyGigabitEthernet{loc}'.format(loc=loc)
563 elif ifc['model'] == 'Intel-X710':
564 ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
565 elif ifc['model'] == 'Intel-X520-DA2':
566 ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
567 elif ifc['model'] == 'Cisco-VIC-1385':
568 ifc['name'] = 'FortyGigabitEthernet{loc}'.format(loc=loc)
569 elif ifc['model'] == 'Cisco-VIC-1227':
570 ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
572 ifc['name'] = 'UnknownEthernet{loc}'.format(loc=loc)
575 def update_nic_interface_names_on_all_duts(nodes):
576 """Update interface names based on nic type and PCI address on all DUTs.
578 This method updates interface names in the same format as VPP does.
580 :param nodes: Topology nodes.
583 for node in nodes.values():
584 if node['type'] == NodeType.DUT:
585 InterfaceUtil.update_nic_interface_names(node)
588 def update_tg_interface_data_on_node(node, skip_tg_udev=False):
589 """Update interface name for TG/linux node in DICT__nodes.
592 # for dev in `ls /sys/class/net/`;
593 > do echo "\"`cat /sys/class/net/$dev/address`\": \"$dev\""; done
594 "52:54:00:9f:82:63": "eth0"
595 "52:54:00:77:ae:a9": "eth1"
596 "52:54:00:e1:8a:0f": "eth2"
597 "00:00:00:00:00:00": "lo"
599 :param node: Node selected from DICT__nodes.
600 :param skip_tg_udev: Skip udev rename on TG node.
602 :type skip_tg_udev: bool
603 :raises RuntimeError: If getting of interface name and MAC fails.
605 # First setup interface driver specified in yaml file
606 InterfaceUtil.tg_set_interfaces_default_driver(node)
608 # Get interface names
612 cmd = ('for dev in `ls /sys/class/net/`; do echo "\\"`cat '
613 '/sys/class/net/$dev/address`\\": \\"$dev\\""; done;')
615 (ret_code, stdout, _) = ssh.exec_command(cmd)
616 if int(ret_code) != 0:
617 raise RuntimeError('Get interface name and MAC failed')
618 tmp = "{" + stdout.rstrip().replace('\n', ',') + "}"
619 interfaces = JsonParser().parse_data(tmp)
620 for interface in node['interfaces'].values():
621 name = interfaces.get(interface['mac_address'])
624 interface['name'] = name
626 # Set udev rules for interfaces
628 InterfaceUtil.tg_set_interfaces_udev_rules(node)
631 def iface_update_numa_node(node):
632 """For all interfaces from topology file update numa node based on
633 information from the node.
635 :param node: Node from topology.
638 :raises ValueError: If numa node ia less than 0.
639 :raises RuntimeError: If update of numa node failes.
642 for if_key in Topology.get_node_interfaces(node):
643 if_pci = Topology.get_interface_pci_addr(node, if_key)
645 cmd = "cat /sys/bus/pci/devices/{}/numa_node".format(if_pci)
647 (ret, out, _) = ssh.exec_command(cmd)
652 if CpuUtils.cpu_node_count(node) == 1:
657 logger.trace('Reading numa location failed for: {0}'
660 Topology.set_interface_numa_node(node, if_key,
664 raise RuntimeError('Update numa node failed for: {0}'
668 def update_all_numa_nodes(nodes, skip_tg=False):
669 """For all nodes and all their interfaces from topology file update numa
670 node information based on information from the node.
672 :param nodes: Nodes in the topology.
673 :param skip_tg: Skip TG node
678 for node in nodes.values():
679 if node['type'] == NodeType.DUT:
680 InterfaceUtil.iface_update_numa_node(node)
681 elif node['type'] == NodeType.TG and not skip_tg:
682 InterfaceUtil.iface_update_numa_node(node)
685 def update_all_interface_data_on_all_nodes(nodes, skip_tg=False,
688 """Update interface names on all nodes in DICT__nodes.
690 This method updates the topology dictionary by querying interface lists
691 of all nodes mentioned in the topology dictionary.
693 :param nodes: Nodes in the topology.
694 :param skip_tg: Skip TG node.
695 :param skip_tg_udev: Skip udev rename on TG node.
696 :param numa_node: Retrieve numa_node location.
699 :type skip_tg_udev: bool
700 :type numa_node: bool
702 for node_data in nodes.values():
703 if node_data['type'] == NodeType.DUT:
704 InterfaceUtil.update_vpp_interface_data_on_node(node_data)
705 elif node_data['type'] == NodeType.TG and not skip_tg:
706 InterfaceUtil.update_tg_interface_data_on_node(
707 node_data, skip_tg_udev)
710 if node_data['type'] == NodeType.DUT:
711 InterfaceUtil.iface_update_numa_node(node_data)
712 elif node_data['type'] == NodeType.TG and not skip_tg:
713 InterfaceUtil.iface_update_numa_node(node_data)
716 def create_vlan_subinterface(node, interface, vlan):
717 """Create VLAN sub-interface on node.
719 :param node: Node to add VLAN subinterface on.
720 :param interface: Interface name on which create VLAN subinterface.
721 :param vlan: VLAN ID of the subinterface to be created.
725 :returns: Name and index of created subinterface.
727 :raises RuntimeError: if it is unable to create VLAN subinterface on the
730 iface_key = Topology.get_interface_by_name(node, interface)
731 sw_if_index = Topology.get_interface_sw_index(node, iface_key)
733 cmd = 'create_vlan_subif'
734 args = dict(sw_if_index=sw_if_index,
736 err_msg = 'Failed to create VLAN sub-interface on host {host}'.format(
738 with PapiExecutor(node) as papi_exec:
739 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
740 verify_reply(err_msg=err_msg)
742 sw_if_idx = papi_resp['sw_if_index']
743 if_key = Topology.add_new_port(node, 'vlan_subif')
744 Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
745 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
746 Topology.update_interface_name(node, if_key, ifc_name)
748 return '{ifc}.{vlan}'.format(ifc=interface, vlan=vlan), sw_if_idx
751 def create_vxlan_interface(node, vni, source_ip, destination_ip):
752 """Create VXLAN interface and return sw if index of created interface.
754 :param node: Node where to create VXLAN interface.
755 :param vni: VXLAN Network Identifier.
756 :param source_ip: Source IP of a VXLAN Tunnel End Point.
757 :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
761 :type destination_ip: str
762 :returns: SW IF INDEX of created interface.
764 :raises RuntimeError: if it is unable to create VxLAN interface on the
767 src_address = ip_address(unicode(source_ip))
768 dst_address = ip_address(unicode(destination_ip))
770 cmd = 'vxlan_add_del_tunnel'
771 args = dict(is_add=1,
772 is_ipv6=1 if src_address.version == 6 else 0,
773 instance=Constants.BITWISE_NON_ZERO,
774 src_address=src_address.packed,
775 dst_address=dst_address.packed,
776 mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
778 decap_next_index=Constants.BITWISE_NON_ZERO,
780 err_msg = 'Failed to create VXLAN tunnel interface on host {host}'.\
781 format(host=node['host'])
782 with PapiExecutor(node) as papi_exec:
783 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
784 verify_reply(err_msg=err_msg)
786 sw_if_idx = papi_resp['sw_if_index']
787 if_key = Topology.add_new_port(node, 'vxlan_tunnel')
788 Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
789 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
790 Topology.update_interface_name(node, if_key, ifc_name)
795 def vxlan_dump(node, interface=None):
796 """Get VxLAN data for the given interface.
798 :param node: VPP node to get interface data from.
799 :param interface: Numeric index or name string of a specific interface.
800 If None, information about all VxLAN interfaces is returned.
802 :type interface: int or str
803 :returns: Dictionary containing data for the given VxLAN interface or if
804 interface=None, the list of dictionaries with all VxLAN interfaces.
806 :raises TypeError: if the data type of interface is neither basestring
809 if interface is not None:
810 sw_if_index = InterfaceUtil.get_interface_index(node, interface)
812 sw_if_index = int(Constants.BITWISE_NON_ZERO)
814 cmd = 'vxlan_tunnel_dump'
815 cmd_reply = 'vxlan_tunnel_details'
816 args = dict(sw_if_index=sw_if_index)
817 err_msg = 'Failed to get VXLAN dump on host {host}'.format(
819 with PapiExecutor(node) as papi_exec:
820 papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
822 papi_vxlan_dump = papi_resp.reply[0]['api_reply']
824 def process_vxlan_dump(vxlan_dump):
825 """Process vxlan dump.
827 :param vxlan_dump: Vxlan interface dump.
828 :type vxlan_dump: dict
829 :returns: Processed vxlan interface dump.
832 if vxlan_dump['is_ipv6']:
833 vxlan_dump['src_address'] = \
834 inet_ntop(AF_INET6, vxlan_dump['src_address'])
835 vxlan_dump['dst_address'] = \
836 inet_ntop(AF_INET6, vxlan_dump['dst_address'])
838 vxlan_dump['src_address'] = \
839 inet_ntop(AF_INET, vxlan_dump['src_address'][0:4])
840 vxlan_dump['dst_address'] = \
841 inet_ntop(AF_INET, vxlan_dump['dst_address'][0:4])
844 data = list() if interface is None else dict()
845 for item in papi_vxlan_dump:
846 if interface is None:
847 data.append(process_vxlan_dump(item[cmd_reply]))
848 elif item[cmd_reply]['sw_if_index'] == sw_if_index:
849 data = process_vxlan_dump(item[cmd_reply])
852 logger.debug('VXLAN data:\n{vxlan_data}'.format(vxlan_data=data))
856 def vhost_user_dump(node):
857 """Get vhost-user data for the given node.
859 TODO: Move to VhostUser.py
861 :param node: VPP node to get interface data from.
863 :returns: List of dictionaries with all vhost-user interfaces.
866 cmd = 'sw_interface_vhost_user_dump'
867 cmd_reply = 'sw_interface_vhost_user_details'
868 err_msg = 'Failed to get vhost-user dump on host {host}'.format(
870 with PapiExecutor(node) as papi_exec:
871 papi_resp = papi_exec.add(cmd).get_dump(err_msg)
873 papi_vxlan_dump = papi_resp.reply[0]['api_reply']
875 def process_vhost_dump(vhost_dump):
876 """Process vhost dump.
878 :param vhost_dump: Vhost interface dump.
879 :type vhost_dump: dict
880 :returns: Processed vhost interface dump.
883 vhost_dump['interface_name'] = \
884 vhost_dump['interface_name'].rstrip('\x00')
885 vhost_dump['sock_filename'] = \
886 vhost_dump['sock_filename'].rstrip('\x00')
890 for item in papi_vxlan_dump:
891 data.append(process_vhost_dump(item[cmd_reply]))
893 logger.debug('Vhost-user data:\n{vhost_data}'.format(vhost_data=data))
897 def tap_dump(node, name=None):
898 """Get all TAP interface data from the given node, or data about
899 a specific TAP interface.
903 :param node: VPP node to get data from.
904 :param name: Optional name of a specific TAP interface.
907 :returns: Dictionary of information about a specific TAP interface, or
908 a List of dictionaries containing all TAP data for the given node.
911 cmd = 'sw_interface_tap_v2_dump'
912 cmd_reply = 'sw_interface_tap_v2_details'
913 err_msg = 'Failed to get TAP dump on host {host}'.format(
915 with PapiExecutor(node) as papi_exec:
916 papi_resp = papi_exec.add(cmd).get_dump(err_msg)
918 papi_tap_dump = papi_resp.reply[0]['api_reply']
920 def process_tap_dump(tap_dump):
923 :param tap_dump: Tap interface dump.
925 :returns: Processed tap interface dump.
928 tap_dump['dev_name'] = tap_dump['dev_name'].rstrip('\x00')
929 tap_dump['host_if_name'] = tap_dump['host_if_name'].rstrip('\x00')
930 tap_dump['host_namespace'] = \
931 tap_dump['host_namespace'].rstrip('\x00')
932 tap_dump['host_mac_addr'] = \
933 L2Util.bin_to_mac(tap_dump['host_mac_addr'])
934 tap_dump['host_ip4_addr'] = \
935 inet_ntop(AF_INET, tap_dump['host_ip4_addr'])
936 tap_dump['host_ip6_addr'] = \
937 inet_ntop(AF_INET6, tap_dump['host_ip6_addr'])
940 data = list() if name is None else dict()
941 for item in papi_tap_dump:
943 data.append(process_tap_dump(item[cmd_reply]))
944 elif item[cmd_reply].get('dev_name').rstrip('\x00') == name:
945 data = process_tap_dump(item[cmd_reply])
948 logger.debug('TAP data:\n{tap_data}'.format(tap_data=data))
952 def create_subinterface(node, interface, sub_id, outer_vlan_id=None,
953 inner_vlan_id=None, type_subif=None):
954 """Create sub-interface on node. It is possible to set required
955 sub-interface type and VLAN tag(s).
957 :param node: Node to add sub-interface.
958 :param interface: Interface name on which create sub-interface.
959 :param sub_id: ID of the sub-interface to be created.
960 :param outer_vlan_id: Optional outer VLAN ID.
961 :param inner_vlan_id: Optional inner VLAN ID.
962 :param type_subif: Optional type of sub-interface. Values supported by
963 VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
966 :type interface: str or int
968 :type outer_vlan_id: int
969 :type inner_vlan_id: int
970 :type type_subif: str
971 :returns: Name and index of created sub-interface.
973 :raises RuntimeError: If it is not possible to create sub-interface.
975 subif_types = type_subif.split()
979 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
981 no_tags=1 if 'no_tags' in subif_types else 0,
982 one_tag=1 if 'one_tag' in subif_types else 0,
983 two_tags=1 if 'two_tags' in subif_types else 0,
984 dot1ad=1 if 'dot1ad' in subif_types else 0,
985 exact_match=1 if 'exact_match' in subif_types else 0,
986 default_sub=1 if 'default_sub' in subif_types else 0,
987 outer_vlan_id_any=1 if type_subif == 'default_sub' else 0,
988 inner_vlan_id_any=1 if type_subif == 'default_sub' else 0,
989 outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
990 inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0)
991 err_msg = 'Failed to create sub-interface on host {host}'.format(
993 with PapiExecutor(node) as papi_exec:
994 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
995 verify_reply(err_msg=err_msg)
997 sw_subif_idx = papi_resp['sw_if_index']
998 if_key = Topology.add_new_port(node, 'subinterface')
999 Topology.update_interface_sw_if_index(node, if_key, sw_subif_idx)
1000 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_subif_idx)
1001 Topology.update_interface_name(node, if_key, ifc_name)
1003 return '{ifc}.{s_id}'.format(ifc=interface, s_id=sub_id), sw_subif_idx
1006 def create_gre_tunnel_interface(node, source_ip, destination_ip):
1007 """Create GRE tunnel interface on node.
1009 :param node: VPP node to add tunnel interface.
1010 :param source_ip: Source of the GRE tunnel.
1011 :param destination_ip: Destination of the GRE tunnel.
1013 :type source_ip: str
1014 :type destination_ip: str
1015 :returns: Name and index of created GRE tunnel interface.
1017 :raises RuntimeError: If unable to create GRE tunnel interface.
1019 cmd = 'gre_tunnel_add_del'
1020 tunnel = dict(type=0,
1021 instance=Constants.BITWISE_NON_ZERO,
1023 dst=str(destination_ip),
1026 args = dict(is_add=1,
1028 err_msg = 'Failed to create GRE tunnel interface on host {host}'.format(
1030 with PapiExecutor(node) as papi_exec:
1031 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
1032 verify_reply(err_msg=err_msg)
1034 sw_if_idx = papi_resp['sw_if_index']
1035 if_key = Topology.add_new_port(node, 'gre_tunnel')
1036 Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
1037 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
1038 Topology.update_interface_name(node, if_key, ifc_name)
1040 return ifc_name, sw_if_idx
1043 def vpp_create_loopback(node):
1044 """Create loopback interface on VPP node.
1046 :param node: Node to create loopback interface on.
1048 :returns: SW interface index.
1050 :raises RuntimeError: If it is not possible to create loopback on the
1053 cmd = 'create_loopback'
1054 args = dict(mac_address=0)
1055 err_msg = 'Failed to create loopback interface on host {host}'.format(
1057 with PapiExecutor(node) as papi_exec:
1058 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
1059 verify_reply(err_msg=err_msg)
1061 sw_if_idx = papi_resp['sw_if_index']
1062 if_key = Topology.add_new_port(node, 'loopback')
1063 Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
1064 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
1065 Topology.update_interface_name(node, if_key, ifc_name)
1070 def vpp_create_bond_interface(node, mode, load_balance=None, mac=None):
1071 """Create bond interface on VPP node.
1073 :param node: DUT node from topology.
1074 :param mode: Link bonding mode.
1075 :param load_balance: Load balance (optional, valid for xor and lacp
1076 modes, otherwise ignored).
1077 :param mac: MAC address to assign to the bond interface (optional).
1080 :type load_balance: str
1082 :returns: Interface key (name) in topology.
1084 :raises RuntimeError: If it is not possible to create bond interface on
1088 args = dict(id=int(Constants.BITWISE_NON_ZERO),
1089 use_custom_mac=0 if mac is None else 1,
1090 mac_address=0 if mac is None else L2Util.mac_to_bin(mac),
1091 mode=getattr(LinkBondMode, '{md}'.format(
1092 md=mode.replace('-', '_').upper())).value,
1093 lb=0 if load_balance is None else getattr(
1094 LinkBondLoadBalance, '{lb}'.format(
1095 lb=load_balance.upper())).value)
1096 err_msg = 'Failed to create bond interface on host {host}'.format(
1098 with PapiExecutor(node) as papi_exec:
1099 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
1100 verify_reply(err_msg=err_msg)
1102 sw_if_idx = papi_resp['sw_if_index']
1103 InterfaceUtil.add_eth_interface(node, sw_if_idx=sw_if_idx,
1105 if_key = Topology.get_interface_by_sw_index(node, sw_if_idx)
    def add_eth_interface(node, ifc_name=None, sw_if_idx=None, ifc_pfx=None):
        """Add ethernet interface to current topology.

        Exactly one of ifc_name / sw_if_idx may be omitted; the missing one
        is resolved via VPP before both are stored in the topology.

        :param node: DUT node from topology.
        :param ifc_name: Name of the interface.
        :param sw_if_idx: SW interface index.
        :param ifc_pfx: Interface key prefix.
        :type node: dict
        :type ifc_name: str
        :type sw_if_idx: int
        :type ifc_pfx: str
        """
        if_key = Topology.add_new_port(node, ifc_pfx)

        # Resolve sw_if_idx from name, or name from sw_if_idx, as needed.
        if ifc_name and sw_if_idx is None:
            sw_if_idx = InterfaceUtil.vpp_get_interface_sw_index(node, ifc_name)
        Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
        if sw_if_idx and ifc_name is None:
            ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
        Topology.update_interface_name(node, if_key, ifc_name)
        # MAC is always read back from VPP so the topology reflects reality.
        ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_idx)
        Topology.update_interface_mac_address(node, if_key, ifc_mac)
1134 def vpp_create_avf_interface(node, vf_pci_addr, num_rx_queues=None):
1135 """Create AVF interface on VPP node.
1137 :param node: DUT node from topology.
1138 :param vf_pci_addr: Virtual Function PCI address.
1139 :param num_rx_queues: Number of RX queues.
1141 :type vf_pci_addr: str
1142 :type num_rx_queues: int
1143 :returns: Interface key (name) in topology.
1145 :raises RuntimeError: If it is not possible to create AVF interface on
1149 args = dict(pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
1151 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1154 err_msg = 'Failed to create AVF interface on host {host}'.format(
1156 with PapiExecutor(node) as papi_exec:
1157 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
1158 verify_reply(err_msg=err_msg)
1160 sw_if_idx = papi_resp['sw_if_index']
1161 InterfaceUtil.add_eth_interface(node, sw_if_idx=sw_if_idx,
1163 if_key = Topology.get_interface_by_sw_index(node, sw_if_idx)
1168 def vpp_enslave_physical_interface(node, interface, bond_if):
1169 """Enslave physical interface to bond interface on VPP node.
1171 :param node: DUT node from topology.
1172 :param interface: Physical interface key from topology file.
1173 :param bond_if: Load balance
1175 :type interface: str
1177 :raises RuntimeError: If it is not possible to enslave physical
1178 interface to bond interface on the node.
1180 cmd = 'bond_enslave'
1182 sw_if_index=Topology.get_interface_sw_index(node, interface),
1183 bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
1186 err_msg = 'Failed to enslave physical interface {ifc} to bond ' \
1187 'interface {bond} on host {host}'.format(ifc=interface,
1190 with PapiExecutor(node) as papi_exec:
1191 papi_exec.add(cmd, **args).get_replies(err_msg).\
1192 verify_reply(err_msg=err_msg)
1195 def vpp_show_bond_data_on_node(node, details=False):
1196 """Show (detailed) bond information on VPP node.
1198 :param node: DUT node from topology.
1199 :param details: If detailed information is required or not.
1203 cmd = 'sw_interface_bond_dump'
1204 cmd_reply = 'sw_interface_bond_details'
1205 err_msg = 'Failed to get bond interface dump on host {host}'.format(
1208 data = ('Bond data on node {host}:\n'.format(host=node['host']))
1209 with PapiExecutor(node) as papi_exec:
1210 papi_resp = papi_exec.add(cmd).get_dump(err_msg)
1212 papi_dump = papi_resp.reply[0]['api_reply']
1213 for item in papi_dump:
1214 data += ('{b}\n'.format(b=item[cmd_reply]['interface_name'].
1216 data += (' mode: {m}\n'.
1217 format(m=LinkBondMode(item[cmd_reply]['mode']).name.
1219 data += (' load balance: {lb}\n'.
1220 format(lb=LinkBondLoadBalance(item[cmd_reply]['lb']).name.
1222 data += (' number of active slaves: {n}\n'.
1223 format(n=item[cmd_reply]['active_slaves']))
1225 slave_data = InterfaceUtil.vpp_bond_slave_dump(
1226 node, Topology.get_interface_by_sw_index(
1227 node, item[cmd_reply]['sw_if_index']))
1228 for slave in slave_data:
1229 if not slave['is_passive']:
1230 data += (' {s}\n'.format(s=slave['interface_name']))
1231 data += (' number of slaves: {n}\n'.
1232 format(n=item[cmd_reply]['slaves']))
1234 for slave in slave_data:
1235 data += (' {s}\n'.format(s=slave['interface_name']))
1236 data += (' interface id: {i}\n'.
1237 format(i=item[cmd_reply]['id']))
1238 data += (' sw_if_index: {i}\n'.
1239 format(i=item[cmd_reply]['sw_if_index']))
1243 def vpp_bond_slave_dump(node, interface):
1244 """Get bond interface slave(s) data on VPP node.
1246 :param node: DUT node from topology.
1247 :param interface: Physical interface key from topology file.
1249 :type interface: str
1250 :returns: Bond slave interface data.
1253 cmd = 'sw_interface_slave_dump'
1254 cmd_reply = 'sw_interface_slave_details'
1255 args = dict(sw_if_index=Topology.get_interface_sw_index(
1257 err_msg = 'Failed to get slave dump on host {host}'.format(
1260 with PapiExecutor(node) as papi_exec:
1261 papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
1263 papi_dump = papi_resp.reply[0]['api_reply']
1265 def process_slave_dump(slave_dump):
1266 """Process slave dump.
1268 :param slave_dump: Slave interface dump.
1269 :type slave_dump: dict
1270 :returns: Processed slave interface dump.
1273 slave_dump['interface_name'] = slave_dump['interface_name'].\
1278 for item in papi_dump:
1279 data.append(process_slave_dump(item[cmd_reply]))
1281 logger.debug('Slave data:\n{slave_data}'.format(slave_data=data))
    def vpp_show_bond_data_on_all_nodes(nodes, details=False):
        """Show (detailed) bond information on all VPP nodes in DICT__nodes.

        :param nodes: Nodes in the topology.
        :param details: If detailed information is required or not.
        :type nodes: dict
        :type details: bool
        """
        # Only DUT nodes run VPP; TG nodes are skipped.
        for node_data in nodes.values():
            if node_data['type'] == NodeType.DUT:
                InterfaceUtil.vpp_show_bond_data_on_node(node_data, details)
1298 def vpp_enable_input_acl_interface(node, interface, ip_version,
1300 """Enable input acl on interface.
1302 :param node: VPP node to setup interface for input acl.
1303 :param interface: Interface to setup input acl.
1304 :param ip_version: Version of IP protocol.
1305 :param table_index: Classify table index.
1307 :type interface: str or int
1308 :type ip_version: str
1309 :type table_index: int
1311 cmd = 'input_acl_set_interface'
1313 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1314 ip4_table_index=table_index if ip_version == 'ip4'
1315 else Constants.BITWISE_NON_ZERO,
1316 ip6_table_index=table_index if ip_version == 'ip6'
1317 else Constants.BITWISE_NON_ZERO,
1318 l2_table_index=table_index if ip_version == 'l2'
1319 else Constants.BITWISE_NON_ZERO,
1321 err_msg = 'Failed to enable input acl on interface {ifc}'.format(
1323 with PapiExecutor(node) as papi_exec:
1324 papi_exec.add(cmd, **args).get_replies(err_msg).\
1325 verify_reply(err_msg=err_msg)
1328 def get_interface_classify_table(node, interface):
1329 """Get name of classify table for the given interface.
1331 TODO: Move to Classify.py.
1333 :param node: VPP node to get data from.
1334 :param interface: Name or sw_if_index of a specific interface.
1336 :type interface: str or int
1337 :returns: Classify table name.
1340 if isinstance(interface, basestring):
1341 sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
1343 sw_if_index = interface
1345 cmd = 'classify_table_by_interface'
1346 args = dict(sw_if_index=sw_if_index)
1347 err_msg = 'Failed to get classify table name by interface {ifc}'.format(
1349 with PapiExecutor(node) as papi_exec:
1350 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg). \
1351 verify_reply(err_msg=err_msg)
    def get_sw_if_index(node, interface_name):
        """Get sw_if_index for the given interface from actual interface dump.

        :param node: VPP node to get interface data from.
        :param interface_name: Name of the specific interface.
        :type node: dict
        :type interface_name: str
        :returns: sw_if_index of the given interface.
        :rtype: int
        """
        # Delegates to the full interface dump and picks the index field;
        # returns None if the dump has no such key.
        interface_data = InterfaceUtil.vpp_get_interface_data(
            node, interface=interface_name)
        return interface_data.get('sw_if_index')
1371 def vxlan_gpe_dump(node, interface_name=None):
1372 """Get VxLAN GPE data for the given interface.
1374 :param node: VPP node to get interface data from.
1375 :param interface_name: Name of the specific interface. If None,
1376 information about all VxLAN GPE interfaces is returned.
1378 :type interface_name: str
1379 :returns: Dictionary containing data for the given VxLAN GPE interface
1380 or if interface=None, the list of dictionaries with all VxLAN GPE
1382 :rtype: dict or list
1384 if interface_name is not None:
1385 sw_if_index = InterfaceUtil.get_interface_index(
1386 node, interface_name)
1388 sw_if_index = int(Constants.BITWISE_NON_ZERO)
1390 cmd = 'vxlan_gpe_tunnel_dump'
1391 cmd_reply = 'vxlan_gpe_tunnel_details'
1392 args = dict(sw_if_index=sw_if_index)
1393 err_msg = 'Failed to get VXLAN-GPE dump on host {host}'.format(
1395 with PapiExecutor(node) as papi_exec:
1396 papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
1398 papi_vxlan_dump = papi_resp.reply[0]['api_reply']
1400 def process_vxlan_gpe_dump(vxlan_dump):
1401 """Process vxlan_gpe dump.
1403 :param vxlan_dump: Vxlan_gpe nterface dump.
1404 :type vxlan_dump: dict
1405 :returns: Processed vxlan_gpe interface dump.
1408 if vxlan_dump['is_ipv6']:
1409 vxlan_dump['local'] = \
1410 inet_ntop(AF_INET6, vxlan_dump['local'])
1411 vxlan_dump['remote'] = \
1412 inet_ntop(AF_INET6, vxlan_dump['remote'])
1414 vxlan_dump['local'] = \
1415 inet_ntop(AF_INET, vxlan_dump['local'][0:4])
1416 vxlan_dump['remote'] = \
1417 inet_ntop(AF_INET, vxlan_dump['remote'][0:4])
1420 data = list() if interface_name is None else dict()
1421 for item in papi_vxlan_dump:
1422 if interface_name is None:
1423 data.append(process_vxlan_gpe_dump(item[cmd_reply]))
1424 elif item[cmd_reply]['sw_if_index'] == sw_if_index:
1425 data = process_vxlan_gpe_dump(item[cmd_reply])
1428 logger.debug('VXLAN-GPE data:\n{vxlan_gpe_data}'.format(
1429 vxlan_gpe_data=data))
1433 def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
1434 """Assign VPP interface to specific VRF/FIB table.
1436 :param node: VPP node where the FIB and interface are located.
1437 :param interface: Interface to be assigned to FIB.
1438 :param table_id: VRF table ID.
1439 :param ipv6: Assign to IPv6 table. Default False.
1441 :type interface: str or int
1445 cmd = 'sw_interface_set_table'
1447 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1448 is_ipv6=1 if ipv6 else 0,
1449 vrf_id=int(table_id))
1450 err_msg = 'Failed to assign interface {ifc} to FIB table'.format(
1452 with PapiExecutor(node) as papi_exec:
1453 papi_exec.add(cmd, **args).get_replies(err_msg). \
1454 verify_reply(err_msg=err_msg)
1457 def set_linux_interface_mac(node, interface, mac, namespace=None,
1459 """Set MAC address for interface in linux.
1461 :param node: Node where to execute command.
1462 :param interface: Interface in namespace.
1463 :param mac: MAC to be assigned to interface.
1464 :param namespace: Execute command in namespace. Optional
1465 :param vf_id: Virtual Function id. Optional
1467 :type interface: str
1469 :type namespace: str
1472 mac_str = 'vf {vf_id} mac {mac}'.format(vf_id=vf_id, mac=mac) \
1473 if vf_id is not None else 'address {mac}'.format(mac=mac)
1474 ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''
1476 cmd = ('{ns} ip link set {interface} {mac}'.
1477 format(ns=ns_str, interface=interface, mac=mac_str))
1478 exec_cmd_no_error(node, cmd, sudo=True)
1481 def set_linux_interface_trust_on(node, interface, namespace=None,
1483 """Set trust on (promisc) for interface in linux.
1485 :param node: Node where to execute command.
1486 :param interface: Interface in namespace.
1487 :param namespace: Execute command in namespace. Optional
1488 :param vf_id: Virtual Function id. Optional
1490 :type interface: str
1491 :type namespace: str
1494 trust_str = 'vf {vf_id} trust on'.format(vf_id=vf_id) \
1495 if vf_id is not None else 'trust on'
1496 ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''
1498 cmd = ('{ns} ip link set dev {interface} {trust}'.
1499 format(ns=ns_str, interface=interface, trust=trust_str))
1500 exec_cmd_no_error(node, cmd, sudo=True)
1503 def set_linux_interface_spoof_off(node, interface, namespace=None,
1505 """Set spoof off for interface in linux.
1507 :param node: Node where to execute command.
1508 :param interface: Interface in namespace.
1509 :param namespace: Execute command in namespace. Optional
1510 :param vf_id: Virtual Function id. Optional
1512 :type interface: str
1513 :type namespace: str
1516 spoof_str = 'vf {vf_id} spoof off'.format(vf_id=vf_id) \
1517 if vf_id is not None else 'spoof off'
1518 ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''
1520 cmd = ('{ns} ip link set dev {interface} {spoof}'.
1521 format(ns=ns_str, interface=interface, spoof=spoof_str))
1522 exec_cmd_no_error(node, cmd, sudo=True)
1525 def init_avf_interface(node, ifc_key, numvfs=1, osi_layer='L2'):
1526 """Init PCI device by creating VFs and bind them to vfio-pci for AVF
1527 driver testing on DUT.
1529 :param node: DUT node.
1530 :param ifc_key: Interface key from topology file.
1531 :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
1532 :param osi_layer: OSI Layer type to initialize TG with.
1533 Default value "L2" sets linux interface spoof off.
1537 :type osi_layer: str
1538 :returns: Virtual Function topology interface keys.
1540 :raises RuntimeError: If a reason preventing initialization is found.
1542 # Read PCI address and driver.
1543 pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
1544 pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
1545 uio_driver = Topology.get_uio_driver(node)
1546 kernel_driver = Topology.get_interface_driver(node, ifc_key)
1547 if kernel_driver != "i40e":
1549 "AVF needs i40e driver, not {driver} at node {host} ifc {ifc}"\
1550 .format(driver=kernel_driver, host=node["host"], ifc=ifc_key))
1551 current_driver = DUTSetup.get_pci_dev_driver(
1552 node, pf_pci_addr.replace(':', r'\:'))
1554 VPPUtil.stop_vpp_service(node)
1555 if current_driver != kernel_driver:
1556 # PCI device must be re-bound to kernel driver before creating VFs.
1557 DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
1558 # Stop VPP to prevent deadlock.
1559 # Unbind from current driver.
1560 DUTSetup.pci_driver_unbind(node, pf_pci_addr)
1561 # Bind to kernel driver.
1562 DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
1564 # Initialize PCI VFs
1565 DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
1568 # Set MAC address and bind each virtual function to uio driver.
1569 for vf_id in range(numvfs):
1570 vf_mac_addr = ":".join([pf_mac_addr[0], pf_mac_addr[2],
1571 pf_mac_addr[3], pf_mac_addr[4],
1572 pf_mac_addr[5], "{:02x}".format(vf_id)])
1574 pf_dev = '`basename /sys/bus/pci/devices/{pci}/net/*`'.\
1575 format(pci=pf_pci_addr)
1576 InterfaceUtil.set_linux_interface_trust_on(node, pf_dev,
1578 if osi_layer == 'L2':
1579 InterfaceUtil.set_linux_interface_spoof_off(node, pf_dev,
1581 InterfaceUtil.set_linux_interface_mac(node, pf_dev, vf_mac_addr,
1584 DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
1585 DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
1587 # Add newly created ports into topology file
1588 vf_ifc_name = '{pf_if_key}_vf'.format(pf_if_key=ifc_key)
1589 vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
1590 vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
1591 Topology.update_interface_name(node, vf_ifc_key,
1592 vf_ifc_name+str(vf_id+1))
1593 Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
1594 Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
1595 vf_ifc_keys.append(vf_ifc_key)
    def vpp_sw_interface_rx_placement_dump(node):
        """Dump VPP interface RX placement on node.

        :param node: Node to run command on.
        :type node: dict
        :returns: Thread mapping information as a list of dictionaries.
        :rtype: list
        """
        cmd = 'sw_interface_rx_placement_dump'
        cmd_reply = 'sw_interface_rx_placement_details'
        err_msg = "Failed to run '{cmd}' PAPI command on host {host}!".format(
            cmd=cmd, host=node['host'])
        with PapiExecutor(node) as papi_exec:
            # Queue one dump request per interface known to the topology,
            # then execute them all in a single PAPI session.
            for ifc in node['interfaces'].values():
                if ifc['vpp_sw_index'] is not None:
                    papi_exec.add(cmd, sw_if_index=ifc['vpp_sw_index'])
            papi_resp = papi_exec.get_dump(err_msg)
        # Flatten per-request replies into one list of detail records.
        thr_mapping = [s[cmd_reply] for r in papi_resp.reply
                       for s in r['api_reply']]
        return sorted(thr_mapping, key=lambda k: k['sw_if_index'])
1622 def vpp_sw_interface_set_rx_placement(node, sw_if_index, queue_id,
1624 """Set interface RX placement to worker on node.
1626 :param node: Node to run command on.
1627 :param sw_if_index: VPP SW interface index.
1628 :param queue_id: VPP interface queue ID.
1629 :param worker_id: VPP worker ID (indexing from 0).
1631 :type sw_if_index: int
1633 :type worker_id: int
1634 :raises RuntimeError: If failed to run command on host or if no API
1637 cmd = 'sw_interface_set_rx_placement'
1638 err_msg = "Failed to set interface RX placement to worker on host " \
1639 "{host}!".format(host=node['host'])
1640 args = dict(sw_if_index=sw_if_index, queue_id=queue_id,
1641 worker_id=worker_id)
1642 with PapiExecutor(node) as papi_exec:
1643 papi_exec.add(cmd, **args).get_replies(err_msg).\
1644 verify_reply(err_msg=err_msg)
1647 def vpp_round_robin_rx_placement(node, prefix):
1648 """Set Round Robin interface RX placement on all worker threads
1651 :param node: Topology nodes.
1652 :param prefix: Interface name prefix.
1657 worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
1660 for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
1661 for interface in node['interfaces'].values():
1662 if placement['sw_if_index'] == interface['vpp_sw_index'] \
1663 and prefix in interface['name']:
1664 InterfaceUtil.vpp_sw_interface_set_rx_placement(
1665 node, placement['sw_if_index'], placement['queue_id'],
1666 worker_id % worker_cnt)
    def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix):
        """Set Round Robin interface RX placement on all worker threads
        on all DUTs.

        :param nodes: Topology nodes.
        :param prefix: Interface name prefix.
        :type nodes: dict
        :type prefix: str
        """
        # Only DUT nodes run VPP; other node types are skipped.
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)