1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Interface util library."""
16 from socket import AF_INET, AF_INET6, inet_ntop, inet_pton
17 from time import sleep
19 from enum import IntEnum
20 from ipaddress import IPv4Address, IPv6Address
21 from ipaddress import AddressValueError, NetmaskValueError
22 from robot.api import logger
24 from resources.libraries.python.Constants import Constants
25 from resources.libraries.python.CpuUtils import CpuUtils
26 from resources.libraries.python.DUTSetup import DUTSetup
27 from resources.libraries.python.L2Util import L2Util
28 from resources.libraries.python.PapiExecutor import PapiExecutor
29 from resources.libraries.python.parsers.JsonParser import JsonParser
30 from resources.libraries.python.ssh import SSH, exec_cmd_no_error
31 from resources.libraries.python.topology import NodeType, Topology
32 from resources.libraries.python.VPPUtil import VPPUtil
class LinkBondLoadBalance(IntEnum):
    """Link bonding load balance.

    Member values are passed as the ``lb`` argument of the VPP
    ``bond_create`` API (see ``InterfaceUtil.vpp_create_bond_interface``,
    which looks members up by upper-cased name).
    """
class LinkBondMode(IntEnum):
    """Link bonding mode.

    Member values are passed as the ``mode`` argument of the VPP
    ``bond_create`` API (see ``InterfaceUtil.vpp_create_bond_interface``,
    which looks members up by upper-cased, dash-to-underscore name).
    """
class InterfaceUtil(object):
    """General utilities for managing interfaces"""

    # Path of the udev rules file written by tg_set_interfaces_udev_rules()
    # so TG interfaces keep stable names across driver unbind/bind.
    __UDEV_IF_RULES_FILE = '/etc/udev/rules.d/10-network.rules'
57 def pci_to_int(pci_str):
58 """Convert PCI address from string format (0000:18:0a.0) to
59 integer representation (169345024).
61 :param pci_str: PCI address in string representation.
63 :returns: Integer representation of PCI address.
66 pci = list(pci_str.split(':')[0:2])
67 pci.extend(pci_str.split(':')[2].split('.'))
69 return (int(pci[0], 16) | int(pci[1], 16) << 16 |
70 int(pci[2], 16) << 24 | int(pci[3], 16) << 29)
    def get_interface_index(node, interface):
        """Get interface sw_if_index from topology file.

        :param node: Node where the interface is.
        :param interface: Numeric index or name string of a specific interface.
        :type node: dict
        :type interface: str or int
        :returns: SW interface index.
        :rtype: int
        """
        # NOTE(review): the 'try:'/'except ValueError:' structure around the
        # int() conversion, a 'sw_if_index = \' continuation and the final
        # 'return sw_if_index' appear to have been lost -- restore from VCS.
        sw_if_index = int(interface)
        sw_if_index = Topology.get_interface_sw_index(node, interface)
        if sw_if_index is None:
            Topology.get_interface_sw_index_by_name(node, interface)
        except TypeError as err:
            raise TypeError('Wrong interface format {ifc}: {err}'.format(
                ifc=interface, err=err.message))
    def set_interface_state(node, interface, state, if_type='key'):
        """Set interface state on a node.

        Function can be used for DUTs as well as for TGs.

        :param node: Node where the interface is.
        :param interface: Interface key or sw_if_index or name.
        :param state: One of 'up' or 'down'.
        :param if_type: Interface type.
        :type node: dict
        :type interface: str or int
        :type state: str
        :type if_type: str
        :raises ValueError: If the interface type is unknown.
        :raises ValueError: If the state of interface is unexpected.
        :raises ValueError: If the node has an unknown node type.
        """
        # NOTE(review): several structural lines appear to have been lost from
        # this body (the "if if_type == 'key':" header, the 'else:' arms, and
        # the state -> admin_up_down mapping) -- restore from VCS before use.
        if isinstance(interface, basestring):
            sw_if_index = Topology.get_interface_sw_index(node, interface)
            iface_name = Topology.get_interface_name(node, interface)
            sw_if_index = interface
        elif if_type == 'name':
            iface_key = Topology.get_interface_by_name(node, interface)
            if iface_key is not None:
                sw_if_index = Topology.get_interface_sw_index(node, iface_key)
            iface_name = interface
            raise ValueError('Unknown if_type: {type}'.format(type=if_type))

        if node['type'] == NodeType.DUT:
            # DUT path: flip the admin flag through the VPP binary API.
            elif state == 'down':
                raise ValueError('Unexpected interface state: {state}'.format(
            cmd = 'sw_interface_set_flags'
            err_msg = 'Failed to set interface state on host {host}'.format(
            args = dict(sw_if_index=sw_if_index,
                        admin_up_down=admin_up_down)
            with PapiExecutor(node) as papi_exec:
                papi_exec.add(cmd, **args).get_replies(err_msg).\
                    verify_reply(err_msg=err_msg)
        elif node['type'] == NodeType.TG or node['type'] == NodeType.VM:
            # TG/VM path: use the Linux 'ip link' tool over SSH.
            cmd = 'ip link set {ifc} {state}'.format(
                ifc=iface_name, state=state)
            exec_cmd_no_error(node, cmd, sudo=True)
            raise ValueError('Node {} has unknown NodeType: "{}"'
                             .format(node['host'], node['type']))
    def set_interface_ethernet_mtu(node, iface_key, mtu):
        """Set Ethernet MTU for specified interface.

        Function can be used only for TGs.

        :param node: Node where the interface is.
        :param iface_key: Interface key from topology file.
        :param mtu: MTU to set.
        :type node: dict
        :type iface_key: str
        :type mtu: int
        :raises ValueError: If the node type is "DUT".
        :raises ValueError: If the node has an unknown node type.
        """
        if node['type'] == NodeType.DUT:
            # NOTE(review): the message is never formatted -- ValueError takes
            # node['host'] as a second arg instead of .format(); verify intent.
            raise ValueError('Node {}: Setting Ethernet MTU for interface '
                             'on DUT nodes not supported', node['host'])
        elif node['type'] == NodeType.TG:
            iface_name = Topology.get_interface_name(node, iface_key)
            cmd = 'ip link set {} mtu {}'.format(iface_name, mtu)
            exec_cmd_no_error(node, cmd, sudo=True)
            # NOTE(review): an 'else:' introducing the unknown-node-type raise
            # below appears to be missing.
            raise ValueError('Node {} has unknown NodeType: "{}"'
                             .format(node['host'], node['type']))
181 def set_default_ethernet_mtu_on_all_interfaces_on_node(node):
182 """Set default Ethernet MTU on all interfaces on node.
184 Function can be used only for TGs.
186 :param node: Node where to set default MTU.
190 for ifc in node['interfaces']:
191 InterfaceUtil.set_interface_ethernet_mtu(node, ifc, 1500)
    def vpp_set_interface_mtu(node, interface, mtu=9200):
        """Set Ethernet MTU on interface.

        :param node: VPP node.
        :param interface: Interface to setup MTU.
        :param mtu: Ethernet MTU size in Bytes. Default: 9200.
        :type node: dict
        :type interface: str or int
        :type mtu: int
        """
        if isinstance(interface, basestring):
            sw_if_index = Topology.get_interface_sw_index(node, interface)
            # NOTE(review): an 'else:' appears to be missing above this line.
            sw_if_index = interface

        cmd = 'hw_interface_set_mtu'
        # NOTE(review): the continuation of 'err_msg' (host=...) and the 'mtu'
        # entry of the 'args' dict appear to be missing -- restore from VCS.
        err_msg = 'Failed to set interface MTU on host {host}'.format(
        args = dict(sw_if_index=sw_if_index,
        with PapiExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)
219 def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
220 """Set Ethernet MTU on all interfaces.
222 :param node: VPP node.
223 :param mtu: Ethernet MTU size in Bytes. Default: 9200.
227 for interface in node['interfaces']:
228 InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
231 def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
232 """Set Ethernet MTU on all interfaces on all DUTs.
234 :param nodes: VPP nodes.
235 :param mtu: Ethernet MTU size in Bytes. Default: 9200.
239 for node in nodes.values():
240 if node['type'] == NodeType.DUT:
241 InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
    def vpp_node_interfaces_ready_wait(node, retries=15):
        """Wait until all interfaces with admin-up are in link-up state.

        :param node: Node to wait on.
        :param retries: Number of retries to check interface status (optional,
            default 15).
        :type node: dict
        :type retries: int
        :raises RuntimeError: If any interface is not in link-up state after
            defined number of retries.
        """
        for _ in xrange(0, retries):
            # NOTE(review): the per-iteration 'not_ready = list()' reset, an
            # early 'break' when no interface is pending and a 'sleep(1)'
            # between retries appear to be missing -- restore from VCS.
            out = InterfaceUtil.vpp_get_interface_data(node)
            for interface in out:
                if interface.get('admin_up_down') == 1:
                    if interface.get('link_up_down') != 1:
                        not_ready.append(interface.get('interface_name'))
            logger.debug('Interfaces still in link-down state:\n{ifs} '
                         '\nWaiting...'.format(ifs=not_ready))
        # 'not_ready' is only bound if at least one dump iteration ran.
        err = 'Timeout, interfaces not up:\n{ifs}'.format(ifs=not_ready) \
            if 'not_ready' in locals() else 'No check executed!'
        raise RuntimeError(err)
275 def all_vpp_interfaces_ready_wait(nodes, retries=15):
276 """Wait until all interfaces with admin-up are in link-up state for all
277 nodes in the topology.
279 :param nodes: Nodes in the topology.
280 :param retries: Number of retries to check interface status (optional,
286 for node in nodes.values():
287 if node['type'] == NodeType.DUT:
288 InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)
    def vpp_get_interface_data(node, interface=None):
        """Get all interface data from a VPP node. If a name or
        sw_interface_index is provided, return only data for the matching
        interface(s).

        :param node: VPP node to get interface data from.
        :param interface: Numeric index or name string of a specific interface.
        :type node: dict
        :type interface: int or str
        :returns: List of dictionaries containing data for each interface, or a
            single dictionary for the specified interface.
        :rtype: list or dict
        :raises TypeError: if the data type of interface is neither basestring
            nor int.
        """
        if interface is not None:
            if isinstance(interface, basestring):
                param = 'interface_name'
            elif isinstance(interface, int):
                param = 'sw_if_index'
                # NOTE(review): the 'else:' above this raise, the closing
                # 'ifc=interface))' of the call, and the "param = ''" fallback
                # appear to be missing -- restore from VCS.
                raise TypeError('Wrong interface format {ifc}'.format(

        cmd = 'sw_interface_dump'
        cmd_reply = 'sw_interface_details'
        # NOTE(review): the continuation lines of 'args' and 'err_msg' appear
        # to be missing.
        args = dict(name_filter_valid=0,
        err_msg = 'Failed to get interface dump on host {host}'.format(
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
        papi_if_dump = papi_resp.reply[0]['api_reply']

        def process_if_dump(if_dump):
            """Process interface dump.

            Strips trailing NUL padding from fixed-width string fields and
            renders binary MAC fields as human-readable strings.

            :param if_dump: Interface dump.
            :type if_dump: dict
            :returns: Processed interface dump.
            :rtype: dict
            """
            if_dump['interface_name'] = if_dump['interface_name'].rstrip('\x00')
            if_dump['tag'] = if_dump['tag'].rstrip('\x00')
            if_dump['l2_address'] = L2Util.bin_to_mac(if_dump['l2_address'])
            if_dump['b_dmac'] = L2Util.bin_to_mac(if_dump['b_dmac'])
            if_dump['b_smac'] = L2Util.bin_to_mac(if_dump['b_smac'])

        # dict for single-interface lookups, list for full dumps.
        data = list() if interface is None else dict()
        for item in papi_if_dump:
            if interface is None:
                data.append(process_if_dump(item[cmd_reply]))
            elif str(item[cmd_reply].get(param)).rstrip('\x00') == \
                data = process_if_dump(item[cmd_reply])

        logger.debug('Interface data:\n{if_data}'.format(if_data=data))
356 def vpp_get_interface_name(node, sw_if_index):
357 """Get interface name for the given SW interface index from actual
360 :param node: VPP node to get interface data from.
361 :param sw_if_index: SW interface index of the specific interface.
363 :type sw_if_index: int
364 :returns: Name of the given interface.
367 if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
368 if if_data['sup_sw_if_index'] != if_data['sw_if_index']:
369 if_data = InterfaceUtil.vpp_get_interface_data(
370 node, if_data['sup_sw_if_index'])
372 return if_data.get('interface_name')
375 def vpp_get_interface_sw_index(node, interface_name):
376 """Get interface name for the given SW interface index from actual
379 :param node: VPP node to get interface data from.
380 :param interface_name: Interface name.
382 :type interface_name: str
383 :returns: Name of the given interface.
386 if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)
388 return if_data.get('sw_if_index')
391 def vpp_get_interface_mac(node, interface):
392 """Get MAC address for the given interface from actual interface dump.
394 :param node: VPP node to get interface data from.
395 :param interface: Numeric index or name string of a specific interface.
397 :type interface: int or str
398 :returns: MAC address.
401 if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
402 if if_data['sup_sw_if_index'] != if_data['sw_if_index']:
403 if_data = InterfaceUtil.vpp_get_interface_data(
404 node, if_data['sup_sw_if_index'])
406 return if_data.get('l2_address')
    def tg_set_interface_driver(node, pci_addr, driver):
        """Set interface driver on the TG node.

        :param node: Node to set interface driver on (must be TG node).
        :param pci_addr: PCI address of the interface.
        :param driver: Driver name.
        :type node: dict
        :type pci_addr: str
        :type driver: str
        :raises RuntimeError: If unbinding from the current driver fails.
        :raises RuntimeError: If binding to the new driver fails.
        """
        old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
        if old_driver == driver:
        # NOTE(review): the early 'return' for the already-bound case and the
        # SSH session setup ('ssh = SSH(); ssh.connect(node)') appear to be
        # missing here -- restore from VCS.

        # Unbind from current driver
        if old_driver is not None:
            cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/unbind"'\
                .format(pci_addr, old_driver)
            (ret_code, _, _) = ssh.exec_command_sudo(cmd)
            if int(ret_code) != 0:
                raise RuntimeError("'{0}' failed on '{1}'"
                                   .format(cmd, node['host']))

        # Bind to the new driver
        cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/bind"'\
            .format(pci_addr, driver)
        (ret_code, _, _) = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError("'{0}' failed on '{1}'"
                               .format(cmd, node['host']))
446 def tg_get_interface_driver(node, pci_addr):
447 """Get interface driver from the TG node.
449 :param node: Node to get interface driver on (must be TG node).
450 :param pci_addr: PCI address of the interface.
453 :returns: Interface driver or None if not found.
455 :raises RuntimeError: If PCI rescan or lspci command execution failed.
457 return DUTSetup.get_pci_dev_driver(node, pci_addr)
    def tg_set_interfaces_udev_rules(node):
        """Set udev rules for interfaces.

        Create udev rules file in /etc/udev/rules.d containing a rule for each
        interface used by the TG node, so that, based on its MAC address, each
        interface gets a specific name. So after unbind and bind again to
        kernel driver the interface has the same name as before. This must be
        called after TG has set name for each port in topology dictionary.

        Example rule:
        SUBSYSTEM=="net", ACTION=="add", ATTR{address}=="52:54:00:e1:8a:0f",

        :param node: Node to set udev rules on (must be TG node).
        :type node: dict
        :raises RuntimeError: If setting of udev rules fails.
        """
        # NOTE(review): the SSH session setup ('ssh = SSH(); ssh.connect(node)')
        # appears to be missing before the first exec_command_sudo() call.
        cmd = 'rm -f {0}'.format(InterfaceUtil.__UDEV_IF_RULES_FILE)
        (ret_code, _, _) = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError("'{0}' failed on '{1}'"
                               .format(cmd, node['host']))

        for interface in node['interfaces'].values():
            rule = 'SUBSYSTEM==\\"net\\", ACTION==\\"add\\", ATTR{address}' + \
                   '==\\"' + interface['mac_address'] + '\\", NAME=\\"' + \
                   interface['name'] + '\\"'
            cmd = 'sh -c "echo \'{0}\' >> {1}"'.format(
                rule, InterfaceUtil.__UDEV_IF_RULES_FILE)
            (ret_code, _, _) = ssh.exec_command_sudo(cmd)
            if int(ret_code) != 0:
                raise RuntimeError("'{0}' failed on '{1}'"
                                   .format(cmd, node['host']))

        # Best effort; exit code deliberately not checked here.
        cmd = '/etc/init.d/udev restart'
        ssh.exec_command_sudo(cmd)
    def tg_set_interfaces_default_driver(node):
        """Set interfaces default driver specified in topology yaml file.

        :param node: Node to setup interfaces driver on (must be TG node).
        :type node: dict
        """
        for interface in node['interfaces'].values():
            # NOTE(review): the final argument line of this call (the driver
            # value from topology, e.g. "interface['driver'])") appears to be
            # missing -- restore from VCS.
            InterfaceUtil.tg_set_interface_driver(node,
                                                  interface['pci_address'],
    def update_vpp_interface_data_on_node(node):
        """Update vpp generated interface data for a given node in DICT__nodes.

        Updates interface names, software if index numbers and any other details
        generated specifically by vpp that are unknown before testcase run.
        It does this by dumping interface list from all devices using python
        api, and pairing known information from topology (mac address) to state
        of the node.

        :param node: Node selected from DICT__nodes.
        :type node: dict
        """
        interface_list = InterfaceUtil.vpp_get_interface_data(node)
        interface_dict = dict()
        for ifc in interface_list:
            # Index dumped interfaces by MAC so topology ports can be matched.
            interface_dict[ifc['l2_address']] = ifc

        for if_name, if_data in node['interfaces'].items():
            ifc_dict = interface_dict.get(if_data['mac_address'])
            if ifc_dict is not None:
                if_data['name'] = ifc_dict['interface_name']
                if_data['vpp_sw_index'] = ifc_dict['sw_if_index']
                if_data['mtu'] = ifc_dict['mtu'][0]
                logger.trace('Interface {ifc} found by MAC {mac}'.format(
                    ifc=if_name, mac=if_data['mac_address']))
                # NOTE(review): an 'else:' introducing the not-found branch
                # below appears to be missing.
                logger.trace('Interface {ifc} not found by MAC {mac}'.format(
                    ifc=if_name, mac=if_data['mac_address']))
                if_data['vpp_sw_index'] = None
    def update_nic_interface_names(node):
        """Update interface names based on nic type and PCI address.

        This method updates interface names in the same format as VPP does.

        :param node: Node dictionary.
        :type node: dict
        """
        for ifc in node['interfaces'].values():
            # Derive VPP's "bus/dev/fun" suffix from the PCI address, with
            # each component rendered as lower-case hex without padding.
            if_pci = ifc['pci_address'].replace('.', ':').split(':')
            bus = '{:x}'.format(int(if_pci[1], 16))
            dev = '{:x}'.format(int(if_pci[2], 16))
            fun = '{:x}'.format(int(if_pci[3], 16))
            loc = '{bus}/{dev}/{fun}'.format(bus=bus, dev=dev, fun=fun)
            if ifc['model'] == 'Intel-XL710':
                ifc['name'] = 'FortyGigabitEthernet{loc}'.format(loc=loc)
            elif ifc['model'] == 'Intel-X710':
                ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
            elif ifc['model'] == 'Intel-X520-DA2':
                ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
            elif ifc['model'] == 'Cisco-VIC-1385':
                ifc['name'] = 'FortyGigabitEthernet{loc}'.format(loc=loc)
            elif ifc['model'] == 'Cisco-VIC-1227':
                ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
                # NOTE(review): an 'else:' appears to be missing before the
                # fallback naming below.
                ifc['name'] = 'UnknownEthernet{loc}'.format(loc=loc)
571 def update_nic_interface_names_on_all_duts(nodes):
572 """Update interface names based on nic type and PCI address on all DUTs.
574 This method updates interface names in the same format as VPP does.
576 :param nodes: Topology nodes.
579 for node in nodes.values():
580 if node['type'] == NodeType.DUT:
581 InterfaceUtil.update_nic_interface_names(node)
    def update_tg_interface_data_on_node(node, skip_tg_udev=False):
        """Update interface name for TG/linux node in DICT__nodes.

        .. note::
            # for dev in `ls /sys/class/net/`;
            > do echo "\"`cat /sys/class/net/$dev/address`\": \"$dev\""; done
            "52:54:00:9f:82:63": "eth0"
            "52:54:00:77:ae:a9": "eth1"
            "52:54:00:e1:8a:0f": "eth2"
            "00:00:00:00:00:00": "lo"

        :param node: Node selected from DICT__nodes.
        :param skip_tg_udev: Skip udev rename on TG node.
        :type node: dict
        :type skip_tg_udev: bool
        :raises RuntimeError: If getting of interface name and MAC fails.
        """
        # First setup interface driver specified in yaml file
        InterfaceUtil.tg_set_interfaces_default_driver(node)

        # Get interface names
        # NOTE(review): the SSH session setup ('ssh = SSH(); ssh.connect(node)')
        # appears to be missing here -- restore from VCS.
        cmd = ('for dev in `ls /sys/class/net/`; do echo "\\"`cat '
               '/sys/class/net/$dev/address`\\": \\"$dev\\""; done;')
        (ret_code, stdout, _) = ssh.exec_command(cmd)
        if int(ret_code) != 0:
            raise RuntimeError('Get interface name and MAC failed')
        # Assemble the "MAC": "name" lines into a JSON object for parsing.
        tmp = "{" + stdout.rstrip().replace('\n', ',') + "}"
        interfaces = JsonParser().parse_data(tmp)
        for interface in node['interfaces'].values():
            name = interfaces.get(interface['mac_address'])
            # NOTE(review): a guard for 'name is None' appears to be missing
            # before this assignment.
            interface['name'] = name

        # Set udev rules for interfaces
        # NOTE(review): this call was likely guarded by 'if not skip_tg_udev:'
        # -- verify against VCS.
        InterfaceUtil.tg_set_interfaces_udev_rules(node)
    def iface_update_numa_node(node):
        """For all interfaces from topology file update numa node based on
        information from the node.

        :param node: Node from topology.
        :type node: dict
        :raises ValueError: If numa node is less than 0.
        :raises RuntimeError: If update of numa node fails.
        """
        # NOTE(review): this body is heavily truncated -- the SSH session
        # setup, the retry loop, the numa-value validation and several
        # control-flow lines are missing. Restore from VCS before use.
        for if_key in Topology.get_node_interfaces(node):
            if_pci = Topology.get_interface_pci_addr(node, if_key)
            # The kernel exposes the NUMA locality of each PCI device here;
            # -1 means "unknown" on single-node systems.
            cmd = "cat /sys/bus/pci/devices/{}/numa_node".format(if_pci)
            (ret, out, _) = ssh.exec_command(cmd)
            if CpuUtils.cpu_node_count(node) == 1:
            logger.trace('Reading numa location failed for: {0}'
            Topology.set_interface_numa_node(node, if_key,
            raise RuntimeError('Update numa node failed for: {0}'
664 def update_all_numa_nodes(nodes, skip_tg=False):
665 """For all nodes and all their interfaces from topology file update numa
666 node information based on information from the node.
668 :param nodes: Nodes in the topology.
669 :param skip_tg: Skip TG node
674 for node in nodes.values():
675 if node['type'] == NodeType.DUT:
676 InterfaceUtil.iface_update_numa_node(node)
677 elif node['type'] == NodeType.TG and not skip_tg:
678 InterfaceUtil.iface_update_numa_node(node)
    def update_all_interface_data_on_all_nodes(nodes, skip_tg=False,
        """Update interface names on all nodes in DICT__nodes.

        This method updates the topology dictionary by querying interface lists
        of all nodes mentioned in the topology dictionary.

        :param nodes: Nodes in the topology.
        :param skip_tg: Skip TG node.
        :param skip_tg_udev: Skip udev rename on TG node.
        :param numa_node: Retrieve numa_node location.
        :type nodes: dict
        :type skip_tg: bool
        :type skip_tg_udev: bool
        :type numa_node: bool
        """
        # NOTE(review): the signature is truncated -- the trailing
        # 'skip_tg_udev=False, numa_node=False):' part is missing, as is the
        # 'if numa_node:' guard that should wrap the second branch pair below.
        # Restore from VCS.
        for node_data in nodes.values():
            if node_data['type'] == NodeType.DUT:
                InterfaceUtil.update_vpp_interface_data_on_node(node_data)
            elif node_data['type'] == NodeType.TG and not skip_tg:
                InterfaceUtil.update_tg_interface_data_on_node(
                    node_data, skip_tg_udev)

            if node_data['type'] == NodeType.DUT:
                InterfaceUtil.iface_update_numa_node(node_data)
            elif node_data['type'] == NodeType.TG and not skip_tg:
                InterfaceUtil.iface_update_numa_node(node_data)
    def create_vlan_subinterface(node, interface, vlan):
        """Create VLAN sub-interface on node.

        :param node: Node to add VLAN subinterface on.
        :param interface: Interface name on which create VLAN subinterface.
        :param vlan: VLAN ID of the subinterface to be created.
        :type node: dict
        :type interface: str
        :type vlan: int
        :returns: Name and index of created subinterface.
        :rtype: tuple
        :raises RuntimeError: if it is unable to create VLAN subinterface on the
            node.
        """
        iface_key = Topology.get_interface_by_name(node, interface)
        sw_if_index = Topology.get_interface_sw_index(node, iface_key)

        cmd = 'create_vlan_subif'
        # NOTE(review): the continuation lines of 'args' (vlan_id=...) and
        # 'err_msg' (host=...) appear to be missing -- restore from VCS.
        args = dict(sw_if_index=sw_if_index,
        err_msg = 'Failed to create VLAN sub-interface on host {host}'.format(
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)

        # Register the new sub-interface in the topology dictionary.
        sw_if_idx = papi_resp['sw_if_index']
        if_key = Topology.add_new_port(node, 'vlan_subif')
        Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
        Topology.update_interface_name(node, if_key, ifc_name)

        return '{ifc}.{vlan}'.format(ifc=interface, vlan=vlan), sw_if_idx
    def create_vxlan_interface(node, vni, source_ip, destination_ip):
        """Create VXLAN interface and return sw if index of created interface.

        :param node: Node where to create VXLAN interface.
        :param vni: VXLAN Network Identifier.
        :param source_ip: Source IP of a VXLAN Tunnel End Point.
        :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
        :type node: dict
        :type vni: int
        :type source_ip: str
        :type destination_ip: str
        :returns: SW IF INDEX of created interface.
        :rtype: int
        :raises RuntimeError: if it is unable to create VxLAN interface on the
            node.
        """
        # NOTE(review): the 'try:' that opens this IPv6-first address probe
        # and the 'af_inet = AF_INET6' / 'af_inet = AF_INET' assignments
        # appear to be missing -- restore from VCS.
        src_address = IPv6Address(unicode(source_ip))
        dst_address = IPv6Address(unicode(destination_ip))
        except (AddressValueError, NetmaskValueError):
            src_address = IPv4Address(unicode(source_ip))
            dst_address = IPv4Address(unicode(destination_ip))

        cmd = 'vxlan_add_del_tunnel'
        # NOTE(review): the 'vni=vni' / 'is_ipv6=...' entries and the closing
        # of this dict appear to be missing.
        args = dict(is_add=1,
                    instance=Constants.BITWISE_NON_ZERO,
                    src_address=inet_pton(af_inet, str(src_address)),
                    dst_address=inet_pton(af_inet, str(dst_address)),
                    mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
                    decap_next_index=Constants.BITWISE_NON_ZERO,
        err_msg = 'Failed to create VXLAN tunnel interface on host {host}'.\
            format(host=node['host'])
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)

        # Register the new tunnel interface in the topology dictionary.
        sw_if_idx = papi_resp['sw_if_index']
        if_key = Topology.add_new_port(node, 'vxlan_tunnel')
        Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
        Topology.update_interface_name(node, if_key, ifc_name)
        # NOTE(review): a 'return sw_if_idx' appears to be missing here.
    def vxlan_dump(node, interface=None):
        """Get VxLAN data for the given interface.

        :param node: VPP node to get interface data from.
        :param interface: Numeric index or name string of a specific interface.
            If None, information about all VxLAN interfaces is returned.
        :type node: dict
        :type interface: int or str
        :returns: Dictionary containing data for the given VxLAN interface or if
            interface=None, the list of dictionaries with all VxLAN interfaces.
        :rtype: dict or list
        :raises TypeError: if the data type of interface is neither basestring
            nor int.
        """
        if interface is not None:
            sw_if_index = InterfaceUtil.get_interface_index(node, interface)
            # NOTE(review): an 'else:' appears to be missing above this line;
            # BITWISE_NON_ZERO acts as the "dump all tunnels" wildcard.
            sw_if_index = int(Constants.BITWISE_NON_ZERO)

        cmd = 'vxlan_tunnel_dump'
        cmd_reply = 'vxlan_tunnel_details'
        args = dict(sw_if_index=sw_if_index)
        # NOTE(review): the 'host=...' continuation of err_msg appears missing.
        err_msg = 'Failed to get VXLAN dump on host {host}'.format(
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
        papi_vxlan_dump = papi_resp.reply[0]['api_reply']

        def process_vxlan_dump(vxlan_dump):
            """Process vxlan dump.

            Renders the binary tunnel endpoint addresses as human-readable
            IPv4/IPv6 strings, depending on the tunnel address family.

            :param vxlan_dump: Vxlan interface dump.
            :type vxlan_dump: dict
            :returns: Processed vxlan interface dump.
            :rtype: dict
            """
            if vxlan_dump['is_ipv6']:
                vxlan_dump['src_address'] = \
                    inet_ntop(AF_INET6, vxlan_dump['src_address'])
                vxlan_dump['dst_address'] = \
                    inet_ntop(AF_INET6, vxlan_dump['dst_address'])
                # NOTE(review): an 'else:' appears to be missing before the
                # IPv4 branch below (which uses only the first 4 bytes).
                vxlan_dump['src_address'] = \
                    inet_ntop(AF_INET, vxlan_dump['src_address'][0:4])
                vxlan_dump['dst_address'] = \
                    inet_ntop(AF_INET, vxlan_dump['dst_address'][0:4])

        # dict for single-interface lookups, list for full dumps.
        data = list() if interface is None else dict()
        for item in papi_vxlan_dump:
            if interface is None:
                data.append(process_vxlan_dump(item[cmd_reply]))
            elif item[cmd_reply]['sw_if_index'] == sw_if_index:
                data = process_vxlan_dump(item[cmd_reply])

        logger.debug('VXLAN data:\n{vxlan_data}'.format(vxlan_data=data))
    def vhost_user_dump(node):
        """Get vhost-user data for the given node.

        TODO: Move to VhostUser.py

        :param node: VPP node to get interface data from.
        :type node: dict
        :returns: List of dictionaries with all vhost-user interfaces.
        :rtype: list
        """
        cmd = 'sw_interface_vhost_user_dump'
        cmd_reply = 'sw_interface_vhost_user_details'
        # NOTE(review): the 'host=...' continuation of err_msg appears missing.
        err_msg = 'Failed to get vhost-user dump on host {host}'.format(
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd).get_dump(err_msg)
        # NOTE(review): despite the name, this variable holds the vhost-user
        # dump (name copy-pasted from vxlan_dump) -- consider renaming.
        papi_vxlan_dump = papi_resp.reply[0]['api_reply']

        def process_vhost_dump(vhost_dump):
            """Process vhost dump.

            Strips trailing NUL padding from fixed-width string fields.

            :param vhost_dump: Vhost interface dump.
            :type vhost_dump: dict
            :returns: Processed vhost interface dump.
            :rtype: dict
            """
            vhost_dump['interface_name'] = \
                vhost_dump['interface_name'].rstrip('\x00')
            vhost_dump['sock_filename'] = \
                vhost_dump['sock_filename'].rstrip('\x00')

        # NOTE(review): the 'data = list()' initialization above this loop and
        # the final 'return data' appear to be missing -- restore from VCS.
        for item in papi_vxlan_dump:
            data.append(process_vhost_dump(item[cmd_reply]))

        logger.debug('Vhost-user data:\n{vhost_data}'.format(vhost_data=data))
    def tap_dump(node, name=None):
        """Get all TAP interface data from the given node, or data about
        a specific TAP interface.

        :param node: VPP node to get data from.
        :param name: Optional name of a specific TAP interface.
        :type node: dict
        :type name: str
        :returns: Dictionary of information about a specific TAP interface, or
            a List of dictionaries containing all TAP data for the given node.
        :rtype: dict or list
        """
        cmd = 'sw_interface_tap_v2_dump'
        cmd_reply = 'sw_interface_tap_v2_details'
        # NOTE(review): the 'host=...' continuation of err_msg appears missing.
        err_msg = 'Failed to get TAP dump on host {host}'.format(
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd).get_dump(err_msg)
        papi_tap_dump = papi_resp.reply[0]['api_reply']

        def process_tap_dump(tap_dump):
            """Process tap dump.

            Strips trailing NUL padding from fixed-width string fields and
            renders binary MAC/IP fields as human-readable strings.

            :param tap_dump: Tap interface dump.
            :type tap_dump: dict
            :returns: Processed tap interface dump.
            :rtype: dict
            """
            tap_dump['dev_name'] = tap_dump['dev_name'].rstrip('\x00')
            tap_dump['host_if_name'] = tap_dump['host_if_name'].rstrip('\x00')
            tap_dump['host_namespace'] = \
                tap_dump['host_namespace'].rstrip('\x00')
            tap_dump['host_mac_addr'] = \
                L2Util.bin_to_mac(tap_dump['host_mac_addr'])
            tap_dump['host_ip4_addr'] = \
                inet_ntop(AF_INET, tap_dump['host_ip4_addr'])
            tap_dump['host_ip6_addr'] = \
                inet_ntop(AF_INET6, tap_dump['host_ip6_addr'])

        # dict for a named lookup, list for a full dump.
        data = list() if name is None else dict()
        for item in papi_tap_dump:
            # NOTE(review): the 'if name is None:' header above this append
            # appears to be missing -- restore from VCS.
            data.append(process_tap_dump(item[cmd_reply]))
            elif item[cmd_reply].get('dev_name').rstrip('\x00') == name:
                data = process_tap_dump(item[cmd_reply])

        logger.debug('TAP data:\n{tap_data}'.format(tap_data=data))
    def create_subinterface(node, interface, sub_id, outer_vlan_id=None,
                            inner_vlan_id=None, type_subif=None):
        """Create sub-interface on node. It is possible to set required
        sub-interface type and VLAN tag(s).

        :param node: Node to add sub-interface.
        :param interface: Interface name on which create sub-interface.
        :param sub_id: ID of the sub-interface to be created.
        :param outer_vlan_id: Optional outer VLAN ID.
        :param inner_vlan_id: Optional inner VLAN ID.
        :param type_subif: Optional type of sub-interface. Values supported by
            VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
        :type node: dict
        :type interface: str or int
        :type sub_id: int
        :type outer_vlan_id: int
        :type inner_vlan_id: int
        :type type_subif: str
        :returns: Name and index of created sub-interface.
        :rtype: tuple
        :raises RuntimeError: If it is not possible to create sub-interface.
        """
        subif_types = type_subif.split()

        # NOTE(review): the "cmd = ..." assignment and the opening of the
        # 'args = dict(' call (including the 'sub_id=...' entry) appear to be
        # missing above these keyword lines -- restore from VCS.
            sw_if_index=InterfaceUtil.get_interface_index(node, interface),
            no_tags=1 if 'no_tags' in subif_types else 0,
            one_tag=1 if 'one_tag' in subif_types else 0,
            two_tags=1 if 'two_tags' in subif_types else 0,
            dot1ad=1 if 'dot1ad' in subif_types else 0,
            exact_match=1 if 'exact_match' in subif_types else 0,
            default_sub=1 if 'default_sub' in subif_types else 0,
            outer_vlan_id_any=1 if type_subif == 'default_sub' else 0,
            inner_vlan_id_any=1 if type_subif == 'default_sub' else 0,
            outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
            inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0)
        # NOTE(review): the 'host=...' continuation of err_msg appears missing.
        err_msg = 'Failed to create sub-interface on host {host}'.format(
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)

        # Register the new sub-interface in the topology dictionary.
        sw_subif_idx = papi_resp['sw_if_index']
        if_key = Topology.add_new_port(node, 'subinterface')
        Topology.update_interface_sw_if_index(node, if_key, sw_subif_idx)
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_subif_idx)
        Topology.update_interface_name(node, if_key, ifc_name)

        return '{ifc}.{s_id}'.format(ifc=interface, s_id=sub_id), sw_subif_idx
    def create_gre_tunnel_interface(node, source_ip, destination_ip):
        """Create GRE tunnel interface on node.

        :param node: VPP node to add tunnel interface.
        :param source_ip: Source of the GRE tunnel.
        :param destination_ip: Destination of the GRE tunnel.
        :type node: dict
        :type source_ip: str
        :type destination_ip: str
        :returns: Name and index of created GRE tunnel interface.
        :rtype: tuple
        :raises RuntimeError: If unable to create GRE tunnel interface.
        """
        cmd = 'gre_tunnel_add_del'
        # NOTE(review): the 'src=str(source_ip),' entry of this dict, its
        # closing lines, and the 'tunnel=tunnel' entry of 'args' appear to be
        # missing -- restore from VCS.
        tunnel = dict(type=0,
                      instance=Constants.BITWISE_NON_ZERO,
                      dst=str(destination_ip),
        args = dict(is_add=1,
        # NOTE(review): the 'host=...' continuation of err_msg appears missing.
        err_msg = 'Failed to create GRE tunnel interface on host {host}'.format(
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)

        # Register the new tunnel interface in the topology dictionary.
        sw_if_idx = papi_resp['sw_if_index']
        if_key = Topology.add_new_port(node, 'gre_tunnel')
        Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
        Topology.update_interface_name(node, if_key, ifc_name)

        return ifc_name, sw_if_idx
    def vpp_create_loopback(node):
        """Create loopback interface on VPP node.

        :param node: Node to create loopback interface on.
        :type node: dict
        :returns: SW interface index.
        :rtype: int
        :raises RuntimeError: If it is not possible to create loopback on the
            node.
        """
        cmd = 'create_loopback'
        # mac_address=0 lets VPP pick the loopback MAC itself.
        args = dict(mac_address=0)
        # NOTE(review): the 'host=...' continuation of err_msg appears missing.
        err_msg = 'Failed to create loopback interface on host {host}'.format(
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)

        # Register the new loopback in the topology dictionary.
        sw_if_idx = papi_resp['sw_if_index']
        if_key = Topology.add_new_port(node, 'loopback')
        Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
        Topology.update_interface_name(node, if_key, ifc_name)
        # NOTE(review): a 'return sw_if_idx' appears to be missing here.
    def vpp_create_bond_interface(node, mode, load_balance=None, mac=None):
        """Create bond interface on VPP node.

        :param node: DUT node from topology.
        :param mode: Link bonding mode.
        :param load_balance: Load balance (optional, valid for xor and lacp
            modes, otherwise ignored).
        :param mac: MAC address to assign to the bond interface (optional).
        :type node: dict
        :type mode: str
        :type load_balance: str
        :type mac: str
        :returns: Interface key (name) in topology.
        :rtype: str
        :raises RuntimeError: If it is not possible to create bond interface on
            the node.
        """
        # id=BITWISE_NON_ZERO lets VPP pick the bond instance number; the
        # textual mode / load-balance names are mapped onto the numeric
        # LinkBondMode / LinkBondLoadBalance enums declared at module level.
        args = dict(id=int(Constants.BITWISE_NON_ZERO),
                    use_custom_mac=0 if mac is None else 1,
                    mac_address=0 if mac is None else L2Util.mac_to_bin(mac),
                    mode=getattr(LinkBondMode, '{md}'.format(
                        md=mode.replace('-', '_').upper())).value,
                    lb=0 if load_balance is None else getattr(
                        LinkBondLoadBalance, '{lb}'.format(
                            lb=load_balance.upper())).value)
        err_msg = 'Failed to create bond interface on host {host}'.format(
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)
        sw_if_idx = papi_resp['sw_if_index']
        # Register the bond interface in the topology and return its key.
        InterfaceUtil.add_eth_interface(node, sw_if_idx=sw_if_idx,
        if_key = Topology.get_interface_by_sw_index(node, sw_if_idx)
1114 def add_eth_interface(node, ifc_name=None, sw_if_idx=None, ifc_pfx=None):
1115 """Add ethernet interface to current topology.
1117 :param node: DUT node from topology.
1118 :param ifc_name: Name of the interface.
1119 :param sw_if_idx: SW interface index.
1120 :param ifc_pfx: Interface key prefix.
1123 :type sw_if_idx: int
1126 if_key = Topology.add_new_port(node, ifc_pfx)
1128 if ifc_name and sw_if_idx is None:
1129 sw_if_idx = InterfaceUtil.vpp_get_interface_sw_index(node, ifc_name)
1130 Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
1131 if sw_if_idx and ifc_name is None:
1132 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
1133 Topology.update_interface_name(node, if_key, ifc_name)
1134 ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_idx)
1135 Topology.update_interface_mac_address(node, if_key, ifc_mac)
    def vpp_create_avf_interface(node, vf_pci_addr, num_rx_queues=None):
        """Create AVF interface on VPP node.

        :param node: DUT node from topology.
        :param vf_pci_addr: Virtual Function PCI address.
        :param num_rx_queues: Number of RX queues.
        :type node: dict
        :type vf_pci_addr: str
        :type num_rx_queues: int
        :returns: Interface key (name) in topology.
        :rtype: str
        :raises RuntimeError: If it is not possible to create AVF interface on
            the node.
        """
        # rxq_num=0 means "use the driver default"; the PCI address is
        # passed in its packed integer form (see pci_to_int above).
        args = dict(pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
                    rxq_num=int(num_rx_queues) if num_rx_queues else 0,
        err_msg = 'Failed to create AVF interface on host {host}'.format(
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)
        sw_if_idx = papi_resp['sw_if_index']
        # Register the AVF interface in the topology and return its key.
        InterfaceUtil.add_eth_interface(node, sw_if_idx=sw_if_idx,
        if_key = Topology.get_interface_by_sw_index(node, sw_if_idx)
    def vpp_enslave_physical_interface(node, interface, bond_if):
        """Enslave physical interface to bond interface on VPP node.

        :param node: DUT node from topology.
        :param interface: Physical interface key from topology file.
        :param bond_if: Bond interface key from topology file.
        :type node: dict
        :type interface: str
        :type bond_if: str
        :raises RuntimeError: If it is not possible to enslave physical
            interface to bond interface on the node.
        """
        cmd = 'bond_enslave'
            sw_if_index=Topology.get_interface_sw_index(node, interface),
            bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
        err_msg = 'Failed to enslave physical interface {ifc} to bond ' \
            'interface {bond} on host {host}'.format(ifc=interface,
        with PapiExecutor(node) as papi_exec:
            # No useful return value; only verify the reply for errors.
            papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)
    def vpp_show_bond_data_on_node(node, details=False):
        """Show (detailed) bond information on VPP node.

        :param node: DUT node from topology.
        :param details: If detailed information is required or not.
        :type node: dict
        :type details: bool
        """
        cmd = 'sw_interface_bond_dump'
        cmd_reply = 'sw_interface_bond_details'
        err_msg = 'Failed to get bond interface dump on host {host}'.format(
        # Accumulate a human-readable report in 'data' and log it at the end.
        data = ('Bond data on node {host}:\n'.format(host=node['host']))
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd).get_dump(err_msg)
        papi_dump = papi_resp.reply[0]['api_reply']
        for item in papi_dump:
            data += ('{b}\n'.format(b=item[cmd_reply]['interface_name'].
            data += (' mode: {m}\n'.
                format(m=LinkBondMode(item[cmd_reply]['mode']).name.
            data += (' load balance: {lb}\n'.
                format(lb=LinkBondLoadBalance(item[cmd_reply]['lb']).name.
            data += (' number of active slaves: {n}\n'.
                format(n=item[cmd_reply]['active_slaves']))
            # Resolve per-slave details so slaves can be listed by name.
            slave_data = InterfaceUtil.vpp_bond_slave_dump(
                node, Topology.get_interface_by_sw_index(
                    node, item[cmd_reply]['sw_if_index']))
            for slave in slave_data:
                if not slave['is_passive']:
                    data += (' {s}\n'.format(s=slave['interface_name']))
            data += (' number of slaves: {n}\n'.
                format(n=item[cmd_reply]['slaves']))
            for slave in slave_data:
                data += (' {s}\n'.format(s=slave['interface_name']))
            data += (' interface id: {i}\n'.
                format(i=item[cmd_reply]['id']))
            data += (' sw_if_index: {i}\n'.
                format(i=item[cmd_reply]['sw_if_index']))
    def vpp_bond_slave_dump(node, interface):
        """Get bond interface slave(s) data on VPP node.

        :param node: DUT node from topology.
        :param interface: Physical interface key from topology file.
        :type node: dict
        :type interface: str
        :returns: Bond slave interface data.
        :rtype: dict
        """
        cmd = 'sw_interface_slave_dump'
        cmd_reply = 'sw_interface_slave_details'
        args = dict(sw_if_index=Topology.get_interface_sw_index(
        err_msg = 'Failed to get slave dump on host {host}'.format(

        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
        papi_dump = papi_resp.reply[0]['api_reply']

        def process_slave_dump(slave_dump):
            """Process slave dump.

            :param slave_dump: Slave interface dump.
            :type slave_dump: dict
            :returns: Processed slave interface dump.
            :rtype: dict
            """
            # NOTE(review): source appears truncated here (continuation of
            # the interface_name normalization and the return statement are
            # missing) -- confirm against VCS.
            slave_dump['interface_name'] = slave_dump['interface_name'].\
        for item in papi_dump:
            data.append(process_slave_dump(item[cmd_reply]))

        logger.debug('Slave data:\n{slave_data}'.format(slave_data=data))
1289 def vpp_show_bond_data_on_all_nodes(nodes, details=False):
1290 """Show (detailed) bond information on all VPP nodes in DICT__nodes.
1292 :param nodes: Nodes in the topology.
1293 :param details: If detailed information is required or not.
1297 for node_data in nodes.values():
1298 if node_data['type'] == NodeType.DUT:
1299 InterfaceUtil.vpp_show_bond_data_on_node(node_data, details)
    def vpp_enable_input_acl_interface(node, interface, ip_version,
        """Enable input acl on interface.

        :param node: VPP node to setup interface for input acl.
        :param interface: Interface to setup input acl.
        :param ip_version: Version of IP protocol.
        :param table_index: Classify table index.
        :type node: dict
        :type interface: str or int
        :type ip_version: str
        :type table_index: int
        """
        cmd = 'input_acl_set_interface'
            # Only the table matching ip_version is set; the other two stay
            # at BITWISE_NON_ZERO (~0), which VPP treats as "unset".
            sw_if_index=InterfaceUtil.get_interface_index(node, interface),
            ip4_table_index=table_index if ip_version == 'ip4'
            else Constants.BITWISE_NON_ZERO,
            ip6_table_index=table_index if ip_version == 'ip6'
            else Constants.BITWISE_NON_ZERO,
            l2_table_index=table_index if ip_version == 'l2'
            else Constants.BITWISE_NON_ZERO,
        err_msg = 'Failed to enable input acl on interface {ifc}'.format(
        with PapiExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)
    def get_interface_classify_table(node, interface):
        """Get name of classify table for the given interface.

        TODO: Move to Classify.py.

        :param node: VPP node to get data from.
        :param interface: Name or sw_if_index of a specific interface.
        :type node: dict
        :type interface: str or int
        :returns: Classify table name.
        :rtype: str
        """
        # Accept either an interface name or an already-resolved sw_if_index.
        if isinstance(interface, basestring):
            sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
            # NOTE(review): an 'else:' line appears to be missing before the
            # following assignment -- confirm against VCS.
            sw_if_index = interface

        cmd = 'classify_table_by_interface'
        args = dict(sw_if_index=sw_if_index)
        err_msg = 'Failed to get classify table name by interface {ifc}'.format(
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg). \
                verify_reply(err_msg=err_msg)
1360 def get_sw_if_index(node, interface_name):
1361 """Get sw_if_index for the given interface from actual interface dump.
1363 :param node: VPP node to get interface data from.
1364 :param interface_name: Name of the specific interface.
1366 :type interface_name: str
1367 :returns: sw_if_index of the given interface.
1370 interface_data = InterfaceUtil.vpp_get_interface_data(
1371 node, interface=interface_name)
1372 return interface_data.get('sw_if_index')
    def vxlan_gpe_dump(node, interface_name=None):
        """Get VxLAN GPE data for the given interface.

        :param node: VPP node to get interface data from.
        :param interface_name: Name of the specific interface. If None,
            information about all VxLAN GPE interfaces is returned.
        :type node: dict
        :type interface_name: str
        :returns: Dictionary containing data for the given VxLAN GPE interface
            or if interface=None, the list of dictionaries with all VxLAN GPE
            interfaces.
        :rtype: dict or list
        """
        # Dumping with sw_if_index == ~0 returns all VxLAN GPE tunnels.
        if interface_name is not None:
            sw_if_index = InterfaceUtil.get_interface_index(
                node, interface_name)
            # NOTE(review): an 'else:' line appears to be missing before the
            # following assignment -- confirm against VCS.
            sw_if_index = int(Constants.BITWISE_NON_ZERO)

        cmd = 'vxlan_gpe_tunnel_dump'
        cmd_reply = 'vxlan_gpe_tunnel_details'
        args = dict(sw_if_index=sw_if_index)
        err_msg = 'Failed to get VXLAN-GPE dump on host {host}'.format(
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
        papi_vxlan_dump = papi_resp.reply[0]['api_reply']

        def process_vxlan_gpe_dump(vxlan_dump):
            """Process vxlan_gpe dump.

            :param vxlan_dump: Vxlan_gpe interface dump.
            :type vxlan_dump: dict
            :returns: Processed vxlan_gpe interface dump.
            :rtype: dict
            """
            # Addresses come back as packed bytes; render them as text.
            # For IPv4 only the first 4 bytes of the buffer are significant.
            if vxlan_dump['is_ipv6']:
                vxlan_dump['local'] = \
                    inet_ntop(AF_INET6, vxlan_dump['local'])
                vxlan_dump['remote'] = \
                    inet_ntop(AF_INET6, vxlan_dump['remote'])
                vxlan_dump['local'] = \
                    inet_ntop(AF_INET, vxlan_dump['local'][0:4])
                vxlan_dump['remote'] = \
                    inet_ntop(AF_INET, vxlan_dump['remote'][0:4])

        # Return a list when dumping all tunnels, a single dict otherwise.
        data = list() if interface_name is None else dict()
        for item in papi_vxlan_dump:
            if interface_name is None:
                data.append(process_vxlan_gpe_dump(item[cmd_reply]))
            elif item[cmd_reply]['sw_if_index'] == sw_if_index:
                data = process_vxlan_gpe_dump(item[cmd_reply])

        logger.debug('VXLAN-GPE data:\n{vxlan_gpe_data}'.format(
            vxlan_gpe_data=data))
    def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
        """Assign VPP interface to specific VRF/FIB table.

        :param node: VPP node where the FIB and interface are located.
        :param interface: Interface to be assigned to FIB.
        :param table_id: VRF table ID.
        :param ipv6: Assign to IPv6 table. Default False.
        :type node: dict
        :type interface: str or int
        :type table_id: int
        :type ipv6: bool
        """
        cmd = 'sw_interface_set_table'
            sw_if_index=InterfaceUtil.get_interface_index(node, interface),
            is_ipv6=1 if ipv6 else 0,
            vrf_id=int(table_id))
        err_msg = 'Failed to assign interface {ifc} to FIB table'.format(
        with PapiExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_replies(err_msg). \
                verify_reply(err_msg=err_msg)
    def set_linux_interface_mac(node, interface, mac, namespace=None,
        """Set MAC address for interface in linux.

        :param node: Node where to execute command.
        :param interface: Interface in namespace.
        :param mac: MAC to be assigned to interface.
        :param namespace: Execute command in namespace. Optional
        :param vf_id: Virtual Function id. Optional
        :type node: dict
        :type interface: str
        :type mac: str
        :type namespace: str
        :type vf_id: int
        """
        # 'vf <id> mac <mac>' targets a VF of a PF; bare 'address <mac>'
        # sets the MAC on the interface itself.
        mac_str = 'vf {vf_id} mac {mac}'.format(vf_id=vf_id, mac=mac) \
            if vf_id is not None else 'address {mac}'.format(mac=mac)
        ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''

        cmd = ('{ns} ip link set {interface} {mac}'.
               format(ns=ns_str, interface=interface, mac=mac_str))
        exec_cmd_no_error(node, cmd, sudo=True)
    def set_linux_interface_trust_on(node, interface, namespace=None,
        """Set trust on (promisc) for interface in linux.

        :param node: Node where to execute command.
        :param interface: Interface in namespace.
        :param namespace: Execute command in namespace. Optional
        :param vf_id: Virtual Function id. Optional
        :type node: dict
        :type interface: str
        :type namespace: str
        :type vf_id: int
        """
        # 'vf <id> trust on' targets a VF of a PF; bare 'trust on' targets
        # the interface itself.
        trust_str = 'vf {vf_id} trust on'.format(vf_id=vf_id) \
            if vf_id is not None else 'trust on'
        ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''

        cmd = ('{ns} ip link set dev {interface} {trust}'.
               format(ns=ns_str, interface=interface, trust=trust_str))
        exec_cmd_no_error(node, cmd, sudo=True)
    def set_linux_interface_spoof_off(node, interface, namespace=None,
        """Set spoof off for interface in linux.

        :param node: Node where to execute command.
        :param interface: Interface in namespace.
        :param namespace: Execute command in namespace. Optional
        :param vf_id: Virtual Function id. Optional
        :type node: dict
        :type interface: str
        :type namespace: str
        :type vf_id: int
        """
        # 'vf <id> spoof off' targets a VF of a PF; bare 'spoof off' targets
        # the interface itself.
        spoof_str = 'vf {vf_id} spoof off'.format(vf_id=vf_id) \
            if vf_id is not None else 'spoof off'
        ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''

        cmd = ('{ns} ip link set dev {interface} {spoof}'.
               format(ns=ns_str, interface=interface, spoof=spoof_str))
        exec_cmd_no_error(node, cmd, sudo=True)
    def init_avf_interface(node, ifc_key, numvfs=1, osi_layer='L2'):
        """Init PCI device by creating VFs and bind them to vfio-pci for AVF
        driver testing on DUT.

        :param node: DUT node.
        :param ifc_key: Interface key from topology file.
        :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
        :param osi_layer: OSI Layer type to initialize TG with.
            Default value "L2" sets linux interface spoof off.
        :type node: dict
        :type ifc_key: str
        :type numvfs: int
        :type osi_layer: str
        :returns: Virtual Function topology interface keys.
        :rtype: list
        :raises RuntimeError: If a reason preventing initialization is found.
        """
        # Read PCI address and driver.
        pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
        pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
        uio_driver = Topology.get_uio_driver(node)
        kernel_driver = Topology.get_interface_driver(node, ifc_key)
        if kernel_driver != "i40e":
            "AVF needs i40e driver, not {driver} at node {host} ifc {ifc}"\
            .format(driver=kernel_driver, host=node["host"], ifc=ifc_key))
        # The ':' in the PCI address is escaped for the driver lookup call.
        current_driver = DUTSetup.get_pci_dev_driver(
            node, pf_pci_addr.replace(':', r'\:'))

        VPPUtil.stop_vpp_service(node)
        if current_driver != kernel_driver:
            # PCI device must be re-bound to kernel driver before creating VFs.
            DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
            # Stop VPP to prevent deadlock.
            # Unbind from current driver.
            DUTSetup.pci_driver_unbind(node, pf_pci_addr)
            # Bind to kernel driver.
            DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)

        # Initialize PCI VFs
        DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)

        # Set MAC address and bind each virtual function to uio driver.
        for vf_id in range(numvfs):
            # Derive a unique VF MAC from the PF MAC bytes and the VF id.
            vf_mac_addr = ":".join([pf_mac_addr[0], pf_mac_addr[2],
                                    pf_mac_addr[3], pf_mac_addr[4],
                                    pf_mac_addr[5], "{:02x}".format(vf_id)])

            # Resolve the PF netdev name on the remote host via shell glob.
            pf_dev = '`basename /sys/bus/pci/devices/{pci}/net/*`'.\
                format(pci=pf_pci_addr)
            InterfaceUtil.set_linux_interface_trust_on(node, pf_dev,
            if osi_layer == 'L2':
                InterfaceUtil.set_linux_interface_spoof_off(node, pf_dev,
            InterfaceUtil.set_linux_interface_mac(node, pf_dev, vf_mac_addr,

            DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
            DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)

            # Add newly created ports into topology file
            vf_ifc_name = '{pf_if_key}_vf'.format(pf_if_key=ifc_key)
            vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
            vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
            Topology.update_interface_name(node, vf_ifc_key,
                                           vf_ifc_name+str(vf_id+1))
            Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
            Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
            vf_ifc_keys.append(vf_ifc_key)
    def vpp_create_multiple_vxlan_ipv4_tunnels(
            node, node_vxlan_if, node_vlan_if, op_node, op_node_if,
            n_tunnels, vni_start, src_ip_start, dst_ip_start, ip_step, ip_limit,
        """Create multiple VXLAN tunnel interfaces and VLAN sub-interfaces on
        VPP node.

        Put each pair of VXLAN tunnel interface and VLAN sub-interface to
        separate bridge-domain.

        :param node: VPP node to create VXLAN tunnel interfaces.
        :param node_vxlan_if: VPP node interface key to create VXLAN tunnel
            interfaces.
        :param node_vlan_if: VPP node interface key to create VLAN
            sub-interfaces.
        :param op_node: Opposite VPP node for VXLAN tunnel interfaces.
        :param op_node_if: Opposite VPP node interface key for VXLAN tunnel
            interfaces.
        :param n_tunnels: Number of tunnel interfaces to create.
        :param vni_start: VNI start ID.
        :param src_ip_start: VXLAN tunnel source IP address start.
        :param dst_ip_start: VXLAN tunnel destination IP address start.
        :param ip_step: IP address incremental step.
        :param ip_limit: IP address limit.
        :param bd_id_start: Bridge-domain ID start.
        :type node: dict
        :type node_vxlan_if: str
        :type node_vlan_if: str
        :type op_node: dict
        :type op_node_if: str
        :type n_tunnels: int
        :type vni_start: int
        :type src_ip_start: str
        :type dst_ip_start: str
        :type ip_step: int
        :type ip_limit: str
        :type bd_id_start: int
        """
        # configure IPs, create VXLAN interfaces and VLAN sub-interfaces
        vxlan_count = InterfaceUtil.vpp_create_vxlan_and_vlan_interfaces(
            node, node_vxlan_if, node_vlan_if, n_tunnels, vni_start,
            src_ip_start, dst_ip_start, ip_step, ip_limit)

        # update topology with VXLAN interfaces and VLAN sub-interfaces data
        # and put interfaces up
        InterfaceUtil.vpp_put_vxlan_and_vlan_interfaces_up(
            node, vxlan_count, node_vlan_if)

        # configure bridge domains, ARPs and routes
        InterfaceUtil.vpp_put_vxlan_and_vlan_interfaces_to_bridge_domain(
            node, node_vxlan_if, vxlan_count, op_node, op_node_if, dst_ip_start,
            ip_step, bd_id_start)
    def vpp_create_vxlan_and_vlan_interfaces(
            node, node_vxlan_if, node_vlan_if, vxlan_count, vni_start,
            src_ip_start, dst_ip_start, ip_step, ip_limit):
        """
        Configure IPs, create VXLAN interfaces and VLAN sub-interfaces on VPP
        node.

        :param node: VPP node.
        :param node_vxlan_if: VPP node interface key to create VXLAN tunnel
            interfaces.
        :param node_vlan_if: VPP node interface key to create VLAN
            sub-interfaces.
        :param vxlan_count: Number of tunnel interfaces to create.
        :param vni_start: VNI start ID.
        :param src_ip_start: VXLAN tunnel source IP address start.
        :param dst_ip_start: VXLAN tunnel destination IP address start.
        :param ip_step: IP address incremental step.
        :param ip_limit: IP address limit.
        :type node: dict
        :type node_vxlan_if: str
        :type node_vlan_if: str
        :type vxlan_count: int
        :type vni_start: int
        :type src_ip_start: str
        :type dst_ip_start: str
        :type ip_step: int
        :type ip_limit: str
        :returns: Number of created VXLAN interfaces.
        :rtype: int
        """
        # Parse addresses as IPv6 first; fall back to IPv4 on parse failure.
        src_address_start = IPv6Address(unicode(src_ip_start))
        dst_address_start = IPv6Address(unicode(dst_ip_start))
        ip_address_limit = IPv6Address(unicode(ip_limit))
        except (AddressValueError, NetmaskValueError):
            src_address_start = IPv4Address(unicode(src_ip_start))
            dst_address_start = IPv4Address(unicode(dst_ip_start))
            ip_address_limit = IPv4Address(unicode(ip_limit))

        with PapiExecutor(node) as papi_exec:
            for i in xrange(0, vxlan_count):
                src_ip = src_address_start + i * ip_step
                dst_ip = dst_address_start + i * ip_step
                # Stop early when the requested range would exceed the limit.
                if src_ip > ip_address_limit or dst_ip > ip_address_limit:
                    logger.warn("Can't do more iterations - IP address limit "
                                "has been reached.")
                # Add the tunnel source address to the VXLAN-side interface.
                cmd = 'sw_interface_add_del_address'
                    sw_if_index=InterfaceUtil.get_interface_index(
                        node, node_vxlan_if),
                    address_length=128 if is_ipv6 else 32,
                    address=inet_pton(af_inet, str(src_ip)))
                papi_exec.add(cmd, **args)
                cmd = 'vxlan_add_del_tunnel'
                    # ~0 values request auto-assignment / "unset" semantics.
                    instance=Constants.BITWISE_NON_ZERO,
                    src_address=inet_pton(af_inet, str(src_ip)),
                    dst_address=inet_pton(af_inet, str(dst_ip)),
                    mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
                    decap_next_index=Constants.BITWISE_NON_ZERO,
                    vni=int(vni_start)+i)
                papi_exec.add(cmd, **args)
                cmd = 'create_vlan_subif'
                    sw_if_index=InterfaceUtil.get_interface_index(
                        node, node_vlan_if),
                papi_exec.add(cmd, **args)
            # Send all accumulated requests in one batch and check replies.
            papi_exec.get_replies().verify_replies()
    def vpp_put_vxlan_and_vlan_interfaces_up(node, vxlan_count, node_vlan_if):
        """
        Update topology with VXLAN interfaces and VLAN sub-interfaces data
        and put interfaces up.

        :param node: VPP node.
        :param vxlan_count: Number of tunnel interfaces.
        :param node_vlan_if: VPP node interface key where VLAN sub-interfaces
            have been created.
        :type node: dict
        :type vxlan_count: int
        :type node_vlan_if: str
        """
        if_data = InterfaceUtil.vpp_get_interface_data(node)

        with PapiExecutor(node) as papi_exec:
            for i in xrange(0, vxlan_count):
                vxlan_subif_key = Topology.add_new_port(node, 'vxlan_tunnel')
                vxlan_subif_name = 'vxlan_tunnel{nr}'.format(nr=i)
                vxlan_subif_idx = None
                vlan_subif_key = Topology.add_new_port(node, 'vlan_subif')
                vlan_subif_name = '{if_name}.{vlan}'.format(
                    if_name=Topology.get_interface_name(
                        node, node_vlan_if), vlan=i+1)
                # Locate both newly created interfaces in the dump by name.
                for data in if_data:
                    if not vxlan_found \
                            and data['interface_name'] == vxlan_subif_name:
                        vxlan_subif_idx = data['sw_if_index']
                    elif not vlan_found \
                            and data['interface_name'] == vlan_subif_name:
                        vlan_idx = data['sw_if_index']
                    if vxlan_found and vlan_found:
                # Record the resolved indices/names in the topology, then
                # queue admin-up requests for both interfaces.
                Topology.update_interface_sw_if_index(
                    node, vxlan_subif_key, vxlan_subif_idx)
                Topology.update_interface_name(
                    node, vxlan_subif_key, vxlan_subif_name)
                cmd = 'sw_interface_set_flags'
                args1 = dict(sw_if_index=vxlan_subif_idx,
                Topology.update_interface_sw_if_index(
                    node, vlan_subif_key, vlan_idx)
                Topology.update_interface_name(
                    node, vlan_subif_key, vlan_subif_name)
                args2 = dict(sw_if_index=vlan_idx,
                papi_exec.add(cmd, **args1).add(cmd, **args2)
            # Batch all set_flags requests and verify every reply.
            papi_exec.get_replies().verify_replies()
    def vpp_put_vxlan_and_vlan_interfaces_to_bridge_domain(
            node, node_vxlan_if, vxlan_count, op_node, op_node_if, dst_ip_start,
            ip_step, bd_id_start):
        """
        Configure ARPs and routes for VXLAN interfaces and put each pair of
        VXLAN tunnel interface and VLAN sub-interface to separate bridge-domain.

        :param node: VPP node.
        :param node_vxlan_if: VPP node interface key where VXLAN tunnel
            interfaces have been created.
        :param vxlan_count: Number of tunnel interfaces.
        :param op_node: Opposite VPP node for VXLAN tunnel interfaces.
        :param op_node_if: Opposite VPP node interface key for VXLAN tunnel
            interfaces.
        :param dst_ip_start: VXLAN tunnel destination IP address start.
        :param ip_step: IP address incremental step.
        :param bd_id_start: Bridge-domain ID start.
        :type node: dict
        :type node_vxlan_if: str
        :type vxlan_count: int
        :type op_node: dict
        :type op_node_if: str
        :type dst_ip_start: str
        :type ip_step: int
        :type bd_id_start: int
        """
        # Parse the destination start as IPv6 first; fall back to IPv4.
        dst_address_start = IPv6Address(unicode(dst_ip_start))
        except (AddressValueError, NetmaskValueError):
            dst_address_start = IPv4Address(unicode(dst_ip_start))

        with PapiExecutor(node) as papi_exec:
            for i in xrange(0, vxlan_count):
                dst_ip = dst_address_start + i * ip_step
                # Static neighbor (ARP) entry towards the opposite node's MAC.
                sw_if_index=Topology.get_interface_sw_index(
                    node, node_vxlan_if),
                    Topology.get_interface_mac(op_node, op_node_if)),
                ip_address=str(dst_ip))
                cmd = 'ip_neighbor_add_del'
                papi_exec.add(cmd, **args)
                # Host route for the tunnel destination via the VXLAN-side
                # interface.
                cmd = 'ip_add_del_route'
                next_hop_sw_if_index=Topology.get_interface_sw_index(
                    node, node_vxlan_if),
                next_hop_proto=1 if is_ipv6 else 0,
                dst_address_length=128 if is_ipv6 else 32,
                dst_address=inet_pton(af_inet, str(dst_ip)),
                next_hop_address=inet_pton(af_inet, str(dst_ip)))
                papi_exec.add(cmd, **args)
                # Put the i-th VXLAN tunnel and the i-th VLAN sub-interface
                # into the same (per-pair) bridge domain.
                cmd = 'sw_interface_set_l2_bridge'
                rx_sw_if_index=Topology.get_interface_sw_index(
                    node, 'vxlan_tunnel{nr}'.format(nr=i+1)),
                bd_id=int(bd_id_start+i),
                papi_exec.add(cmd, **args)
                rx_sw_if_index=Topology.get_interface_sw_index(
                    node, 'vlan_subif{nr}'.format(nr=i+1)),
                bd_id=int(bd_id_start+i),
                papi_exec.add(cmd, **args)
            papi_exec.get_replies().verify_replies()
1881 def vpp_sw_interface_rx_placement_dump(node):
1882 """Dump VPP interface RX placement on node.
1884 :param node: Node to run command on.
1886 :returns: Thread mapping information as a list of dictionaries.
1889 cmd = 'sw_interface_rx_placement_dump'
1890 cmd_reply = 'sw_interface_rx_placement_details'
1891 err_msg = "Failed to run '{cmd}' PAPI command on host {host}!".format(
1892 cmd=cmd, host=node['host'])
1893 with PapiExecutor(node) as papi_exec:
1894 for ifc in node['interfaces'].values():
1895 if ifc['vpp_sw_index'] is not None:
1896 papi_exec.add(cmd, sw_if_index=ifc['vpp_sw_index'])
1897 papi_resp = papi_exec.get_dump(err_msg)
1898 thr_mapping = [s[cmd_reply] for r in papi_resp.reply
1899 for s in r['api_reply']]
1900 return sorted(thr_mapping, key=lambda k: k['sw_if_index'])
    def vpp_sw_interface_set_rx_placement(node, sw_if_index, queue_id,
        """Set interface RX placement to worker on node.

        :param node: Node to run command on.
        :param sw_if_index: VPP SW interface index.
        :param queue_id: VPP interface queue ID.
        :param worker_id: VPP worker ID (indexing from 0).
        :type node: dict
        :type sw_if_index: int
        :type queue_id: int
        :type worker_id: int
        :raises RuntimeError: If failed to run command on host or if no API
            reply received.
        """
        cmd = 'sw_interface_set_rx_placement'
        err_msg = "Failed to set interface RX placement to worker on host " \
            "{host}!".format(host=node['host'])
        args = dict(sw_if_index=sw_if_index, queue_id=queue_id,
                    worker_id=worker_id)
        with PapiExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)
    def vpp_round_robin_rx_placement(node, prefix):
        """Set Round Robin interface RX placement on all worker threads
        on the node.

        :param node: Topology nodes.
        :param prefix: Interface name prefix.
        :type node: dict
        :type prefix: str
        """
        # Worker count excludes the VPP main thread (hence the "- 1").
        worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
        # NOTE(review): initialization and increment of 'worker_id' appear
        # to be missing from this view -- confirm against VCS.
        for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
            for interface in node['interfaces'].values():
                # Only re-place queues of interfaces matching the prefix.
                if placement['sw_if_index'] == interface['vpp_sw_index'] \
                        and prefix in interface['name']:
                    InterfaceUtil.vpp_sw_interface_set_rx_placement(
                        node, placement['sw_if_index'], placement['queue_id'],
                        worker_id % worker_cnt)
1949 def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix):
1950 """Set Round Robin interface RX placement on all worker threads
1953 :param nodes: Topology nodes.
1954 :param prefix: Interface name prefix.
1958 for node in nodes.values():
1959 if node['type'] == NodeType.DUT:
1960 InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)