1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Interface util library."""
16 from socket import AF_INET, AF_INET6, inet_ntop, inet_pton
17 from time import sleep
19 from enum import IntEnum
20 from ipaddress import IPv4Address, IPv6Address
21 from ipaddress import AddressValueError, NetmaskValueError
22 from robot.api import logger
24 from resources.libraries.python.Constants import Constants
25 from resources.libraries.python.CpuUtils import CpuUtils
26 from resources.libraries.python.DUTSetup import DUTSetup
27 from resources.libraries.python.IPUtil import convert_ipv4_netmask_prefix
28 from resources.libraries.python.L2Util import L2Util
29 from resources.libraries.python.PapiExecutor import PapiExecutor
30 from resources.libraries.python.parsers.JsonParser import JsonParser
31 from resources.libraries.python.ssh import SSH, exec_cmd_no_error
32 from resources.libraries.python.topology import NodeType, Topology
33 from resources.libraries.python.VPPUtil import VPPUtil
class LinkBondLoadBalance(IntEnum):
    """Link bonding load balance modes.

    NOTE(review): the enum members are missing from this excerpt
    (original lines 38-41) -- restore from VCS.
    """
class LinkBondMode(IntEnum):
    """Link bonding modes.

    NOTE(review): previous docstring said "load balance" (copy-paste from
    LinkBondLoadBalance); the enum members are missing from this excerpt.
    """
class InterfaceUtil(object):
    """General utilities for managing interfaces."""

    # udev rules file used to pin TG interface names by MAC address.
    __UDEV_IF_RULES_FILE = '/etc/udev/rules.d/10-network.rules'
def pci_to_int(pci_str):
    """Convert PCI address from string format (0000:18:0a.0) to its
    integer representation (169345024).

    :param pci_str: PCI address in string representation.
    :type pci_str: str
    :returns: Integer representation of PCI address.
    :rtype: int
    """
    # "dddd:bb:ss.f" -> [domain, bus, slot, function] hex fields.
    parts = pci_str.split(':')
    fields = [int(part, 16) for part in parts[0:2] + parts[2].split('.')]

    return fields[0] | fields[1] << 16 | fields[2] << 24 | fields[3] << 29
def get_interface_index(node, interface):
    """Get interface sw_if_index from topology file.

    :param node: Node where the interface is.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: str or int
    :returns: SW interface index.
    :rtype: int
    :raises TypeError: If interface is of a type that cannot be resolved.
    """
    try:
        # A numeric reference is the sw_if_index itself.
        sw_if_index = int(interface)
    except ValueError:
        # Not numeric - resolve via topology, first by key, then by name.
        sw_if_index = Topology.get_interface_sw_index(node, interface)
        if sw_if_index is None:
            sw_if_index = \
                Topology.get_interface_sw_index_by_name(node, interface)
    except TypeError as err:
        # str(err) instead of the Py2-only .message attribute, which is
        # fragile and removed in Py3.
        raise TypeError('Wrong interface format {ifc}: {err}'.format(
            ifc=interface, err=str(err)))

    return sw_if_index
def set_interface_state(node, interface, state, if_type='key'):
    """Set interface state on a node.

    Function can be used for DUTs as well as for TGs.

    :param node: Node where the interface is.
    :param interface: Interface key or sw_if_index or name.
    :param state: One of 'up' or 'down'.
    :param if_type: Interface type ('key', 'index' or 'name').
    :type node: dict
    :type interface: str or int
    :type state: str
    :type if_type: str
    :raises ValueError: If the interface type is unknown.
    :raises ValueError: If the state of interface is unexpected.
    :raises ValueError: If the node has an unknown node type.
    """
    # NOTE(review): several original lines are missing from this excerpt
    # (the leading "if if_type == 'key':" / "elif if_type == 'index':"
    # branch headers, the admin_up_down assignments, else: lines and
    # host= format kwargs). The code below is NOT syntactically complete
    # -- restore the missing lines from VCS before use.
    if isinstance(interface, basestring):
        sw_if_index = Topology.get_interface_sw_index(node, interface)
        iface_name = Topology.get_interface_name(node, interface)
    sw_if_index = interface
    elif if_type == 'name':
        iface_key = Topology.get_interface_by_name(node, interface)
        if iface_key is not None:
            sw_if_index = Topology.get_interface_sw_index(node, iface_key)
        iface_name = interface
        raise ValueError('Unknown if_type: {type}'.format(type=if_type))
    if node['type'] == NodeType.DUT:
        elif state == 'down':
        raise ValueError('Unexpected interface state: {state}'.format(
        cmd = 'sw_interface_set_flags'
        err_msg = 'Failed to set interface state on host {host}'.format(
        args = dict(sw_if_index=sw_if_index,
                    admin_up_down=admin_up_down)
        with PapiExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)
    elif node['type'] == NodeType.TG or node['type'] == NodeType.VM:
        # TG/VM interfaces are plain kernel links; drive them via iproute2.
        cmd = 'ip link set {ifc} {state}'.format(
            ifc=iface_name, state=state)
        exec_cmd_no_error(node, cmd, sudo=True)
    raise ValueError('Node {} has unknown NodeType: "{}"'
                     .format(node['host'], node['type']))
def set_interface_ethernet_mtu(node, iface_key, mtu):
    """Set Ethernet MTU for specified interface.

    Function can be used only for TGs.

    :param node: Node where the interface is.
    :param iface_key: Interface key from topology file.
    :param mtu: MTU to set.
    :type node: dict
    :type iface_key: str
    :type mtu: int
    :raises ValueError: If the node type is "DUT".
    :raises ValueError: If the node has an unknown node type.
    """
    if node['type'] == NodeType.DUT:
        # Bug fix: the '{}' placeholder was never formatted; node['host']
        # was passed as a second exception argument instead.
        raise ValueError('Node {}: Setting Ethernet MTU for interface '
                         'on DUT nodes not supported'.format(node['host']))
    elif node['type'] == NodeType.TG:
        iface_name = Topology.get_interface_name(node, iface_key)
        cmd = 'ip link set {} mtu {}'.format(iface_name, mtu)
        exec_cmd_no_error(node, cmd, sudo=True)
    else:
        raise ValueError('Node {} has unknown NodeType: "{}"'
                         .format(node['host'], node['type']))
def set_default_ethernet_mtu_on_all_interfaces_on_node(node):
    """Set the default Ethernet MTU (1500 B) on every interface of a node.

    Function can be used only for TGs.

    :param node: Node where to set default MTU.
    :type node: dict
    """
    for iface_key in node['interfaces']:
        InterfaceUtil.set_interface_ethernet_mtu(node, iface_key, 1500)
def vpp_set_interface_mtu(node, interface, mtu=9200):
    """Set Ethernet MTU on interface via the VPP API.

    :param node: VPP node.
    :param interface: Interface to setup MTU.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type interface: str or int
    :type mtu: int
    """
    # NOTE(review): this excerpt is missing the 'else:' header before the
    # bare sw_if_index assignment, the host= kwarg of err_msg and the mtu
    # entry of args -- restore from VCS.
    if isinstance(interface, basestring):
        sw_if_index = Topology.get_interface_sw_index(node, interface)
    sw_if_index = interface

    cmd = 'hw_interface_set_mtu'
    err_msg = 'Failed to set interface MTU on host {host}'.format(
    args = dict(sw_if_index=sw_if_index,
    with PapiExecutor(node) as papi_exec:
        papi_exec.add(cmd, **args).get_replies(err_msg).\
            verify_reply(err_msg=err_msg)
def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
    """Set Ethernet MTU on every interface of a single VPP node.

    :param node: VPP node.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type node: dict
    :type mtu: int
    """
    for iface_key in node['interfaces']:
        InterfaceUtil.vpp_set_interface_mtu(node, iface_key, mtu)
def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
    """Set Ethernet MTU on all interfaces of every DUT in the topology.

    :param nodes: VPP nodes.
    :param mtu: Ethernet MTU size in Bytes. Default: 9200.
    :type nodes: dict
    :type mtu: int
    """
    for node in nodes.values():
        if node['type'] != NodeType.DUT:
            continue
        InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
def vpp_node_interfaces_ready_wait(node, retries=15):
    """Wait until all interfaces with admin-up are in link-up state.

    :param node: Node to wait on.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type node: dict
    :type retries: int
    :raises RuntimeError: If any interface is not in link-up state after
        defined number of retries.
    """
    # NOTE(review): this excerpt is missing the per-retry 'not_ready'
    # list initialisation, the success return/break and the sleep between
    # retries -- restore from VCS.
    for _ in xrange(0, retries):
        out = InterfaceUtil.vpp_get_interface_data(node)
        for interface in out:
            # admin_up_down == 1 means admin-up; link state must follow.
            if interface.get('admin_up_down') == 1:
                if interface.get('link_up_down') != 1:
                    not_ready.append(interface.get('interface_name'))
        logger.debug('Interfaces still in link-down state:\n{ifs} '
                     '\nWaiting...'.format(ifs=not_ready))
    # 'not_ready' may be unbound if retries == 0; report that explicitly.
    err = 'Timeout, interfaces not up:\n{ifs}'.format(ifs=not_ready) \
        if 'not_ready' in locals() else 'No check executed!'
    raise RuntimeError(err)
def all_vpp_interfaces_ready_wait(nodes, retries=15):
    """Wait until all interfaces with admin-up are in link-up state for all
    DUT nodes in the topology.

    :param nodes: Nodes in the topology.
    :param retries: Number of retries to check interface status (optional,
        default 15).
    :type nodes: dict
    :type retries: int
    """
    for node in nodes.values():
        if node['type'] != NodeType.DUT:
            continue
        InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)
def vpp_get_interface_data(node, interface=None):
    """Get all interface data from a VPP node. If a name or
    sw_interface_index is provided, return only data for the matching
    interface(s).

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: List of dictionaries containing data for each interface, or a
        single dictionary for the specified interface.
    :rtype: list or dict
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    # NOTE(review): this excerpt is missing several lines ('else:' before
    # the TypeError, its format kwarg, args' sw_if_index entry, err_msg
    # host= kwarg, 'return if_dump' of process_if_dump, the comparison
    # operand of the elif and the final 'return data').
    if interface is not None:
        if isinstance(interface, basestring):
            param = 'interface_name'
        elif isinstance(interface, int):
            param = 'sw_if_index'
            raise TypeError('Wrong interface format {ifc}'.format(

    cmd = 'sw_interface_dump'
    cmd_reply = 'sw_interface_details'
    args = dict(name_filter_valid=0,
    err_msg = 'Failed to get interface dump on host {host}'.format(
    with PapiExecutor(node) as papi_exec:
        papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
    papi_if_dump = papi_resp.reply[0]['api_reply']

    def process_if_dump(if_dump):
        """Process interface dump.

        :param if_dump: Interface dump.
        :type if_dump: dict
        :returns: Processed interface dump.
        :rtype: dict
        """
        # Strip C-string NUL padding, render MACs human-readable.
        if_dump['interface_name'] = if_dump['interface_name'].rstrip('\x00')
        if_dump['tag'] = if_dump['tag'].rstrip('\x00')
        if_dump['l2_address'] = L2Util.bin_to_mac(if_dump['l2_address'])
        if_dump['b_dmac'] = L2Util.bin_to_mac(if_dump['b_dmac'])
        if_dump['b_smac'] = L2Util.bin_to_mac(if_dump['b_smac'])

    data = list() if interface is None else dict()
    for item in papi_if_dump:
        if interface is None:
            data.append(process_if_dump(item[cmd_reply]))
        elif str(item[cmd_reply].get(param)).rstrip('\x00') == \
            data = process_if_dump(item[cmd_reply])

    logger.debug('Interface data:\n{if_data}'.format(if_data=data))
def vpp_get_interface_name(node, sw_if_index):
    """Get interface name for the given SW interface index from actual
    interface dump.

    :param node: VPP node to get interface data from.
    :param sw_if_index: SW interface index of the specific interface.
    :type node: dict
    :type sw_if_index: int
    :returns: Name of the given interface.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
    # A differing sup_sw_if_index marks a sub-interface; report the name
    # of its supervising (parent) interface.
    if if_data['sup_sw_if_index'] != if_data['sw_if_index']:
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data['sup_sw_if_index'])

    return if_data.get('interface_name')
def vpp_get_interface_sw_index(node, interface_name):
    """Get SW interface index for the given interface name from actual
    interface dump.

    NOTE(review): docstring previously said "Get interface name" --
    copy-paste from vpp_get_interface_name; the code returns the index.

    :param node: VPP node to get interface data from.
    :param interface_name: Interface name.
    :type node: dict
    :type interface_name: str
    :returns: SW interface index of the given interface.
    :rtype: int
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)

    return if_data.get('sw_if_index')
def vpp_get_interface_mac(node, interface):
    """Get MAC address for the given interface from actual interface dump.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: MAC address.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
    # For a sub-interface, report the supervising interface's MAC.
    if if_data['sup_sw_if_index'] != if_data['sw_if_index']:
        if_data = InterfaceUtil.vpp_get_interface_data(
            node, if_data['sup_sw_if_index'])

    return if_data.get('l2_address')
def vpp_get_interface_ip_addresses(node, interface, ip_version):
    """Get list of IP addresses from an interface on a VPP node.

    TODO: Move to IPUtils

    :param node: VPP node to get data from.
    :param interface: Name of an interface on the VPP node.
    :param ip_version: IP protocol version (ipv4 or ipv6).
    :type node: dict
    :type interface: str
    :type ip_version: str
    :returns: List of dictionaries, each containing IP address, subnet
        prefix length and also the subnet mask for ipv4 addresses.
        Note: A single interface may have multiple IP addresses assigned.
    :rtype: list
    """
    # NOTE(review): this excerpt is missing the try/except around the
    # topology lookup, the 'data = list()' initialisation, args' is_ipv6
    # entry and err_msg's host= kwarg -- restore from VCS.
    sw_if_index = Topology.convert_interface_reference(
        node, interface, 'sw_if_index')

    if isinstance(interface, basestring):
        sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)

    is_ipv6 = 1 if ip_version == 'ipv6' else 0

    cmd = 'ip_address_dump'
    cmd_reply = 'ip_address_details'
    args = dict(sw_if_index=sw_if_index,
    # NOTE(review): message says "L2FIB dump" but this is ip_address_dump
    # -- looks like a copy-paste error; fix when restoring the file.
    err_msg = 'Failed to get L2FIB dump on host {host}'.format(
    with PapiExecutor(node) as papi_exec:
        papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)

    for item in papi_resp.reply[0]['api_reply']:
        # VPP returns 16 B address buffers; IPv4 uses the first 4 bytes.
        item[cmd_reply]['ip'] = inet_ntop(AF_INET6, item[cmd_reply]['ip']) \
            if is_ipv6 else inet_ntop(AF_INET, item[cmd_reply]['ip'][0:4])
        data.append(item[cmd_reply])

    if ip_version == 'ipv4':
        item['netmask'] = convert_ipv4_netmask_prefix(
            item['prefix_length'])
def tg_set_interface_driver(node, pci_addr, driver):
    """Set interface driver on the TG node.

    :param node: Node to set interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :param driver: Driver name.
    :type node: dict
    :type pci_addr: str
    :type driver: str
    :raises RuntimeError: If unbinding from the current driver fails.
    :raises RuntimeError: If binding to the new driver fails.
    """
    # NOTE(review): this excerpt is missing the early 'return' body when
    # the desired driver is already bound and the SSH connection setup
    # (ssh = SSH(); ssh.connect(node)) -- restore from VCS.
    old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
    if old_driver == driver:

    # Unbind from current driver
    if old_driver is not None:
        cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/unbind"'\
            .format(pci_addr, old_driver)
        (ret_code, _, _) = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError("'{0}' failed on '{1}'"
                               .format(cmd, node['host']))

    # Bind to the new driver
    cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/bind"'\
        .format(pci_addr, driver)
    (ret_code, _, _) = ssh.exec_command_sudo(cmd)
    if int(ret_code) != 0:
        raise RuntimeError("'{0}' failed on '{1}'"
                           .format(cmd, node['host']))
def tg_get_interface_driver(node, pci_addr):
    """Get interface driver from the TG node.

    :param node: Node to get interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :type node: dict
    :type pci_addr: str
    :returns: Interface driver or None if not found.
    :rtype: str
    :raises RuntimeError: If PCI rescan or lspci command execution failed.
    """
    # Thin wrapper; the actual lookup lives in DUTSetup.
    return DUTSetup.get_pci_dev_driver(node, pci_addr)
def tg_set_interfaces_udev_rules(node):
    """Set udev rules for interfaces.

    Create udev rules file in /etc/udev/rules.d where are rules for each
    interface used by TG node, based on MAC interface has specific name.
    So after unbind and bind again to kernel driver interface has same
    name as before. This must be called after TG has set name for each
    port in topology dictionary.

    Example of a generated rule::

        SUBSYSTEM=="net", ACTION=="add", ATTR{address}=="52:54:00:e1:8a:0f",
        NAME="eth2"

    :param node: Node to set udev rules on (must be TG node).
    :type node: dict
    :raises RuntimeError: If setting of udev rules fails.
    """
    # NOTE(review): the SSH connection setup (ssh = SSH(); ssh.connect)
    # is missing from this excerpt -- restore from VCS.
    cmd = 'rm -f {0}'.format(InterfaceUtil.__UDEV_IF_RULES_FILE)
    (ret_code, _, _) = ssh.exec_command_sudo(cmd)
    if int(ret_code) != 0:
        raise RuntimeError("'{0}' failed on '{1}'"
                           .format(cmd, node['host']))

    for interface in node['interfaces'].values():
        # One rule per port: match by MAC, pin the kernel name.
        rule = 'SUBSYSTEM==\\"net\\", ACTION==\\"add\\", ATTR{address}' + \
               '==\\"' + interface['mac_address'] + '\\", NAME=\\"' + \
               interface['name'] + '\\"'
        cmd = 'sh -c "echo \'{0}\' >> {1}"'.format(
            rule, InterfaceUtil.__UDEV_IF_RULES_FILE)
        (ret_code, _, _) = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError("'{0}' failed on '{1}'"
                               .format(cmd, node['host']))

    # Best-effort restart; return code deliberately not checked.
    cmd = '/etc/init.d/udev restart'
    ssh.exec_command_sudo(cmd)
def tg_set_interfaces_default_driver(node):
    """Set interfaces default driver specified in topology yaml file.

    :param node: Node to setup interfaces driver on (must be TG node).
    :type node: dict
    """
    # NOTE(review): the closing call argument line
    # "interface['driver'])" is missing from this excerpt.
    for interface in node['interfaces'].values():
        InterfaceUtil.tg_set_interface_driver(node,
                                              interface['pci_address'],
def update_vpp_interface_data_on_node(node):
    """Update vpp generated interface data for a given node in DICT__nodes.

    Updates interface names, software if index numbers and any other details
    generated specifically by vpp that are unknown before testcase run.
    It does this by dumping interface list from all devices using python
    api, and pairing known information from topology (mac address) to state
    of the node.

    :param node: Node selected from DICT__nodes.
    :type node: dict
    """
    interface_list = InterfaceUtil.vpp_get_interface_data(node)
    # Index the dump by MAC so topology ports can be paired to VPP data.
    interface_dict = dict()
    for ifc in interface_list:
        interface_dict[ifc['l2_address']] = ifc

    for if_name, if_data in node['interfaces'].items():
        ifc_dict = interface_dict.get(if_data['mac_address'])
        if ifc_dict is not None:
            if_data['name'] = ifc_dict['interface_name']
            if_data['vpp_sw_index'] = ifc_dict['sw_if_index']
            # mtu is a list in the dump; element 0 is the L3 MTU.
            if_data['mtu'] = ifc_dict['mtu'][0]
            logger.trace('Interface {ifc} found by MAC {mac}'.format(
                ifc=if_name, mac=if_data['mac_address']))
        # NOTE(review): 'else:' line missing from this excerpt.
        logger.trace('Interface {ifc} not found by MAC {mac}'.format(
            ifc=if_name, mac=if_data['mac_address']))
        if_data['vpp_sw_index'] = None
def update_nic_interface_names(node):
    """Update interface names based on nic type and PCI address.

    This method updates interface names in the same format as VPP does.

    :param node: Node dictionary.
    :type node: dict
    """
    # Interface name prefix by NIC model; replaces the repetitive
    # if/elif chain. Unknown models fall back to 'UnknownEthernet'.
    prefixes = {
        'Intel-XL710': 'FortyGigabitEthernet',
        'Intel-X710': 'TenGigabitEthernet',
        'Intel-X520-DA2': 'TenGigabitEthernet',
        'Cisco-VIC-1385': 'FortyGigabitEthernet',
        'Cisco-VIC-1227': 'TenGigabitEthernet',
    }
    for ifc in node['interfaces'].values():
        # '0000:18:0a.0' -> ['0000', '18', '0a', '0'].
        if_pci = ifc['pci_address'].replace('.', ':').split(':')
        # VPP renders bus/device/function as unpadded lowercase hex.
        loc = '/'.join('{:x}'.format(int(part, 16))
                       for part in if_pci[1:4])
        ifc['name'] = '{pfx}{loc}'.format(
            pfx=prefixes.get(ifc['model'], 'UnknownEthernet'), loc=loc)
def update_nic_interface_names_on_all_duts(nodes):
    """Update interface names based on nic type and PCI address on all DUTs.

    This method updates interface names in the same format as VPP does.

    :param nodes: Topology nodes.
    :type nodes: dict
    """
    for node in nodes.values():
        if node['type'] != NodeType.DUT:
            continue
        InterfaceUtil.update_nic_interface_names(node)
def update_tg_interface_data_on_node(node, skip_tg_udev=False):
    """Update interface name for TG/linux node in DICT__nodes.

    Example data gathered on the node::

        # for dev in `ls /sys/class/net/`;
        > do echo "\"`cat /sys/class/net/$dev/address`\": \"$dev\""; done
        "52:54:00:9f:82:63": "eth0"
        "52:54:00:77:ae:a9": "eth1"
        "52:54:00:e1:8a:0f": "eth2"
        "00:00:00:00:00:00": "lo"

    :param node: Node selected from DICT__nodes.
    :param skip_tg_udev: Skip udev rename on TG node.
    :type node: dict
    :type skip_tg_udev: bool
    :raises RuntimeError: If getting of interface name and MAC fails.
    """
    # First setup interface driver specified in yaml file
    InterfaceUtil.tg_set_interfaces_default_driver(node)

    # Get interface names
    # NOTE(review): SSH connection setup lines are missing from this
    # excerpt -- restore from VCS.
    cmd = ('for dev in `ls /sys/class/net/`; do echo "\\"`cat '
           '/sys/class/net/$dev/address`\\": \\"$dev\\""; done;')

    (ret_code, stdout, _) = ssh.exec_command(cmd)
    if int(ret_code) != 0:
        raise RuntimeError('Get interface name and MAC failed')
    # Turn the MAC->name lines into a JSON object for parsing.
    tmp = "{" + stdout.rstrip().replace('\n', ',') + "}"
    interfaces = JsonParser().parse_data(tmp)
    for interface in node['interfaces'].values():
        name = interfaces.get(interface['mac_address'])
        # NOTE(review): the 'if name is None: continue' guard is missing
        # from this excerpt.
        interface['name'] = name

    # Set udev rules for interfaces
    # NOTE(review): 'if not skip_tg_udev:' guard line missing here.
    InterfaceUtil.tg_set_interfaces_udev_rules(node)
def iface_update_numa_node(node):
    """For all interfaces from topology file update numa node based on
    information from the node.

    :param node: Node from topology.
    :type node: dict
    :raises ValueError: If numa node is less than 0.
    :raises RuntimeError: If update of numa node fails.
    """
    # NOTE(review): heavily truncated in this excerpt: the SSH setup, the
    # retry loop, numa-value parsing, and the surrounding conditional /
    # exception-handling structure are missing -- restore from VCS.
    for if_key in Topology.get_node_interfaces(node):
        if_pci = Topology.get_interface_pci_addr(node, if_key)
        # Sysfs reports which NUMA node the NIC is attached to
        # (-1 when unknown, e.g. on single-socket platforms).
        cmd = "cat /sys/bus/pci/devices/{}/numa_node".format(if_pci)
        (ret, out, _) = ssh.exec_command(cmd)
        if CpuUtils.cpu_node_count(node) == 1:
        logger.trace('Reading numa location failed for: {0}'
        Topology.set_interface_numa_node(node, if_key,
        raise RuntimeError('Update numa node failed for: {0}'
def update_all_numa_nodes(nodes, skip_tg=False):
    """For all nodes and all their interfaces from topology file update numa
    node information based on information from the node.

    :param nodes: Nodes in the topology.
    :param skip_tg: Skip TG node.
    :type nodes: dict
    :type skip_tg: bool
    """
    for node in nodes.values():
        is_dut = node['type'] == NodeType.DUT
        is_tg = node['type'] == NodeType.TG
        if is_dut or (is_tg and not skip_tg):
            InterfaceUtil.iface_update_numa_node(node)
# NOTE(review): the signature continuation line ("skip_tg_udev=False,
# numa_node=False):") and the 'if numa_node:' guard are missing from this
# excerpt -- restore from VCS; code below is not syntactically complete.
def update_all_interface_data_on_all_nodes(nodes, skip_tg=False,
    """Update interface names on all nodes in DICT__nodes.

    This method updates the topology dictionary by querying interface lists
    of all nodes mentioned in the topology dictionary.

    :param nodes: Nodes in the topology.
    :param skip_tg: Skip TG node.
    :param skip_tg_udev: Skip udev rename on TG node.
    :param numa_node: Retrieve numa_node location.
    :type nodes: dict
    :type skip_tg: bool
    :type skip_tg_udev: bool
    :type numa_node: bool
    """
    for node_data in nodes.values():
        if node_data['type'] == NodeType.DUT:
            InterfaceUtil.update_vpp_interface_data_on_node(node_data)
        elif node_data['type'] == NodeType.TG and not skip_tg:
            InterfaceUtil.update_tg_interface_data_on_node(
                node_data, skip_tg_udev)

        if node_data['type'] == NodeType.DUT:
            InterfaceUtil.iface_update_numa_node(node_data)
        elif node_data['type'] == NodeType.TG and not skip_tg:
            InterfaceUtil.iface_update_numa_node(node_data)
def create_vlan_subinterface(node, interface, vlan):
    """Create VLAN sub-interface on node.

    :param node: Node to add VLAN subinterface on.
    :param interface: Interface name on which create VLAN subinterface.
    :param vlan: VLAN ID of the subinterface to be created.
    :type node: dict
    :type interface: str
    :type vlan: int
    :returns: Name and index of created subinterface.
    :rtype: tuple
    :raises RuntimeError: if it is unable to create VLAN subinterface on the
        node.
    """
    iface_key = Topology.get_interface_by_name(node, interface)
    sw_if_index = Topology.get_interface_sw_index(node, iface_key)

    cmd = 'create_vlan_subif'
    # NOTE(review): args' vlan_id entry and err_msg's host= kwarg are
    # missing from this excerpt -- restore from VCS.
    args = dict(sw_if_index=sw_if_index,
    err_msg = 'Failed to create VLAN sub-interface on host {host}'.format(
    with PapiExecutor(node) as papi_exec:
        papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
            verify_reply(err_msg=err_msg)

    # Register the new sub-interface in the topology dictionary.
    sw_if_idx = papi_resp['sw_if_index']
    if_key = Topology.add_new_port(node, 'vlan_subif')
    Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
    Topology.update_interface_name(node, if_key, ifc_name)

    return '{ifc}.{vlan}'.format(ifc=interface, vlan=vlan), sw_if_idx
def create_vxlan_interface(node, vni, source_ip, destination_ip):
    """Create VXLAN interface and return sw if index of created interface.

    :param node: Node where to create VXLAN interface.
    :param vni: VXLAN Network Identifier.
    :param source_ip: Source IP of a VXLAN Tunnel End Point.
    :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
    :type node: dict
    :type vni: int
    :type source_ip: str
    :type destination_ip: str
    :returns: SW IF INDEX of created interface.
    :rtype: int
    :raises RuntimeError: if it is unable to create VxLAN interface on the
        node.
    """
    # NOTE(review): this excerpt is missing the 'try:' line, the
    # af_inet/is_ipv6 assignments of both parse branches, args' vni entry
    # and the final 'return sw_if_idx' -- restore from VCS.
    # Try IPv6 first; AddressValueError falls back to IPv4.
        src_address = IPv6Address(unicode(source_ip))
        dst_address = IPv6Address(unicode(destination_ip))
    except (AddressValueError, NetmaskValueError):
        src_address = IPv4Address(unicode(source_ip))
        dst_address = IPv4Address(unicode(destination_ip))

    cmd = 'vxlan_add_del_tunnel'
    args = dict(is_add=1,
                instance=Constants.BITWISE_NON_ZERO,
                src_address=inet_pton(af_inet, str(src_address)),
                dst_address=inet_pton(af_inet, str(dst_address)),
                mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
                decap_next_index=Constants.BITWISE_NON_ZERO,
    err_msg = 'Failed to create VXLAN tunnel interface on host {host}'.\
        format(host=node['host'])
    with PapiExecutor(node) as papi_exec:
        papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
            verify_reply(err_msg=err_msg)

    # Register the new tunnel interface in the topology dictionary.
    sw_if_idx = papi_resp['sw_if_index']
    if_key = Topology.add_new_port(node, 'vxlan_tunnel')
    Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
    Topology.update_interface_name(node, if_key, ifc_name)
def vxlan_dump(node, interface=None):
    """Get VxLAN data for the given interface.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
        If None, information about all VxLAN interfaces is returned.
    :type node: dict
    :type interface: int or str
    :returns: Dictionary containing data for the given VxLAN interface or if
        interface=None, the list of dictionaries with all VxLAN interfaces.
    :rtype: dict or list
    :raises TypeError: if the data type of interface is neither basestring
        nor int.
    """
    # NOTE(review): this excerpt is missing two 'else:' lines, err_msg's
    # host= kwarg, 'return vxlan_dump' of the helper and the final
    # 'return data' -- restore from VCS.
    if interface is not None:
        sw_if_index = InterfaceUtil.get_interface_index(node, interface)
    # BITWISE_NON_ZERO acts as the "all interfaces" wildcard index.
    sw_if_index = int(Constants.BITWISE_NON_ZERO)

    cmd = 'vxlan_tunnel_dump'
    cmd_reply = 'vxlan_tunnel_details'
    args = dict(sw_if_index=sw_if_index)
    err_msg = 'Failed to get VXLAN dump on host {host}'.format(
    with PapiExecutor(node) as papi_exec:
        papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
    papi_vxlan_dump = papi_resp.reply[0]['api_reply']

    def process_vxlan_dump(vxlan_dump):
        """Process vxlan dump.

        :param vxlan_dump: Vxlan interface dump.
        :type vxlan_dump: dict
        :returns: Processed vxlan interface dump.
        :rtype: dict
        """
        # IPv6 uses the full 16 B buffer, IPv4 only the first 4 bytes.
        if vxlan_dump['is_ipv6']:
            vxlan_dump['src_address'] = \
                inet_ntop(AF_INET6, vxlan_dump['src_address'])
            vxlan_dump['dst_address'] = \
                inet_ntop(AF_INET6, vxlan_dump['dst_address'])
        vxlan_dump['src_address'] = \
            inet_ntop(AF_INET, vxlan_dump['src_address'][0:4])
        vxlan_dump['dst_address'] = \
            inet_ntop(AF_INET, vxlan_dump['dst_address'][0:4])

    data = list() if interface is None else dict()
    for item in papi_vxlan_dump:
        if interface is None:
            data.append(process_vxlan_dump(item[cmd_reply]))
        elif item[cmd_reply]['sw_if_index'] == sw_if_index:
            data = process_vxlan_dump(item[cmd_reply])

    logger.debug('VXLAN data:\n{vxlan_data}'.format(vxlan_data=data))
def vhost_user_dump(node):
    """Get vhost-user data for the given node.

    TODO: Move to VhostUser.py

    :param node: VPP node to get interface data from.
    :type node: dict
    :returns: List of dictionaries with all vhost-user interfaces.
    :rtype: list
    """
    # NOTE(review): this excerpt is missing err_msg's host= kwarg, the
    # 'return vhost_dump' of the helper, the 'data = list()'
    # initialisation and the final 'return data' -- restore from VCS.
    cmd = 'sw_interface_vhost_user_dump'
    cmd_reply = 'sw_interface_vhost_user_details'
    err_msg = 'Failed to get vhost-user dump on host {host}'.format(
    with PapiExecutor(node) as papi_exec:
        papi_resp = papi_exec.add(cmd).get_dump(err_msg)
    # NOTE(review): variable name says vxlan but it holds vhost entries.
    papi_vxlan_dump = papi_resp.reply[0]['api_reply']

    def process_vhost_dump(vhost_dump):
        """Process vhost dump.

        :param vhost_dump: Vhost interface dump.
        :type vhost_dump: dict
        :returns: Processed vhost interface dump.
        :rtype: dict
        """
        # Strip C-string NUL padding from the name fields.
        vhost_dump['interface_name'] = \
            vhost_dump['interface_name'].rstrip('\x00')
        vhost_dump['sock_filename'] = \
            vhost_dump['sock_filename'].rstrip('\x00')

    for item in papi_vxlan_dump:
        data.append(process_vhost_dump(item[cmd_reply]))

    logger.debug('Vhost-user data:\n{vhost_data}'.format(vhost_data=data))
def tap_dump(node, name=None):
    """Get all TAP interface data from the given node, or data about
    a specific TAP interface.

    :param node: VPP node to get data from.
    :param name: Optional name of a specific TAP interface.
    :type node: dict
    :type name: str
    :returns: Dictionary of information about a specific TAP interface, or
        a List of dictionaries containing all TAP data for the given node.
    :rtype: dict or list
    """
    # NOTE(review): this excerpt is missing err_msg's host= kwarg, the
    # helper's docstring close / return, the 'if name is None:' branch
    # header and the final 'return data' -- restore from VCS.
    cmd = 'sw_interface_tap_v2_dump'
    cmd_reply = 'sw_interface_tap_v2_details'
    err_msg = 'Failed to get TAP dump on host {host}'.format(
    with PapiExecutor(node) as papi_exec:
        papi_resp = papi_exec.add(cmd).get_dump(err_msg)
    papi_tap_dump = papi_resp.reply[0]['api_reply']

    def process_tap_dump(tap_dump):
        """Process tap dump: strip NUL padding and render addresses.

        :param tap_dump: Tap interface dump.
        :type tap_dump: dict
        :returns: Processed tap interface dump.
        :rtype: dict
        """
        tap_dump['dev_name'] = tap_dump['dev_name'].rstrip('\x00')
        tap_dump['host_if_name'] = tap_dump['host_if_name'].rstrip('\x00')
        tap_dump['host_namespace'] = \
            tap_dump['host_namespace'].rstrip('\x00')
        tap_dump['host_mac_addr'] = \
            L2Util.bin_to_mac(tap_dump['host_mac_addr'])
        tap_dump['host_ip4_addr'] = \
            inet_ntop(AF_INET, tap_dump['host_ip4_addr'])
        tap_dump['host_ip6_addr'] = \
            inet_ntop(AF_INET6, tap_dump['host_ip6_addr'])

    data = list() if name is None else dict()
    for item in papi_tap_dump:
        data.append(process_tap_dump(item[cmd_reply]))
        elif item[cmd_reply].get('dev_name').rstrip('\x00') == name:
            data = process_tap_dump(item[cmd_reply])

    logger.debug('TAP data:\n{tap_data}'.format(tap_data=data))
def create_subinterface(node, interface, sub_id, outer_vlan_id=None,
                        inner_vlan_id=None, type_subif=None):
    """Create sub-interface on node. It is possible to set required
    sub-interface type and VLAN tag(s).

    :param node: Node to add sub-interface.
    :param interface: Interface name on which create sub-interface.
    :param sub_id: ID of the sub-interface to be created.
    :param outer_vlan_id: Optional outer VLAN ID.
    :param inner_vlan_id: Optional inner VLAN ID.
    :param type_subif: Optional type of sub-interface. Values supported by
        VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
        [default_sub]
    :type node: dict
    :type interface: str or int
    :type sub_id: int
    :type outer_vlan_id: int
    :type inner_vlan_id: int
    :type type_subif: str
    :returns: Name and index of created sub-interface.
    :rtype: tuple
    :raises RuntimeError: If it is not possible to create sub-interface.
    """
    subif_types = type_subif.split()

    cmd = 'create_subif'
    # NOTE(review): the 'args = dict(' opener and the sub_id entry are
    # missing from this excerpt -- restore from VCS.
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        no_tags=1 if 'no_tags' in subif_types else 0,
        one_tag=1 if 'one_tag' in subif_types else 0,
        two_tags=1 if 'two_tags' in subif_types else 0,
        dot1ad=1 if 'dot1ad' in subif_types else 0,
        exact_match=1 if 'exact_match' in subif_types else 0,
        default_sub=1 if 'default_sub' in subif_types else 0,
        outer_vlan_id_any=1 if type_subif == 'default_sub' else 0,
        inner_vlan_id_any=1 if type_subif == 'default_sub' else 0,
        outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
        inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0)
    err_msg = 'Failed to create sub-interface on host {host}'.format(
    with PapiExecutor(node) as papi_exec:
        papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
            verify_reply(err_msg=err_msg)

    # Register the new sub-interface in the topology dictionary.
    sw_subif_idx = papi_resp['sw_if_index']
    if_key = Topology.add_new_port(node, 'subinterface')
    Topology.update_interface_sw_if_index(node, if_key, sw_subif_idx)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_subif_idx)
    Topology.update_interface_name(node, if_key, ifc_name)

    return '{ifc}.{s_id}'.format(ifc=interface, s_id=sub_id), sw_subif_idx
def create_gre_tunnel_interface(node, source_ip, destination_ip):
    """Create GRE tunnel interface on node.

    :param node: VPP node to add tunnel interface.
    :param source_ip: Source of the GRE tunnel.
    :param destination_ip: Destination of the GRE tunnel.
    :type node: dict
    :type source_ip: str
    :type destination_ip: str
    :returns: Name and index of created GRE tunnel interface.
    :rtype: tuple
    :raises RuntimeError: If unable to create GRE tunnel interface.
    """
    # NOTE(review): this excerpt is missing the tunnel dict's src entry /
    # closing lines, args' tunnel= entry and err_msg's host= kwarg --
    # restore from VCS.
    cmd = 'gre_tunnel_add_del'
    tunnel = dict(type=0,
                  instance=Constants.BITWISE_NON_ZERO,
                  dst=str(destination_ip),
    args = dict(is_add=1,
    err_msg = 'Failed to create GRE tunnel interface on host {host}'.format(
    with PapiExecutor(node) as papi_exec:
        papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
            verify_reply(err_msg=err_msg)

    # Register the new tunnel interface in the topology dictionary.
    sw_if_idx = papi_resp['sw_if_index']
    if_key = Topology.add_new_port(node, 'gre_tunnel')
    Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
    Topology.update_interface_name(node, if_key, ifc_name)

    return ifc_name, sw_if_idx
# NOTE(review): sampled listing -- the trailing 'return sw_if_idx' implied
# by the ':returns: SW interface index.' docstring line is not visible.
1120 def vpp_create_loopback(node):
1121 """Create loopback interface on VPP node.
1123 :param node: Node to create loopback interface on.
1125 :returns: SW interface index.
1127 :raises RuntimeError: If it is not possible to create loopback on the
# mac_address=0 requests an auto-generated MAC for the loopback.
1131 cmd = 'create_loopback'
1132 args = dict(mac_address=0)
1133 err_msg = 'Failed to create loopback interface on host {host}'.format(
1135 with PapiExecutor(node) as papi_exec:
1136 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
1137 verify_reply(err_msg=err_msg)
# Record the new loopback in the topology under a fresh 'loopback' key.
1139 sw_if_idx = papi_resp['sw_if_index']
1140 if_key = Topology.add_new_port(node, 'loopback')
1141 Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
1142 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
1143 Topology.update_interface_name(node, if_key, ifc_name)
# NOTE(review): sampled listing -- the 'cmd = ...' assignment used by the
# papi_exec.add(cmd, ...) call below and the final 'return if_key' are not
# visible here.
1148 def vpp_create_bond_interface(node, mode, load_balance=None, mac=None):
1149 """Create bond interface on VPP node.
1151 :param node: DUT node from topology.
1152 :param mode: Link bonding mode.
1153 :param load_balance: Load balance (optional, valid for xor and lacp
1154 modes, otherwise ignored).
1155 :param mac: MAC address to assign to the bond interface (optional).
1158 :type load_balance: str
1160 :returns: Interface key (name) in topology.
1162 :raises RuntimeError: If it is not possible to create bond interface on
# Mode/LB strings are mapped onto the LinkBondMode/LinkBondLoadBalance
# IntEnums via getattr; a bad string raises AttributeError here.
1167 args = dict(id=int(Constants.BITWISE_NON_ZERO),
1168 use_custom_mac=0 if mac is None else 1,
1169 mac_address=0 if mac is None else L2Util.mac_to_bin(mac),
1170 mode=getattr(LinkBondMode, '{md}'.format(
1171 md=mode.replace('-', '_').upper())).value,
1172 lb=0 if load_balance is None else getattr(
1173 LinkBondLoadBalance, '{lb}'.format(
1174 lb=load_balance.upper())).value)
1175 err_msg = 'Failed to create bond interface on host {host}'.format(
1177 with PapiExecutor(node) as papi_exec:
1178 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
1179 verify_reply(err_msg=err_msg)
# Register the bond interface in the topology and look its key back up.
1181 sw_if_idx = papi_resp['sw_if_index']
1182 InterfaceUtil.add_eth_interface(node, sw_if_idx=sw_if_idx,
1184 if_key = Topology.get_interface_by_sw_index(node, sw_if_idx)
# NOTE(review): sampled listing -- lines between the docstring and the
# add_new_port call (presumably a default for ifc_pfx) are not visible.
1189 def add_eth_interface(node, ifc_name=None, sw_if_idx=None, ifc_pfx=None):
1190 """Add ethernet interface to current topology.
1192 :param node: DUT node from topology.
1193 :param ifc_name: Name of the interface.
1194 :param sw_if_idx: SW interface index.
1195 :param ifc_pfx: Interface key prefix.
1198 :type sw_if_idx: int
1202 if_key = Topology.add_new_port(node, ifc_pfx)
# Resolve whichever of (name, sw_if_index) was not supplied from the other.
1204 if ifc_name and sw_if_idx is None:
1205 sw_if_idx = InterfaceUtil.vpp_get_interface_sw_index(node, ifc_name)
1206 Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
# NOTE(review): truthiness test -- a legitimate sw_if_idx of 0 would skip
# this branch; confirm whether index 0 can occur here.
1207 if sw_if_idx and ifc_name is None:
1208 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
1209 Topology.update_interface_name(node, if_key, ifc_name)
1210 ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_idx)
1211 Topology.update_interface_mac_address(node, if_key, ifc_mac)
# NOTE(review): sampled listing -- the 'cmd = ...' assignment, parts of
# 'args', and the final 'return if_key' are not visible here.
1214 def vpp_create_avf_interface(node, vf_pci_addr, num_rx_queues=None):
1215 """Create AVF interface on VPP node.
1217 :param node: DUT node from topology.
1218 :param vf_pci_addr: Virtual Function PCI address.
1219 :param num_rx_queues: Number of RX queues.
1221 :type vf_pci_addr: str
1222 :type num_rx_queues: int
1223 :returns: Interface key (name) in topology.
1225 :raises RuntimeError: If it is not possible to create AVF interface on
# rxq_num=0 presumably lets the AVF driver pick its default queue count.
1230 args = dict(pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
1232 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1235 err_msg = 'Failed to create AVF interface on host {host}'.format(
1237 with PapiExecutor(node) as papi_exec:
1238 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
1239 verify_reply(err_msg=err_msg)
# Register the new AVF interface in topology and resolve its key.
1241 sw_if_idx = papi_resp['sw_if_index']
1242 InterfaceUtil.add_eth_interface(node, sw_if_idx=sw_if_idx,
1244 if_key = Topology.get_interface_by_sw_index(node, sw_if_idx)
# NOTE(review): sampled listing -- the 'args = dict(' opener and any extra
# fields (e.g. is_passive/is_long_timeout) are not visible here.
1249 def vpp_enslave_physical_interface(node, interface, bond_if):
1250 """Enslave physical interface to bond interface on VPP node.
1252 :param node: DUT node from topology.
# NOTE(review): ':param bond_if: Load balance' reads like a copy-paste slip
# in the original docstring; bond_if is the bond interface key.
1253 :param interface: Physical interface key from topology file.
1254 :param bond_if: Load balance
1256 :type interface: str
1258 :raises RuntimeError: If it is not possible to enslave physical
1259 interface to bond interface on the node.
1262 cmd = 'bond_enslave'
1264 sw_if_index=Topology.get_interface_sw_index(node, interface),
1265 bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
1268 err_msg = 'Failed to enslave physical interface {ifc} to bond ' \
1269 'interface {bond} on host {host}'.format(ifc=interface,
# Fire-and-verify; no return value.
1272 with PapiExecutor(node) as papi_exec:
1273 papi_exec.add(cmd, **args).get_replies(err_msg).\
1274 verify_reply(err_msg=err_msg)
# NOTE(review): sampled listing -- indentation and branch structure (the
# 'if details:' block(s) around the slave-data sections) are not fully
# visible; do not infer control flow from this copy alone.
1277 def vpp_show_bond_data_on_node(node, details=False):
1278 """Show (detailed) bond information on VPP node.
1280 :param node: DUT node from topology.
1281 :param details: If detailed information is required or not.
1286 cmd = 'sw_interface_bond_dump'
1287 cmd_reply = 'sw_interface_bond_details'
1288 err_msg = 'Failed to get bond interface dump on host {host}'.format(
# Accumulate a human-readable report string, then (presumably) log it.
1291 data = ('Bond data on node {host}:\n'.format(host=node['host']))
1292 with PapiExecutor(node) as papi_exec:
1293 papi_resp = papi_exec.add(cmd).get_dump(err_msg)
1295 papi_dump = papi_resp.reply[0]['api_reply']
1296 for item in papi_dump:
1297 data += ('{b}\n'.format(b=item[cmd_reply]['interface_name'].
# Translate numeric mode/lb codes back to enum names for readability.
1299 data += (' mode: {m}\n'.
1300 format(m=LinkBondMode(item[cmd_reply]['mode']).name.
1302 data += (' load balance: {lb}\n'.
1303 format(lb=LinkBondLoadBalance(item[cmd_reply]['lb']).name.
1305 data += (' number of active slaves: {n}\n'.
1306 format(n=item[cmd_reply]['active_slaves']))
# Per-bond slave dump; active (non-passive) slaves listed first.
1308 slave_data = InterfaceUtil.vpp_bond_slave_dump(
1309 node, Topology.get_interface_by_sw_index(
1310 node, item[cmd_reply]['sw_if_index']))
1311 for slave in slave_data:
1312 if not slave['is_passive']:
1313 data += (' {s}\n'.format(s=slave['interface_name']))
1314 data += (' number of slaves: {n}\n'.
1315 format(n=item[cmd_reply]['slaves']))
1317 for slave in slave_data:
1318 data += (' {s}\n'.format(s=slave['interface_name']))
1319 data += (' interface id: {i}\n'.
1320 format(i=item[cmd_reply]['id']))
1321 data += (' sw_if_index: {i}\n'.
1322 format(i=item[cmd_reply]['sw_if_index']))
# NOTE(review): sampled listing -- the initialization of 'data' (before the
# append loop) and the final 'return data' are not visible here.
1326 def vpp_bond_slave_dump(node, interface):
1327 """Get bond interface slave(s) data on VPP node.
1329 :param node: DUT node from topology.
1330 :param interface: Physical interface key from topology file.
1332 :type interface: str
1333 :returns: Bond slave interface data.
1336 cmd = 'sw_interface_slave_dump'
1337 cmd_reply = 'sw_interface_slave_details'
1338 args = dict(sw_if_index=Topology.get_interface_sw_index(
1340 err_msg = 'Failed to get slave dump on host {host}'.format(
1343 with PapiExecutor(node) as papi_exec:
1344 papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
1346 papi_dump = papi_resp.reply[0]['api_reply']
# Inner helper normalizing one raw slave-details record in place.
1348 def process_slave_dump(slave_dump):
1349 """Process slave dump.
1351 :param slave_dump: Slave interface dump.
1352 :type slave_dump: dict
1353 :returns: Processed slave interface dump.
# Visible normalization: the interface_name field is re-assigned
# (presumably stripping trailing NULs from the C string).
1356 slave_dump['interface_name'] = slave_dump['interface_name'].\
1361 for item in papi_dump:
1362 data.append(process_slave_dump(item[cmd_reply]))
1364 logger.debug('Slave data:\n{slave_data}'.format(slave_data=data))
def vpp_show_bond_data_on_all_nodes(nodes, details=False):
    """Show (detailed) bond information on all VPP nodes in DICT__nodes.

    Iterates the topology and delegates to vpp_show_bond_data_on_node
    for every DUT-type node; non-DUT nodes are ignored.

    :param nodes: Nodes in the topology.
    :param details: If detailed information is required or not.
    :type nodes: dict
    :type details: bool
    """
    for node_data in nodes.values():
        if node_data['type'] != NodeType.DUT:
            continue
        InterfaceUtil.vpp_show_bond_data_on_node(node_data, details)
# NOTE(review): sampled listing -- the def-line continuation (table_index
# parameter) and the 'args = dict(' opener plus its 'is_add' field are not
# visible here.
1381 def vpp_enable_input_acl_interface(node, interface, ip_version,
1383 """Enable input acl on interface.
1385 :param node: VPP node to setup interface for input acl.
1386 :param interface: Interface to setup input acl.
1387 :param ip_version: Version of IP protocol.
1388 :param table_index: Classify table index.
1390 :type interface: str or int
1391 :type ip_version: str
1392 :type table_index: int
# Exactly one of the three table indices receives table_index, selected by
# ip_version ('ip4'/'ip6'/'l2'); the others are BITWISE_NON_ZERO (= unset).
1395 cmd = 'input_acl_set_interface'
1397 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1398 ip4_table_index=table_index if ip_version == 'ip4'
1399 else Constants.BITWISE_NON_ZERO,
1400 ip6_table_index=table_index if ip_version == 'ip6'
1401 else Constants.BITWISE_NON_ZERO,
1402 l2_table_index=table_index if ip_version == 'l2'
1403 else Constants.BITWISE_NON_ZERO,
1405 err_msg = 'Failed to enable input acl on interface {ifc}'.format(
1407 with PapiExecutor(node) as papi_exec:
1408 papi_exec.add(cmd, **args).get_replies(err_msg).\
1409 verify_reply(err_msg=err_msg)
# NOTE(review): sampled listing -- the 'else:' line of the isinstance
# branch and the final return (extracting the table id/name from
# papi_resp) are not visible here.
1412 def get_interface_classify_table(node, interface):
1413 """Get name of classify table for the given interface.
1415 TODO: Move to Classify.py.
1417 :param node: VPP node to get data from.
1418 :param interface: Name or sw_if_index of a specific interface.
1420 :type interface: str or int
1421 :returns: Classify table name.
# Accept either an interface name or a numeric sw_if_index.
# basestring => this file is Python 2.
1424 if isinstance(interface, basestring):
1425 sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
1427 sw_if_index = interface
1429 cmd = 'classify_table_by_interface'
1430 args = dict(sw_if_index=sw_if_index)
1431 err_msg = 'Failed to get classify table name by interface {ifc}'.format(
1433 with PapiExecutor(node) as papi_exec:
1434 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg). \
1435 verify_reply(err_msg=err_msg)
# NOTE(review): sampled listing -- the 'else:' line of the isinstance
# branch and the 'args' continuation carrying is_ipv6 are not visible here.
1440 def get_interface_vrf_table(node, interface, ip_version='ipv4'):
1441 """Get vrf ID for the given interface.
1443 TODO: Move to proper IP library when implementing CSIT-1459.
1445 :param node: VPP node.
1446 :param interface: Name or sw_if_index of a specific interface.
1448 :param ip_version: IP protocol version (ipv4 or ipv6).
1449 :type interface: str or int
1450 :type ip_version: str
1451 :returns: vrf ID of the specified interface.
# Accept either an interface name or a numeric sw_if_index.
1454 if isinstance(interface, basestring):
1455 sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
1457 sw_if_index = interface
# Any ip_version other than 'ipv6' is treated as IPv4.
1459 is_ipv6 = 1 if ip_version == 'ipv6' else 0
1461 cmd = 'sw_interface_get_table'
1462 args = dict(sw_if_index=sw_if_index,
1464 err_msg = 'Failed to get VRF id assigned to interface {ifc}'.format(
1466 with PapiExecutor(node) as papi_exec:
1467 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg). \
1468 verify_reply(err_msg=err_msg)
1470 return papi_resp['vrf_id']
def get_sw_if_index(node, interface_name):
    """Get sw_if_index for the given interface from actual interface dump.

    :param node: VPP node to get interface data from.
    :param interface_name: Name of the specific interface.
    :type node: dict
    :type interface_name: str
    :returns: sw_if_index of the given interface.
    """
    # Delegate to the full interface-data dump and pick the index field;
    # dict.get keeps the original behavior of yielding None when absent.
    dump = InterfaceUtil.vpp_get_interface_data(
        node, interface=interface_name)
    return dump.get('sw_if_index')
# NOTE(review): sampled listing -- the 'else:' lines of both branches, the
# inner helper's 'return vxlan_dump', and the method's final 'return data'
# are not visible here.
1489 def vxlan_gpe_dump(node, interface_name=None):
1490 """Get VxLAN GPE data for the given interface.
1492 :param node: VPP node to get interface data from.
1493 :param interface_name: Name of the specific interface. If None,
1494 information about all VxLAN GPE interfaces is returned.
1496 :type interface_name: str
1497 :returns: Dictionary containing data for the given VxLAN GPE interface
1498 or if interface=None, the list of dictionaries with all VxLAN GPE
1500 :rtype: dict or list
# BITWISE_NON_ZERO as sw_if_index requests a dump of all tunnels.
1503 if interface_name is not None:
1504 sw_if_index = InterfaceUtil.get_interface_index(
1505 node, interface_name)
1507 sw_if_index = int(Constants.BITWISE_NON_ZERO)
1509 cmd = 'vxlan_gpe_tunnel_dump'
1510 cmd_reply = 'vxlan_gpe_tunnel_details'
1511 args = dict(sw_if_index=sw_if_index)
1512 err_msg = 'Failed to get VXLAN-GPE dump on host {host}'.format(
1514 with PapiExecutor(node) as papi_exec:
1515 papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
1517 papi_vxlan_dump = papi_resp.reply[0]['api_reply']
# Inner helper: convert packed local/remote addresses to printable form.
1519 def process_vxlan_gpe_dump(vxlan_dump):
1520 """Process vxlan_gpe dump.
1522 :param vxlan_dump: Vxlan_gpe nterface dump.
1523 :type vxlan_dump: dict
1524 :returns: Processed vxlan_gpe interface dump.
# IPv4 addresses come back in a 16-byte field; only the first 4 bytes
# are meaningful, hence the [0:4] slice below.
1527 if vxlan_dump['is_ipv6']:
1528 vxlan_dump['local'] = \
1529 inet_ntop(AF_INET6, vxlan_dump['local'])
1530 vxlan_dump['remote'] = \
1531 inet_ntop(AF_INET6, vxlan_dump['remote'])
1533 vxlan_dump['local'] = \
1534 inet_ntop(AF_INET, vxlan_dump['local'][0:4])
1535 vxlan_dump['remote'] = \
1536 inet_ntop(AF_INET, vxlan_dump['remote'][0:4])
# Collect all tunnels, or just the one matching the requested index.
1539 data = list() if interface_name is None else dict()
1540 for item in papi_vxlan_dump:
1541 if interface_name is None:
1542 data.append(process_vxlan_gpe_dump(item[cmd_reply]))
1543 elif item[cmd_reply]['sw_if_index'] == sw_if_index:
1544 data = process_vxlan_gpe_dump(item[cmd_reply])
1547 logger.debug('VXLAN-GPE data:\n{vxlan_gpe_data}'.format(
1548 vxlan_gpe_data=data))
# NOTE(review): sampled listing -- the 'args = dict(' opener and its
# remaining fields (e.g. is_add / loose-vs-strict mode) are not visible.
1552 def vpp_ip_source_check_setup(node, if_name):
1553 """Setup Reverse Path Forwarding source check on interface.
1555 TODO: Move to proper IP library when implementing CSIT-1459.
1557 :param node: Node to setup RPF source check.
1558 :param if_name: Interface name to setup RPF source check.
1563 cmd = 'ip_source_check_interface_add_del'
1565 sw_if_index=InterfaceUtil.get_interface_index(node, if_name),
1568 err_msg = 'Failed to enable source check on interface {ifc}'.format(
1570 with PapiExecutor(node) as papi_exec:
1571 papi_exec.add(cmd, **args).get_replies(err_msg). \
1572 verify_reply(err_msg=err_msg)
# NOTE(review): sampled listing -- the 'args = dict(' opener and the
# err_msg format-argument continuation are not visible here.
1575 def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
1576 """Assign VPP interface to specific VRF/FIB table.
1578 :param node: VPP node where the FIB and interface are located.
1579 :param interface: Interface to be assigned to FIB.
1580 :param table_id: VRF table ID.
1581 :param ipv6: Assign to IPv6 table. Default False.
1583 :type interface: str or int
1588 cmd = 'sw_interface_set_table'
1590 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1591 is_ipv6=1 if ipv6 else 0,
1592 vrf_id=int(table_id))
1593 err_msg = 'Failed to assign interface {ifc} to FIB table'.format(
1595 with PapiExecutor(node) as papi_exec:
1596 papi_exec.add(cmd, **args).get_replies(err_msg). \
1597 verify_reply(err_msg=err_msg)
def set_linux_interface_mac(node, interface, mac, namespace=None,
                            vf_id=None):
    """Set MAC address for interface in linux.

    When vf_id is given, the MAC is applied to that Virtual Function of
    the interface; otherwise the interface's own address is set.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param mac: MAC to be assigned to interface.
    :param namespace: Execute command in namespace. Optional
    :param vf_id: Virtual Function id. Optional
    :type node: dict
    :type interface: str
    :type mac: str
    :type namespace: str
    :type vf_id: int
    """
    if vf_id is not None:
        mac_str = 'vf {vf_id} mac {mac}'.format(vf_id=vf_id, mac=mac)
    else:
        mac_str = 'address {mac}'.format(mac=mac)
    if namespace:
        ns_str = 'ip netns exec {ns}'.format(ns=namespace)
    else:
        ns_str = ''
    cmd = '{ns} ip link set {interface} {mac}'.format(
        ns=ns_str, interface=interface, mac=mac_str)
    exec_cmd_no_error(node, cmd, sudo=True)
def set_linux_interface_trust_on(node, interface, namespace=None,
                                 vf_id=None):
    """Set trust on (promisc) for interface in linux.

    When vf_id is given, trust is enabled for that Virtual Function of
    the interface; otherwise for the interface itself.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param namespace: Execute command in namespace. Optional
    :param vf_id: Virtual Function id. Optional
    :type node: dict
    :type interface: str
    :type namespace: str
    :type vf_id: int
    """
    if vf_id is not None:
        trust_str = 'vf {vf_id} trust on'.format(vf_id=vf_id)
    else:
        trust_str = 'trust on'
    if namespace:
        ns_str = 'ip netns exec {ns}'.format(ns=namespace)
    else:
        ns_str = ''
    cmd = '{ns} ip link set dev {interface} {trust}'.format(
        ns=ns_str, interface=interface, trust=trust_str)
    exec_cmd_no_error(node, cmd, sudo=True)
def set_linux_interface_spoof_off(node, interface, namespace=None,
                                  vf_id=None):
    """Set spoof off for interface in linux.

    When vf_id is given, spoof checking is disabled for that Virtual
    Function of the interface; otherwise for the interface itself.

    :param node: Node where to execute command.
    :param interface: Interface in namespace.
    :param namespace: Execute command in namespace. Optional
    :param vf_id: Virtual Function id. Optional
    :type node: dict
    :type interface: str
    :type namespace: str
    :type vf_id: int
    """
    if vf_id is not None:
        spoof_str = 'vf {vf_id} spoof off'.format(vf_id=vf_id)
    else:
        spoof_str = 'spoof off'
    if namespace:
        ns_str = 'ip netns exec {ns}'.format(ns=namespace)
    else:
        ns_str = ''
    cmd = '{ns} ip link set dev {interface} {spoof}'.format(
        ns=ns_str, interface=interface, spoof=spoof_str)
    exec_cmd_no_error(node, cmd, sudo=True)
# NOTE(review): sampled listing -- the initialization of 'vf_ifc_keys' and
# the final return of the collected keys are not visible here; neither are
# the keyword-argument continuations of several calls inside the loop.
1668 def init_avf_interface(node, ifc_key, numvfs=1, osi_layer='L2'):
1669 """Init PCI device by creating VFs and bind them to vfio-pci for AVF
1670 driver testing on DUT.
1672 :param node: DUT node.
1673 :param ifc_key: Interface key from topology file.
1674 :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
1675 :param osi_layer: OSI Layer type to initialize TG with.
1676 Default value "L2" sets linux interface spoof off.
1680 :type osi_layer: str
1681 :returns: Virtual Function topology interface keys.
1687 # Read PCI address and driver.
# The ':'->r'\:' escape is for the shell command built inside
# DUTSetup.get_pci_dev_driver.
1688 pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
1689 pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
1690 uio_driver = Topology.get_uio_driver(node)
1691 kernel_driver = Topology.get_interface_driver(node, ifc_key)
1692 current_driver = DUTSetup.get_pci_dev_driver(
1693 node, pf_pci_addr.replace(':', r'\:'))
1695 VPPUtil.stop_vpp_service(node)
1696 if current_driver != kernel_driver:
1697 # PCI device must be re-bound to kernel driver before creating VFs.
1698 DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
1699 # Stop VPP to prevent deadlock.
1700 # Unbind from current driver.
1701 DUTSetup.pci_driver_unbind(node, pf_pci_addr)
1702 # Bind to kernel driver.
1703 DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
1705 # Initialize PCI VFs
1706 DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
1709 # Set MAC address and bind each virtual function to uio driver.
# Each VF MAC is derived from the PF MAC: octet index 1 is deliberately
# skipped and the last octet is replaced by the VF id -- verify this
# against the intended MAC allocation scheme.
1710 for vf_id in range(numvfs):
1711 vf_mac_addr = ":".join([pf_mac_addr[0], pf_mac_addr[2],
1712 pf_mac_addr[3], pf_mac_addr[4],
1713 pf_mac_addr[5], "{:02x}".format(vf_id)])
# pf_dev is a backtick shell expression expanded on the remote host.
1715 pf_dev = '`basename /sys/bus/pci/devices/{pci}/net/*`'.\
1716 format(pci=pf_pci_addr)
1717 InterfaceUtil.set_linux_interface_trust_on(node, pf_dev,
1719 if osi_layer == 'L2':
1720 InterfaceUtil.set_linux_interface_spoof_off(node, pf_dev,
1722 InterfaceUtil.set_linux_interface_mac(node, pf_dev, vf_mac_addr,
1725 DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
1726 DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
1728 # Add newly created ports into topology file
1729 vf_ifc_name = '{pf_if_key}_vf'.format(pf_if_key=ifc_key)
1730 vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
1731 vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
1732 Topology.update_interface_name(node, vf_ifc_key,
1733 vf_ifc_name+str(vf_id+1))
1734 Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
1735 Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
1736 vf_ifc_keys.append(vf_ifc_key)
# NOTE(review): sampled listing -- the signature continuation carrying
# 'bd_id_start' and parts of the docstring are not visible here.
1741 def vpp_create_multiple_vxlan_ipv4_tunnels(
1742 node, node_vxlan_if, node_vlan_if, op_node, op_node_if,
1743 n_tunnels, vni_start, src_ip_start, dst_ip_start, ip_step, ip_limit,
1745 """Create multiple VXLAN tunnel interfaces and VLAN sub-interfaces on
1748 Put each pair of VXLAN tunnel interface and VLAN sub-interface to
1749 separate bridge-domain.
1751 :param node: VPP node to create VXLAN tunnel interfaces.
1752 :param node_vxlan_if: VPP node interface key to create VXLAN tunnel
1754 :param node_vlan_if: VPP node interface key to create VLAN
1756 :param op_node: Opposite VPP node for VXLAN tunnel interfaces.
1757 :param op_node_if: Opposite VPP node interface key for VXLAN tunnel
1759 :param n_tunnels: Number of tunnel interfaces to create.
1760 :param vni_start: VNI start ID.
1761 :param src_ip_start: VXLAN tunnel source IP address start.
1762 :param dst_ip_start: VXLAN tunnel destination IP address start.
1763 :param ip_step: IP address incremental step.
1764 :param ip_limit: IP address limit.
1765 :param bd_id_start: Bridge-domain ID start.
1767 :type node_vxlan_if: str
1768 :type node_vlan_if: str
1770 :type op_node_if: str
1771 :type n_tunnels: int
1772 :type vni_start: int
1773 :type src_ip_start: str
1774 :type dst_ip_start: str
1777 :type bd_id_start: int
# Three-phase orchestration; vxlan_count may be lower than n_tunnels if
# the helper hit the IP address limit.
1779 # configure IPs, create VXLAN interfaces and VLAN sub-interfaces
1780 vxlan_count = InterfaceUtil.vpp_create_vxlan_and_vlan_interfaces(
1781 node, node_vxlan_if, node_vlan_if, n_tunnels, vni_start,
1782 src_ip_start, dst_ip_start, ip_step, ip_limit)
1784 # update topology with VXLAN interfaces and VLAN sub-interfaces data
1785 # and put interfaces up
1786 InterfaceUtil.vpp_put_vxlan_and_vlan_interfaces_up(
1787 node, vxlan_count, node_vlan_if)
1789 # configure bridge domains, ARPs and routes
1790 InterfaceUtil.vpp_put_vxlan_and_vlan_interfaces_to_bridge_domain(
1791 node, node_vxlan_if, vxlan_count, op_node, op_node_if, dst_ip_start,
1792 ip_step, bd_id_start)
# NOTE(review): sampled listing -- the try: opener before the IPv6 parse,
# several 'args = dict(' openers, the loop's break/continue after the
# limit warning, and the final return of the created count are not
# visible here. Python 2 only ('unicode', 'xrange').
1795 def vpp_create_vxlan_and_vlan_interfaces(
1796 node, node_vxlan_if, node_vlan_if, vxlan_count, vni_start,
1797 src_ip_start, dst_ip_start, ip_step, ip_limit):
1799 Configure IPs, create VXLAN interfaces and VLAN sub-interfaces on VPP
1802 :param node: VPP node.
1803 :param node_vxlan_if: VPP node interface key to create VXLAN tunnel
1805 :param node_vlan_if: VPP node interface key to create VLAN
1807 :param vxlan_count: Number of tunnel interfaces to create.
1808 :param vni_start: VNI start ID.
1809 :param src_ip_start: VXLAN tunnel source IP address start.
1810 :param dst_ip_start: VXLAN tunnel destination IP address start.
1811 :param ip_step: IP address incremental step.
1812 :param ip_limit: IP address limit.
1814 :type node_vxlan_if: str
1815 :type node_vlan_if: str
1816 :type vxlan_count: int
1817 :type vni_start: int
1818 :type src_ip_start: str
1819 :type dst_ip_start: str
1822 :returns: Number of created VXLAN interfaces.
# Parse as IPv6 first; fall back to IPv4 on AddressValueError.
1827 src_address_start = IPv6Address(unicode(src_ip_start))
1828 dst_address_start = IPv6Address(unicode(dst_ip_start))
1829 ip_address_limit = IPv6Address(unicode(ip_limit))
1832 except (AddressValueError, NetmaskValueError):
1833 src_address_start = IPv4Address(unicode(src_ip_start))
1834 dst_address_start = IPv4Address(unicode(dst_ip_start))
1835 ip_address_limit = IPv4Address(unicode(ip_limit))
# Batch all requests in one PapiExecutor session; per iteration:
# add src IP to the vxlan-side interface, create the tunnel, create the
# matching VLAN sub-interface.
1839 with PapiExecutor(node) as papi_exec:
1840 for i in xrange(0, vxlan_count):
1841 src_ip = src_address_start + i * ip_step
1842 dst_ip = dst_address_start + i * ip_step
1843 if src_ip > ip_address_limit or dst_ip > ip_address_limit:
1844 logger.warn("Can't do more iterations - IP address limit "
1845 "has been reached.")
1848 cmd = 'sw_interface_add_del_address'
1850 sw_if_index=InterfaceUtil.get_interface_index(
1851 node, node_vxlan_if),
1855 address_length=128 if is_ipv6 else 32,
1856 address=inet_pton(af_inet, str(src_ip)))
1857 papi_exec.add(cmd, **args)
1858 cmd = 'vxlan_add_del_tunnel'
1862 instance=Constants.BITWISE_NON_ZERO,
1863 src_address=inet_pton(af_inet, str(src_ip)),
1864 dst_address=inet_pton(af_inet, str(dst_ip)),
1865 mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
1867 decap_next_index=Constants.BITWISE_NON_ZERO,
1868 vni=int(vni_start)+i)
1869 papi_exec.add(cmd, **args)
1870 cmd = 'create_vlan_subif'
1872 sw_if_index=InterfaceUtil.get_interface_index(
1873 node, node_vlan_if),
1875 papi_exec.add(cmd, **args)
# Single round-trip: send everything, then verify every reply.
1876 papi_exec.get_replies().verify_replies()
# NOTE(review): sampled listing -- initialization of the vxlan_found /
# vlan_found flags, the flag-setting and loop-break lines, and the
# admin-up continuation of args1/args2 are not visible here.
1881 def vpp_put_vxlan_and_vlan_interfaces_up(node, vxlan_count, node_vlan_if):
1883 Update topology with VXLAN interfaces and VLAN sub-interfaces data
1884 and put interfaces up.
1886 :param node: VPP node.
1887 :param vxlan_count: Number of tunnel interfaces.
1888 :param node_vlan_if: VPP node interface key where VLAN sub-interfaces
1891 :type vxlan_count: int
1892 :type node_vlan_if: str
# Snapshot the full interface dump once; the loop matches created
# interfaces by their auto-generated names.
1895 if_data = InterfaceUtil.vpp_get_interface_data(node)
1897 with PapiExecutor(node) as papi_exec:
1898 for i in xrange(0, vxlan_count):
1899 vxlan_subif_key = Topology.add_new_port(node, 'vxlan_tunnel')
1900 vxlan_subif_name = 'vxlan_tunnel{nr}'.format(nr=i)
1902 vxlan_subif_idx = None
1903 vlan_subif_key = Topology.add_new_port(node, 'vlan_subif')
1904 vlan_subif_name = '{if_name}.{vlan}'.format(
1905 if_name=Topology.get_interface_name(
1906 node, node_vlan_if), vlan=i+1)
# Scan the dump for both names; stop early once both are found.
1909 for data in if_data:
1910 if not vxlan_found \
1911 and data['interface_name'] == vxlan_subif_name:
1912 vxlan_subif_idx = data['sw_if_index']
1914 elif not vlan_found \
1915 and data['interface_name'] == vlan_subif_name:
1916 vlan_idx = data['sw_if_index']
1918 if vxlan_found and vlan_found:
# Record both interfaces in topology and queue admin-up requests.
1920 Topology.update_interface_sw_if_index(
1921 node, vxlan_subif_key, vxlan_subif_idx)
1922 Topology.update_interface_name(
1923 node, vxlan_subif_key, vxlan_subif_name)
1924 cmd = 'sw_interface_set_flags'
1925 args1 = dict(sw_if_index=vxlan_subif_idx,
1927 Topology.update_interface_sw_if_index(
1928 node, vlan_subif_key, vlan_idx)
1929 Topology.update_interface_name(
1930 node, vlan_subif_key, vlan_subif_name)
1931 args2 = dict(sw_if_index=vlan_idx,
1933 papi_exec.add(cmd, **args1).add(cmd, **args2)
1934 papi_exec.get_replies().verify_replies()
# NOTE(review): sampled listing -- the try: opener for the address parse,
# several 'args = dict(' openers/continuations (neighbor/route/bridge
# fields), and the mac_address= line for the neighbor are not visible.
1937 def vpp_put_vxlan_and_vlan_interfaces_to_bridge_domain(
1938 node, node_vxlan_if, vxlan_count, op_node, op_node_if, dst_ip_start,
1939 ip_step, bd_id_start):
1941 Configure ARPs and routes for VXLAN interfaces and put each pair of
1942 VXLAN tunnel interface and VLAN sub-interface to separate bridge-domain.
1944 :param node: VPP node.
1945 :param node_vxlan_if: VPP node interface key where VXLAN tunnel
1946 interfaces have been created.
1947 :param vxlan_count: Number of tunnel interfaces.
1948 :param op_node: Opposite VPP node for VXLAN tunnel interfaces.
1949 :param op_node_if: Opposite VPP node interface key for VXLAN tunnel
1951 :param dst_ip_start: VXLAN tunnel destination IP address start.
1952 :param ip_step: IP address incremental step.
1953 :param bd_id_start: Bridge-domain ID start.
1955 :type node_vxlan_if: str
1956 :type vxlan_count: int
1959 :type dst_ip_start: str
1961 :type bd_id_start: int
# IPv6-first parse with IPv4 fallback, same pattern as the create helper.
1965 dst_address_start = IPv6Address(unicode(dst_ip_start))
1968 except (AddressValueError, NetmaskValueError):
1969 dst_address_start = IPv4Address(unicode(dst_ip_start))
# Per tunnel: static neighbor to the opposite node, route to the tunnel
# destination, then both tunnel and VLAN sub-interface into one BD.
1973 with PapiExecutor(node) as papi_exec:
1974 for i in xrange(0, vxlan_count):
1975 dst_ip = dst_address_start + i * ip_step
1977 sw_if_index=Topology.get_interface_sw_index(
1978 node, node_vxlan_if),
1981 Topology.get_interface_mac(op_node, op_node_if)),
1982 ip_address=str(dst_ip))
1983 cmd = 'ip_neighbor_add_del'
1987 papi_exec.add(cmd, **args)
1988 cmd = 'ip_add_del_route'
1990 next_hop_sw_if_index=Topology.get_interface_sw_index(
1991 node, node_vxlan_if),
1996 next_hop_proto=1 if is_ipv6 else 0,
1997 dst_address_length=128 if is_ipv6 else 32,
1998 dst_address=inet_pton(af_inet, str(dst_ip)),
1999 next_hop_address=inet_pton(af_inet, str(dst_ip)))
2000 papi_exec.add(cmd, **args)
# NOTE(review): interface names use nr=i+1 here while the create helper
# names them from nr=0 -- confirm the intended off-by-one alignment.
2001 cmd = 'sw_interface_set_l2_bridge'
2003 rx_sw_if_index=Topology.get_interface_sw_index(
2004 node, 'vxlan_tunnel{nr}'.format(nr=i+1)),
2005 bd_id=int(bd_id_start+i),
2009 papi_exec.add(cmd, **args)
2011 rx_sw_if_index=Topology.get_interface_sw_index(
2012 node, 'vlan_subif{nr}'.format(nr=i+1)),
2013 bd_id=int(bd_id_start+i),
2017 papi_exec.add(cmd, **args)
2018 papi_exec.get_replies().verify_replies()
def vpp_sw_interface_rx_placement_dump(node):
    """Dump VPP interface RX placement on node.

    One dump request is queued per topology interface that has a VPP
    sw_if_index; all replies are flattened and sorted by sw_if_index.

    :param node: Node to run command on.
    :type node: dict
    :returns: Thread mapping information as a list of dictionaries.
    """
    cmd = 'sw_interface_rx_placement_dump'
    cmd_reply = 'sw_interface_rx_placement_details'
    err_msg = "Failed to run '{cmd}' PAPI command on host {host}!".format(
        cmd=cmd, host=node['host'])
    with PapiExecutor(node) as papi_exec:
        for ifc in node['interfaces'].values():
            sw_index = ifc['vpp_sw_index']
            if sw_index is None:
                continue
            papi_exec.add(cmd, sw_if_index=sw_index)
        dump = papi_exec.get_dump(err_msg)
    # Flatten the per-request replies into one list of detail records.
    mapping = []
    for reply in dump.reply:
        for detail in reply['api_reply']:
            mapping.append(detail[cmd_reply])
    mapping.sort(key=lambda item: item['sw_if_index'])
    return mapping
def vpp_sw_interface_set_rx_placement(node, sw_if_index, queue_id,
                                      worker_id):
    """Set interface RX placement to worker on node.

    :param node: Node to run command on.
    :param sw_if_index: VPP SW interface index.
    :param queue_id: VPP interface queue ID.
    :param worker_id: VPP worker ID (indexing from 0).
    :type node: dict
    :type sw_if_index: int
    :type queue_id: int
    :type worker_id: int
    :raises RuntimeError: If failed to run command on host or if no API
        reply received.
    """
    cmd = 'sw_interface_set_rx_placement'
    err_msg = "Failed to set interface RX placement to worker on host " \
        "{host}!".format(host=node['host'])
    args = dict(sw_if_index=sw_if_index, queue_id=queue_id,
                worker_id=worker_id)
    # Single request; verify_reply raises on API-level failure.
    with PapiExecutor(node) as papi_exec:
        request = papi_exec.add(cmd, **args)
        request.get_replies(err_msg).verify_reply(err_msg=err_msg)
# NOTE(review): sampled listing -- the initialization and increment of
# 'worker_id', and any guard for worker_cnt == 0 (which would make the
# modulo below raise ZeroDivisionError), are not visible here; confirm
# they exist in the full source.
2070 def vpp_round_robin_rx_placement(node, prefix):
2071 """Set Round Robin interface RX placement on all worker threads
2074 :param node: Topology nodes.
2075 :param prefix: Interface name prefix.
# Thread count minus one main thread gives the number of workers.
2080 worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
2081 for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
2082 for interface in node['interfaces'].values():
2083 if placement['sw_if_index'] == interface['vpp_sw_index'] \
2084 and prefix in interface['name']:
2085 InterfaceUtil.vpp_sw_interface_set_rx_placement(
2086 node, placement['sw_if_index'], placement['queue_id'],
2087 worker_id % worker_cnt)
def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix):
    """Set Round Robin interface RX placement on all worker threads
    on all DUTs.

    :param nodes: Topology nodes.
    :param prefix: Interface name prefix.
    :type nodes: dict
    :type prefix: str
    """
    for node in nodes.values():
        if node['type'] != NodeType.DUT:
            continue
        InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)