    def vpp_get_interface_data(node, interface=None):
        """Get all interface data from a VPP node. If a name or
        sw_interface_index is provided, return only data for the matching
        interface(s).

        :param node: VPP node to get interface data from.
        :param interface: Numeric index or name string of a specific interface.
        :type node: dict
        :type interface: int or str
        :returns: List of dictionaries containing data for each interface, or a
            single dictionary for the specified interface.
        :rtype: list or dict
        :raises TypeError: if the data type of interface is neither basestring
            nor int.
        """
        # Pick the dump field used to match the requested interface:
        # name strings match on 'interface_name', numeric indices on
        # 'sw_if_index'.
        if interface is not None:
            if isinstance(interface, basestring):
                param = 'interface_name'
            elif isinstance(interface, int):
                param = 'sw_if_index'
            else:
                raise TypeError('Wrong interface format {ifc}'.format(
                    ifc=interface))
        else:
            param = ''

        cmd = 'sw_interface_dump'
        cmd_reply = 'sw_interface_details'
        # Disabled name filter makes VPP dump every interface.
        args = dict(name_filter_valid=0,
                    name_filter='')
        err_msg = 'Failed to get interface dump on host {host}'.format(
            host=node['host'])
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)

        papi_if_dump = papi_resp.reply[0]['api_reply']

        def process_if_dump(if_dump):
            """Process interface dump.

            Strips NUL padding from fixed-size string fields and converts
            binary MAC addresses to their textual form.

            :param if_dump: Interface dump.
            :type if_dump: dict
            :returns: Processed interface dump.
            :rtype: dict
            """
            if_dump['interface_name'] = if_dump['interface_name'].rstrip('\x00')
            if_dump['tag'] = if_dump['tag'].rstrip('\x00')
            if_dump['l2_address'] = L2Util.bin_to_mac(if_dump['l2_address'])
            if_dump['b_dmac'] = L2Util.bin_to_mac(if_dump['b_dmac'])
            if_dump['b_smac'] = L2Util.bin_to_mac(if_dump['b_smac'])
            return if_dump

        data = list() if interface is None else dict()
        for item in papi_if_dump:
            if interface is None:
                data.append(process_if_dump(item[cmd_reply]))
            # Compare via str() with NULs trimmed so both the name filter and
            # the numeric index filter work against raw dump values.
            elif str(item[cmd_reply].get(param)).rstrip('\x00') == \
                    str(interface):
                data = process_if_dump(item[cmd_reply])
                break

        logger.debug('Interface data:\n{if_data}'.format(if_data=data))
        return data
+
+ @staticmethod
+ def vpp_get_interface_name(node, sw_if_index):
+ """Get interface name for the given SW interface index from actual
+ interface dump.
+
+ :param node: VPP node to get interface data from.
+ :param sw_if_index: SW interface index of the specific interface.
+ :type node: dict
+ :type sw_if_index: int
+ :returns: Name of the given interface.
+ :rtype: str
+ """
+ if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
+ if if_data['sup_sw_if_index'] != if_data['sw_if_index']:
+ if_data = InterfaceUtil.vpp_get_interface_data(
+ node, if_data['sup_sw_if_index'])
+
+ return if_data.get('interface_name')
+
    @staticmethod
    def vpp_get_interface_sw_index(node, interface_name):
        """Get SW interface index for the given interface name from actual
        interface dump.

        :param node: VPP node to get interface data from.
        :param interface_name: Interface name.
        :type node: dict
        :type interface_name: str
        :returns: SW interface index of the given interface.
        :rtype: int
        """
        if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)

        return if_data.get('sw_if_index')
+
+ @staticmethod
+ def vpp_get_interface_mac(node, interface):
+ """Get MAC address for the given interface from actual interface dump.
+
+ :param node: VPP node to get interface data from.
+ :param interface: Numeric index or name string of a specific interface.
+ :type node: dict
+ :type interface: int or str
+ :returns: MAC address.
+ :rtype: str
+ """
+ if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
+ if if_data['sup_sw_if_index'] != if_data['sw_if_index']:
+ if_data = InterfaceUtil.vpp_get_interface_data(
+ node, if_data['sup_sw_if_index'])
+
+ return if_data.get('l2_address')
+
+ @staticmethod
+ def tg_set_interface_driver(node, pci_addr, driver):
+ """Set interface driver on the TG node.
+
+ :param node: Node to set interface driver on (must be TG node).
+ :param pci_addr: PCI address of the interface.
+ :param driver: Driver name.
+ :type node: dict
+ :type pci_addr: str
+ :type driver: str
+ :raises RuntimeError: If unbinding from the current driver fails.
+ :raises RuntimeError: If binding to the new driver fails.
+ """
+ old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
+ if old_driver == driver:
+ return
+
+ ssh = SSH()
+ ssh.connect(node)
+
+ # Unbind from current driver
+ if old_driver is not None:
+ cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/unbind"'\
+ .format(pci_addr, old_driver)
+ (ret_code, _, _) = ssh.exec_command_sudo(cmd)
+ if int(ret_code) != 0:
+ raise RuntimeError("'{0}' failed on '{1}'"
+ .format(cmd, node['host']))
+
+ # Bind to the new driver
+ cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/bind"'\
+ .format(pci_addr, driver)
+ (ret_code, _, _) = ssh.exec_command_sudo(cmd)
+ if int(ret_code) != 0:
+ raise RuntimeError("'{0}' failed on '{1}'"
+ .format(cmd, node['host']))
+
    @staticmethod
    def tg_get_interface_driver(node, pci_addr):
        """Get interface driver from the TG node.

        :param node: Node to get interface driver on (must be TG node).
        :param pci_addr: PCI address of the interface.
        :type node: dict
        :type pci_addr: str
        :returns: Interface driver or None if not found.
        :rtype: str
        :raises RuntimeError: If PCI rescan or lspci command execution failed.
        """
        # Thin wrapper; the actual sysfs/lspci querying lives in DUTSetup.
        return DUTSetup.get_pci_dev_driver(node, pci_addr)
+
    @staticmethod
    def tg_set_interfaces_udev_rules(node):
        """Set udev rules for interfaces.

        Create udev rules file in /etc/udev/rules.d where are rules for each
        interface used by TG node, based on MAC interface has specific name.
        So after unbind and bind again to kernel driver interface has same
        name as before. This must be called after TG has set name for each
        port in topology dictionary.
        udev rule example
        SUBSYSTEM=="net", ACTION=="add", ATTR{address}=="52:54:00:e1:8a:0f",
        NAME="eth1"

        :param node: Node to set udev rules on (must be TG node).
        :type node: dict
        :raises RuntimeError: If setting of udev rules fails.
        """
        ssh = SSH()
        ssh.connect(node)

        # Start from a clean rules file so stale rules do not accumulate.
        cmd = 'rm -f {0}'.format(InterfaceUtil.__UDEV_IF_RULES_FILE)
        (ret_code, _, _) = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError("'{0}' failed on '{1}'"
                               .format(cmd, node['host']))

        # Append one MAC-to-name rule per topology interface. The escaped
        # quotes survive the remote shell, so the rules file ends up with
        # plain double quotes as udev expects.
        for interface in node['interfaces'].values():
            rule = 'SUBSYSTEM==\\"net\\", ACTION==\\"add\\", ATTR{address}' + \
                   '==\\"' + interface['mac_address'] + '\\", NAME=\\"' + \
                   interface['name'] + '\\"'
            cmd = 'sh -c "echo \'{0}\' >> {1}"'.format(
                rule, InterfaceUtil.__UDEV_IF_RULES_FILE)
            (ret_code, _, _) = ssh.exec_command_sudo(cmd)
            if int(ret_code) != 0:
                raise RuntimeError("'{0}' failed on '{1}'"
                                   .format(cmd, node['host']))

        # NOTE(review): restart exit code is not checked - presumably a
        # deliberate best-effort step; confirm.
        cmd = '/etc/init.d/udev restart'
        ssh.exec_command_sudo(cmd)
+
+ @staticmethod
+ def tg_set_interfaces_default_driver(node):
+ """Set interfaces default driver specified in topology yaml file.
+
+ :param node: Node to setup interfaces driver on (must be TG node).
+ :type node: dict
+ """
+ for interface in node['interfaces'].values():
+ InterfaceUtil.tg_set_interface_driver(node,
+ interface['pci_address'],
+ interface['driver'])
+
+ @staticmethod
+ def update_vpp_interface_data_on_node(node):
+ """Update vpp generated interface data for a given node in DICT__nodes.
+
+ Updates interface names, software if index numbers and any other details
+ generated specifically by vpp that are unknown before testcase run.
+ It does this by dumping interface list from all devices using python
+ api, and pairing known information from topology (mac address) to state
+ from VPP.
+
+ :param node: Node selected from DICT__nodes.
+ :type node: dict
+ """
+ interface_list = InterfaceUtil.vpp_get_interface_data(node)
+ interface_dict = dict()
+ for ifc in interface_list:
+ interface_dict[ifc['l2_address']] = ifc
+
+ for if_name, if_data in node['interfaces'].items():
+ ifc_dict = interface_dict.get(if_data['mac_address'])
+ if ifc_dict is not None:
+ if_data['name'] = ifc_dict['interface_name']
+ if_data['vpp_sw_index'] = ifc_dict['sw_if_index']
+ if_data['mtu'] = ifc_dict['mtu'][0]
+ logger.trace('Interface {ifc} found by MAC {mac}'.format(
+ ifc=if_name, mac=if_data['mac_address']))
+ else:
+ logger.trace('Interface {ifc} not found by MAC {mac}'.format(
+ ifc=if_name, mac=if_data['mac_address']))
+ if_data['vpp_sw_index'] = None
+
+ @staticmethod
+ def update_nic_interface_names(node):
+ """Update interface names based on nic type and PCI address.
+
+ This method updates interface names in the same format as VPP does.
+
+ :param node: Node dictionary.
+ :type node: dict
+ """
+ for ifc in node['interfaces'].values():
+ if_pci = ifc['pci_address'].replace('.', ':').split(':')
+ bus = '{:x}'.format(int(if_pci[1], 16))
+ dev = '{:x}'.format(int(if_pci[2], 16))
+ fun = '{:x}'.format(int(if_pci[3], 16))
+ loc = '{bus}/{dev}/{fun}'.format(bus=bus, dev=dev, fun=fun)
+ if ifc['model'] == 'Intel-XL710':
+ ifc['name'] = 'FortyGigabitEthernet{loc}'.format(loc=loc)
+ elif ifc['model'] == 'Intel-X710':
+ ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
+ elif ifc['model'] == 'Intel-X520-DA2':
+ ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
+ elif ifc['model'] == 'Cisco-VIC-1385':
+ ifc['name'] = 'FortyGigabitEthernet{loc}'.format(loc=loc)
+ elif ifc['model'] == 'Cisco-VIC-1227':
+ ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
+ else:
+ ifc['name'] = 'UnknownEthernet{loc}'.format(loc=loc)
+
+ @staticmethod
+ def update_nic_interface_names_on_all_duts(nodes):
+ """Update interface names based on nic type and PCI address on all DUTs.
+
+ This method updates interface names in the same format as VPP does.
+
+ :param nodes: Topology nodes.
+ :type nodes: dict
+ """
+ for node in nodes.values():
+ if node['type'] == NodeType.DUT:
+ InterfaceUtil.update_nic_interface_names(node)
+
    @staticmethod
    def update_tg_interface_data_on_node(node, skip_tg_udev=False):
        """Update interface name for TG/linux node in DICT__nodes.

        .. note::
            # for dev in `ls /sys/class/net/`;
            > do echo "\"`cat /sys/class/net/$dev/address`\": \"$dev\""; done
            "52:54:00:9f:82:63": "eth0"
            "52:54:00:77:ae:a9": "eth1"
            "52:54:00:e1:8a:0f": "eth2"
            "00:00:00:00:00:00": "lo"

        :param node: Node selected from DICT__nodes.
        :param skip_tg_udev: Skip udev rename on TG node.
        :type node: dict
        :type skip_tg_udev: bool
        :raises RuntimeError: If getting of interface name and MAC fails.
        """
        # First setup interface driver specified in yaml file
        InterfaceUtil.tg_set_interfaces_default_driver(node)

        # Get interface names
        ssh = SSH()
        ssh.connect(node)

        # Emits one '"<mac>": "<dev>"' line per kernel network device.
        cmd = ('for dev in `ls /sys/class/net/`; do echo "\\"`cat '
               '/sys/class/net/$dev/address`\\": \\"$dev\\""; done;')

        (ret_code, stdout, _) = ssh.exec_command(cmd)
        if int(ret_code) != 0:
            raise RuntimeError('Get interface name and MAC failed')
        # Join the emitted lines into one JSON object: MAC -> device name.
        tmp = "{" + stdout.rstrip().replace('\n', ',') + "}"
        interfaces = JsonParser().parse_data(tmp)
        for interface in node['interfaces'].values():
            name = interfaces.get(interface['mac_address'])
            if name is None:
                # MAC not present on this node; keep topology name as-is.
                continue
            interface['name'] = name

        # Set udev rules for interfaces
        if not skip_tg_udev:
            InterfaceUtil.tg_set_interfaces_udev_rules(node)
+
    @staticmethod
    def iface_update_numa_node(node):
        """For all interfaces from topology file update numa node based on
        information from the node.

        :param node: Node from topology.
        :type node: dict
        :returns: Nothing.
        :raises ValueError: If numa node is less than 0.
        :raises RuntimeError: If update of numa node fails.
        """
        ssh = SSH()
        for if_key in Topology.get_node_interfaces(node):
            if_pci = Topology.get_interface_pci_addr(node, if_key)
            ssh.connect(node)
            cmd = "cat /sys/bus/pci/devices/{}/numa_node".format(if_pci)
            # Retry the sysfs read up to three times before giving up.
            for _ in range(3):
                (ret, out, _) = ssh.exec_command(cmd)
                if ret == 0:
                    try:
                        numa_node = int(out)
                        if numa_node < 0:
                            # sysfs reports -1 when the kernel has no NUMA
                            # info; on single-node machines treat it as 0,
                            # otherwise treat the read as failed and retry.
                            if CpuUtils.cpu_node_count(node) == 1:
                                numa_node = 0
                            else:
                                raise ValueError
                    except ValueError:
                        logger.trace('Reading numa location failed for: {0}'
                                     .format(if_pci))
                    else:
                        Topology.set_interface_numa_node(node, if_key,
                                                         numa_node)
                        break
            else:
                # for/else: all three attempts failed without a break.
                raise RuntimeError('Update numa node failed for: {0}'
                                   .format(if_pci))
+
+ @staticmethod
+ def update_all_numa_nodes(nodes, skip_tg=False):
+ """For all nodes and all their interfaces from topology file update numa
+ node information based on information from the node.
+
+ :param nodes: Nodes in the topology.
+ :param skip_tg: Skip TG node
+ :type nodes: dict
+ :type skip_tg: bool
+ :returns: Nothing.
+ """
+ for node in nodes.values():
+ if node['type'] == NodeType.DUT:
+ InterfaceUtil.iface_update_numa_node(node)
+ elif node['type'] == NodeType.TG and not skip_tg:
+ InterfaceUtil.iface_update_numa_node(node)
+
+ @staticmethod
+ def update_all_interface_data_on_all_nodes(nodes, skip_tg=False,
+ skip_tg_udev=False,
+ numa_node=False):
+ """Update interface names on all nodes in DICT__nodes.
+
+ This method updates the topology dictionary by querying interface lists
+ of all nodes mentioned in the topology dictionary.
+
+ :param nodes: Nodes in the topology.
+ :param skip_tg: Skip TG node.
+ :param skip_tg_udev: Skip udev rename on TG node.
+ :param numa_node: Retrieve numa_node location.
+ :type nodes: dict
+ :type skip_tg: bool
+ :type skip_tg_udev: bool
+ :type numa_node: bool
+ """
+ for node_data in nodes.values():
+ if node_data['type'] == NodeType.DUT:
+ InterfaceUtil.update_vpp_interface_data_on_node(node_data)
+ elif node_data['type'] == NodeType.TG and not skip_tg:
+ InterfaceUtil.update_tg_interface_data_on_node(
+ node_data, skip_tg_udev)
+
+ if numa_node:
+ if node_data['type'] == NodeType.DUT:
+ InterfaceUtil.iface_update_numa_node(node_data)
+ elif node_data['type'] == NodeType.TG and not skip_tg:
+ InterfaceUtil.iface_update_numa_node(node_data)
+
    @staticmethod
    def create_vlan_subinterface(node, interface, vlan):
        """Create VLAN sub-interface on node.

        :param node: Node to add VLAN subinterface on.
        :param interface: Interface name on which create VLAN subinterface.
        :param vlan: VLAN ID of the subinterface to be created.
        :type node: dict
        :type interface: str
        :type vlan: int
        :returns: Name and index of created subinterface.
        :rtype: tuple
        :raises RuntimeError: if it is unable to create VLAN subinterface on the
            node.
        """
        iface_key = Topology.get_interface_by_name(node, interface)
        sw_if_index = Topology.get_interface_sw_index(node, iface_key)

        cmd = 'create_vlan_subif'
        args = dict(sw_if_index=sw_if_index,
                    vlan_id=int(vlan))
        err_msg = 'Failed to create VLAN sub-interface on host {host}'.format(
            host=node['host'])
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)

        # Register the new sub-interface in the topology dictionary.
        sw_if_idx = papi_resp['sw_if_index']
        if_key = Topology.add_new_port(node, 'vlan_subif')
        Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
        Topology.update_interface_name(node, if_key, ifc_name)

        return '{ifc}.{vlan}'.format(ifc=interface, vlan=vlan), sw_if_idx
+
    @staticmethod
    def create_vxlan_interface(node, vni, source_ip, destination_ip):
        """Create VXLAN interface and return sw if index of created interface.

        :param node: Node where to create VXLAN interface.
        :param vni: VXLAN Network Identifier.
        :param source_ip: Source IP of a VXLAN Tunnel End Point.
        :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
        :type node: dict
        :type vni: int
        :type source_ip: str
        :type destination_ip: str
        :returns: SW IF INDEX of created interface.
        :rtype: int
        :raises RuntimeError: if it is unable to create VxLAN interface on the
            node.
        """
        # EAFP: try to parse the endpoints as IPv6 first and fall back to
        # IPv4 when parsing fails (Python 2 unicode conversion required by
        # the ipaddress-style classes).
        try:
            src_address = IPv6Address(unicode(source_ip))
            dst_address = IPv6Address(unicode(destination_ip))
            af_inet = AF_INET6
            is_ipv6 = 1
        except (AddressValueError, NetmaskValueError):
            src_address = IPv4Address(unicode(source_ip))
            dst_address = IPv4Address(unicode(destination_ip))
            af_inet = AF_INET
            is_ipv6 = 0

        # BITWISE_NON_ZERO (all ones) leaves instance/mcast/decap fields
        # unset so VPP picks its defaults.
        cmd = 'vxlan_add_del_tunnel'
        args = dict(is_add=1,
                    is_ipv6=is_ipv6,
                    instance=Constants.BITWISE_NON_ZERO,
                    src_address=inet_pton(af_inet, str(src_address)),
                    dst_address=inet_pton(af_inet, str(dst_address)),
                    mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
                    encap_vrf_id=0,
                    decap_next_index=Constants.BITWISE_NON_ZERO,
                    vni=int(vni))
        err_msg = 'Failed to create VXLAN tunnel interface on host {host}'.\
            format(host=node['host'])
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)

        # Register the new tunnel interface in the topology dictionary.
        sw_if_idx = papi_resp['sw_if_index']
        if_key = Topology.add_new_port(node, 'vxlan_tunnel')
        Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
        Topology.update_interface_name(node, if_key, ifc_name)

        return sw_if_idx
+
    @staticmethod
    def vxlan_dump(node, interface=None):
        """Get VxLAN data for the given interface.

        :param node: VPP node to get interface data from.
        :param interface: Numeric index or name string of a specific interface.
            If None, information about all VxLAN interfaces is returned.
        :type node: dict
        :type interface: int or str
        :returns: Dictionary containing data for the given VxLAN interface or if
            interface=None, the list of dictionaries with all VxLAN interfaces.
        :rtype: dict or list
        :raises TypeError: if the data type of interface is neither basestring
            nor int.
        """
        if interface is not None:
            sw_if_index = InterfaceUtil.get_interface_index(node, interface)
        else:
            # All-ones index asks VPP to dump every VXLAN tunnel.
            sw_if_index = int(Constants.BITWISE_NON_ZERO)

        cmd = 'vxlan_tunnel_dump'
        cmd_reply = 'vxlan_tunnel_details'
        args = dict(sw_if_index=sw_if_index)
        err_msg = 'Failed to get VXLAN dump on host {host}'.format(
            host=node['host'])
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)

        papi_vxlan_dump = papi_resp.reply[0]['api_reply']

        def process_vxlan_dump(vxlan_dump):
            """Process vxlan dump.

            Converts binary tunnel endpoints to textual IP addresses.

            :param vxlan_dump: Vxlan interface dump.
            :type vxlan_dump: dict
            :returns: Processed vxlan interface dump.
            :rtype: dict
            """
            if vxlan_dump['is_ipv6']:
                vxlan_dump['src_address'] = \
                    inet_ntop(AF_INET6, vxlan_dump['src_address'])
                vxlan_dump['dst_address'] = \
                    inet_ntop(AF_INET6, vxlan_dump['dst_address'])
            else:
                # The API always sends a 16-byte buffer; IPv4 uses only the
                # first four bytes.
                vxlan_dump['src_address'] = \
                    inet_ntop(AF_INET, vxlan_dump['src_address'][0:4])
                vxlan_dump['dst_address'] = \
                    inet_ntop(AF_INET, vxlan_dump['dst_address'][0:4])
            return vxlan_dump

        data = list() if interface is None else dict()
        for item in papi_vxlan_dump:
            if interface is None:
                data.append(process_vxlan_dump(item[cmd_reply]))
            elif item[cmd_reply]['sw_if_index'] == sw_if_index:
                data = process_vxlan_dump(item[cmd_reply])
                break

        logger.debug('VXLAN data:\n{vxlan_data}'.format(vxlan_data=data))
        return data
+
+ @staticmethod
+ def vhost_user_dump(node):
+ """Get vhost-user data for the given node.
+
+ TODO: Move to VhostUser.py
+
+ :param node: VPP node to get interface data from.
+ :type node: dict
+ :returns: List of dictionaries with all vhost-user interfaces.
+ :rtype: list
+ """
+ cmd = 'sw_interface_vhost_user_dump'
+ cmd_reply = 'sw_interface_vhost_user_details'
+ err_msg = 'Failed to get vhost-user dump on host {host}'.format(
+ host=node['host'])
+ with PapiExecutor(node) as papi_exec:
+ papi_resp = papi_exec.add(cmd).get_dump(err_msg)
+
+ papi_vxlan_dump = papi_resp.reply[0]['api_reply']
+
+ def process_vhost_dump(vhost_dump):
+ """Process vhost dump.
+
+ :param vhost_dump: Vhost interface dump.
+ :type vhost_dump: dict
+ :returns: Processed vhost interface dump.
+ :rtype: dict
+ """
+ vhost_dump['interface_name'] = \
+ vhost_dump['interface_name'].rstrip('\x00')
+ vhost_dump['sock_filename'] = \
+ vhost_dump['sock_filename'].rstrip('\x00')
+ return vhost_dump
+
+ data = list()
+ for item in papi_vxlan_dump:
+ data.append(process_vhost_dump(item[cmd_reply]))
+
+ logger.debug('Vhost-user data:\n{vhost_data}'.format(vhost_data=data))
+ return data
+
    @staticmethod
    def tap_dump(node, name=None):
        """Get all TAP interface data from the given node, or data about
        a specific TAP interface.

        TODO: Move to Tap.py

        :param node: VPP node to get data from.
        :param name: Optional name of a specific TAP interface.
        :type node: dict
        :type name: str
        :returns: Dictionary of information about a specific TAP interface, or
            a List of dictionaries containing all TAP data for the given node.
        :rtype: dict or list
        """
        cmd = 'sw_interface_tap_v2_dump'
        cmd_reply = 'sw_interface_tap_v2_details'
        err_msg = 'Failed to get TAP dump on host {host}'.format(
            host=node['host'])
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd).get_dump(err_msg)

        papi_tap_dump = papi_resp.reply[0]['api_reply']

        def process_tap_dump(tap_dump):
            """Process tap dump.

            Strips NUL padding from fixed-size string fields and converts
            binary MAC/IP addresses to their textual form.

            :param tap_dump: Tap interface dump.
            :type tap_dump: dict
            :returns: Processed tap interface dump.
            :rtype: dict
            """
            tap_dump['dev_name'] = tap_dump['dev_name'].rstrip('\x00')
            tap_dump['host_if_name'] = tap_dump['host_if_name'].rstrip('\x00')
            tap_dump['host_namespace'] = \
                tap_dump['host_namespace'].rstrip('\x00')
            tap_dump['host_mac_addr'] = \
                L2Util.bin_to_mac(tap_dump['host_mac_addr'])
            tap_dump['host_ip4_addr'] = \
                inet_ntop(AF_INET, tap_dump['host_ip4_addr'])
            tap_dump['host_ip6_addr'] = \
                inet_ntop(AF_INET6, tap_dump['host_ip6_addr'])
            return tap_dump

        data = list() if name is None else dict()
        for item in papi_tap_dump:
            if name is None:
                data.append(process_tap_dump(item[cmd_reply]))
            # Match against the NUL-trimmed device name.
            elif item[cmd_reply].get('dev_name').rstrip('\x00') == name:
                data = process_tap_dump(item[cmd_reply])
                break

        logger.debug('TAP data:\n{tap_data}'.format(tap_data=data))
        return data
+
+ @staticmethod
+ def create_subinterface(node, interface, sub_id, outer_vlan_id=None,
+ inner_vlan_id=None, type_subif=None):
+ """Create sub-interface on node. It is possible to set required
+ sub-interface type and VLAN tag(s).
+
+ :param node: Node to add sub-interface.
+ :param interface: Interface name on which create sub-interface.
+ :param sub_id: ID of the sub-interface to be created.
+ :param outer_vlan_id: Optional outer VLAN ID.
+ :param inner_vlan_id: Optional inner VLAN ID.
+ :param type_subif: Optional type of sub-interface. Values supported by
+ VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
+ [default_sub]
+ :type node: dict
+ :type interface: str or int
+ :type sub_id: int
+ :type outer_vlan_id: int
+ :type inner_vlan_id: int
+ :type type_subif: str
+ :returns: Name and index of created sub-interface.
+ :rtype: tuple
+ :raises RuntimeError: If it is not possible to create sub-interface.
+ """
+ subif_types = type_subif.split()
+
+ cmd = 'create_subif'
+ args = dict(
+ sw_if_index=InterfaceUtil.get_interface_index(node, interface),
+ sub_id=int(sub_id),
+ no_tags=1 if 'no_tags' in subif_types else 0,
+ one_tag=1 if 'one_tag' in subif_types else 0,
+ two_tags=1 if 'two_tags' in subif_types else 0,
+ dot1ad=1 if 'dot1ad' in subif_types else 0,
+ exact_match=1 if 'exact_match' in subif_types else 0,
+ default_sub=1 if 'default_sub' in subif_types else 0,
+ outer_vlan_id_any=1 if type_subif == 'default_sub' else 0,
+ inner_vlan_id_any=1 if type_subif == 'default_sub' else 0,
+ outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
+ inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0)
+ err_msg = 'Failed to create sub-interface on host {host}'.format(
+ host=node['host'])
+ with PapiExecutor(node) as papi_exec:
+ papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
+ verify_reply(err_msg=err_msg)
+
+ sw_subif_idx = papi_resp['sw_if_index']
+ if_key = Topology.add_new_port(node, 'subinterface')
+ Topology.update_interface_sw_if_index(node, if_key, sw_subif_idx)
+ ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_subif_idx)
+ Topology.update_interface_name(node, if_key, ifc_name)
+
+ return '{ifc}.{s_id}'.format(ifc=interface, s_id=sub_id), sw_subif_idx
+
    @staticmethod
    def create_gre_tunnel_interface(node, source_ip, destination_ip):
        """Create GRE tunnel interface on node.

        :param node: VPP node to add tunnel interface.
        :param source_ip: Source of the GRE tunnel.
        :param destination_ip: Destination of the GRE tunnel.
        :type node: dict
        :type source_ip: str
        :type destination_ip: str
        :returns: Name and index of created GRE tunnel interface.
        :rtype: tuple
        :raises RuntimeError: If unable to create GRE tunnel interface.
        """
        cmd = 'gre_tunnel_add_del'
        # type=0 - presumably plain L3 GRE; confirm against the VPP
        # gre.api message definition. BITWISE_NON_ZERO instance lets VPP
        # auto-select the instance number.
        tunnel = dict(type=0,
                      instance=Constants.BITWISE_NON_ZERO,
                      src=str(source_ip),
                      dst=str(destination_ip),
                      outer_fib_id=0,
                      session_id=0)
        args = dict(is_add=1,
                    tunnel=tunnel)
        err_msg = 'Failed to create GRE tunnel interface on host {host}'.format(
            host=node['host'])
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)

        # Register the new tunnel interface in the topology dictionary.
        sw_if_idx = papi_resp['sw_if_index']
        if_key = Topology.add_new_port(node, 'gre_tunnel')
        Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
        Topology.update_interface_name(node, if_key, ifc_name)

        return ifc_name, sw_if_idx
+
+ @staticmethod
+ def vpp_create_loopback(node):
+ """Create loopback interface on VPP node.
+
+ :param node: Node to create loopback interface on.
+ :type node: dict
+ :returns: SW interface index.
+ :rtype: int
+ :raises RuntimeError: If it is not possible to create loopback on the
+ node.
+ """
+ cmd = 'create_loopback'
+ args = dict(mac_address=0)
+ err_msg = 'Failed to create loopback interface on host {host}'.format(
+ host=node['host'])
+ with PapiExecutor(node) as papi_exec:
+ papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
+ verify_reply(err_msg=err_msg)
+
+ sw_if_idx = papi_resp['sw_if_index']
+ if_key = Topology.add_new_port(node, 'loopback')
+ Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
+ ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
+ Topology.update_interface_name(node, if_key, ifc_name)
+
+ return sw_if_idx
+
    @staticmethod
    def vpp_create_bond_interface(node, mode, load_balance=None, mac=None):
        """Create bond interface on VPP node.

        :param node: DUT node from topology.
        :param mode: Link bonding mode.
        :param load_balance: Load balance (optional, valid for xor and lacp
            modes, otherwise ignored).
        :param mac: MAC address to assign to the bond interface (optional).
        :type node: dict
        :type mode: str
        :type load_balance: str
        :type mac: str
        :returns: Interface key (name) in topology.
        :rtype: str
        :raises RuntimeError: If it is not possible to create bond interface on
            the node.
        """
        cmd = 'bond_create'
        # Mode and load-balance strings are mapped to enum values by name
        # ('lacp' -> LinkBondMode.LACP etc.); id of all-ones lets VPP pick
        # the bond instance number.
        args = dict(id=int(Constants.BITWISE_NON_ZERO),
                    use_custom_mac=0 if mac is None else 1,
                    mac_address=0 if mac is None else L2Util.mac_to_bin(mac),
                    mode=getattr(LinkBondMode, '{md}'.format(
                        md=mode.replace('-', '_').upper())).value,
                    lb=0 if load_balance is None else getattr(
                        LinkBondLoadBalance, '{lb}'.format(
                            lb=load_balance.upper())).value)
        err_msg = 'Failed to create bond interface on host {host}'.format(
            host=node['host'])
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)

        # Register the new bond interface in the topology dictionary.
        sw_if_idx = papi_resp['sw_if_index']
        InterfaceUtil.add_eth_interface(node, sw_if_idx=sw_if_idx,
                                        ifc_pfx='eth_bond')
        if_key = Topology.get_interface_by_sw_index(node, sw_if_idx)

        return if_key
+
    @staticmethod
    def add_eth_interface(node, ifc_name=None, sw_if_idx=None, ifc_pfx=None):
        """Add ethernet interface to current topology.

        Exactly one of ifc_name / sw_if_idx is expected; the missing value
        is resolved from VPP.

        :param node: DUT node from topology.
        :param ifc_name: Name of the interface.
        :param sw_if_idx: SW interface index.
        :param ifc_pfx: Interface key prefix.
        :type node: dict
        :type ifc_name: str
        :type sw_if_idx: int
        :type ifc_pfx: str
        """
        if_key = Topology.add_new_port(node, ifc_pfx)

        # NOTE(review): truthiness checks mean a sw_if_idx of 0 (or empty
        # ifc_name) skips the lookup of the other value - confirm callers
        # never pass index 0 here.
        if ifc_name and sw_if_idx is None:
            sw_if_idx = InterfaceUtil.vpp_get_interface_sw_index(node, ifc_name)
        Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
        if sw_if_idx and ifc_name is None:
            ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
        Topology.update_interface_name(node, if_key, ifc_name)
        ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_idx)
        Topology.update_interface_mac_address(node, if_key, ifc_mac)
+
    @staticmethod
    def vpp_create_avf_interface(node, vf_pci_addr, num_rx_queues=None):
        """Create AVF interface on VPP node.

        :param node: DUT node from topology.
        :param vf_pci_addr: Virtual Function PCI address.
        :param num_rx_queues: Number of RX queues.
        :type node: dict
        :type vf_pci_addr: str
        :type num_rx_queues: int
        :returns: Interface key (name) in topology.
        :rtype: str
        :raises RuntimeError: If it is not possible to create AVF interface on
            the node.
        """
        cmd = 'avf_create'
        # Zero queue counts/sizes leave the choice to VPP defaults.
        args = dict(pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
                    enable_elog=0,
                    rxq_num=int(num_rx_queues) if num_rx_queues else 0,
                    rxq_size=0,
                    txq_size=0)
        err_msg = 'Failed to create AVF interface on host {host}'.format(
            host=node['host'])
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)

        # Register the new AVF interface in the topology dictionary.
        sw_if_idx = papi_resp['sw_if_index']
        InterfaceUtil.add_eth_interface(node, sw_if_idx=sw_if_idx,
                                        ifc_pfx='eth_avf')
        if_key = Topology.get_interface_by_sw_index(node, sw_if_idx)

        return if_key
+
+ @staticmethod
+ def vpp_enslave_physical_interface(node, interface, bond_if):
+ """Enslave physical interface to bond interface on VPP node.
+
+ :param node: DUT node from topology.
+ :param interface: Physical interface key from topology file.
+ :param bond_if: Load balance
+ :type node: dict
+ :type interface: str
+ :type bond_if: str
+ :raises RuntimeError: If it is not possible to enslave physical
+ interface to bond interface on the node.
+ """
+ cmd = 'bond_enslave'
+ args = dict(
+ sw_if_index=Topology.get_interface_sw_index(node, interface),
+ bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
+ is_passive=0,
+ is_long_timeout=0)
+ err_msg = 'Failed to enslave physical interface {ifc} to bond ' \
+ 'interface {bond} on host {host}'.format(ifc=interface,
+ bond=bond_if,
+ host=node['host'])
+ with PapiExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_replies(err_msg).\
+ verify_reply(err_msg=err_msg)
+
    @staticmethod
    def vpp_show_bond_data_on_node(node, details=False):
        """Show (detailed) bond information on VPP node.

        :param node: DUT node from topology.
        :param details: If detailed information is required or not.
        :type node: dict
        :type details: bool
        """
        cmd = 'sw_interface_bond_dump'
        cmd_reply = 'sw_interface_bond_details'
        err_msg = 'Failed to get bond interface dump on host {host}'.format(
            host=node['host'])

        data = ('Bond data on node {host}:\n'.format(host=node['host']))
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd).get_dump(err_msg)

        papi_dump = papi_resp.reply[0]['api_reply']
        for item in papi_dump:
            data += ('{b}\n'.format(b=item[cmd_reply]['interface_name'].
                                    rstrip('\x00')))
            # Enum names are reported lowercase to match VPP CLI output.
            data += (' mode: {m}\n'.
                     format(m=LinkBondMode(item[cmd_reply]['mode']).name.
                            lower()))
            data += (' load balance: {lb}\n'.
                     format(lb=LinkBondLoadBalance(item[cmd_reply]['lb']).name.
                            lower()))
            data += (' number of active slaves: {n}\n'.
                     format(n=item[cmd_reply]['active_slaves']))
            if details:
                slave_data = InterfaceUtil.vpp_bond_slave_dump(
                    node, Topology.get_interface_by_sw_index(
                        node, item[cmd_reply]['sw_if_index']))
                for slave in slave_data:
                    if not slave['is_passive']:
                        data += ('  {s}\n'.format(s=slave['interface_name']))
            data += (' number of slaves: {n}\n'.
                     format(n=item[cmd_reply]['slaves']))
            if details:
                # slave_data was fetched in the first 'details' branch above;
                # both branches share the same condition.
                for slave in slave_data:
                    data += ('  {s}\n'.format(s=slave['interface_name']))
            data += (' interface id: {i}\n'.
                     format(i=item[cmd_reply]['id']))
            data += (' sw_if_index: {i}\n'.
                     format(i=item[cmd_reply]['sw_if_index']))
        logger.info(data)
+
    @staticmethod
    def vpp_bond_slave_dump(node, interface):
        """Get bond interface slave(s) data on VPP node.

        :param node: DUT node from topology.
        :param interface: Physical interface key from topology file.
        :type node: dict
        :type interface: str
        :returns: List of bond slave interface data dictionaries.
        :rtype: list
        """
        cmd = 'sw_interface_slave_dump'
        cmd_reply = 'sw_interface_slave_details'
        args = dict(sw_if_index=Topology.get_interface_sw_index(
            node, interface))
        err_msg = 'Failed to get slave dump on host {host}'.format(
            host=node['host'])

        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)

        papi_dump = papi_resp.reply[0]['api_reply']

        def process_slave_dump(slave_dump):
            """Process slave dump.

            Strips NUL padding from the fixed-size interface name.

            :param slave_dump: Slave interface dump.
            :type slave_dump: dict
            :returns: Processed slave interface dump.
            :rtype: dict
            """
            slave_dump['interface_name'] = slave_dump['interface_name'].\
                rstrip('\x00')
            return slave_dump

        data = list()
        for item in papi_dump:
            data.append(process_slave_dump(item[cmd_reply]))

        logger.debug('Slave data:\n{slave_data}'.format(slave_data=data))
        return data
+
+ @staticmethod
+ def vpp_show_bond_data_on_all_nodes(nodes, details=False):
+ """Show (detailed) bond information on all VPP nodes in DICT__nodes.