+ sw_if_index = int(Constants.BITWISE_NON_ZERO)
+
+ cmd = 'vxlan_tunnel_dump'
+ cmd_reply = 'vxlan_tunnel_details'
+ args = dict(sw_if_index=sw_if_index)
+ err_msg = 'Failed to get VXLAN dump on host {host}'.format(
+ host=node['host'])
+ with PapiExecutor(node) as papi_exec:
+ papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
+
+ papi_vxlan_dump = papi_resp.reply[0]['api_reply']
+
+ def process_vxlan_dump(vxlan_dump):
+ """Process vxlan dump.
+
+ :param vxlan_dump: Vxlan interface dump.
+ :type vxlan_dump: dict
+ :returns: Processed vxlan interface dump.
+ :rtype: dict
+ """
+ if vxlan_dump['is_ipv6']:
+ vxlan_dump['src_address'] = \
+ inet_ntop(AF_INET6, vxlan_dump['src_address'])
+ vxlan_dump['dst_address'] = \
+ inet_ntop(AF_INET6, vxlan_dump['dst_address'])
+ else:
+ vxlan_dump['src_address'] = \
+ inet_ntop(AF_INET, vxlan_dump['src_address'][0:4])
+ vxlan_dump['dst_address'] = \
+ inet_ntop(AF_INET, vxlan_dump['dst_address'][0:4])
+ return vxlan_dump
+
+ data = list() if interface is None else dict()
+ for item in papi_vxlan_dump:
+ if interface is None:
+ data.append(process_vxlan_dump(item[cmd_reply]))
+ elif item[cmd_reply]['sw_if_index'] == sw_if_index:
+ data = process_vxlan_dump(item[cmd_reply])
+ break
+
+ logger.debug('VXLAN data:\n{vxlan_data}'.format(vxlan_data=data))
+ return data
+
+ @staticmethod
+ def vhost_user_dump(node):
+ """Get vhost-user data for the given node.
+
+ TODO: Move to VhostUser.py
+
+ :param node: VPP node to get interface data from.
+ :type node: dict
+ :returns: List of dictionaries with all vhost-user interfaces.
+ :rtype: list
+ """
+ cmd = 'sw_interface_vhost_user_dump'
+ cmd_reply = 'sw_interface_vhost_user_details'
+ err_msg = 'Failed to get vhost-user dump on host {host}'.format(
+ host=node['host'])
+ with PapiExecutor(node) as papi_exec:
+ papi_resp = papi_exec.add(cmd).get_dump(err_msg)
+
+ papi_vxlan_dump = papi_resp.reply[0]['api_reply']
+
+ def process_vhost_dump(vhost_dump):
+ """Process vhost dump.
+
+ :param vhost_dump: Vhost interface dump.
+ :type vhost_dump: dict
+ :returns: Processed vhost interface dump.
+ :rtype: dict
+ """
+ vhost_dump['interface_name'] = \
+ vhost_dump['interface_name'].rstrip('\x00')
+ vhost_dump['sock_filename'] = \
+ vhost_dump['sock_filename'].rstrip('\x00')
+ return vhost_dump
+
+ data = list()
+ for item in papi_vxlan_dump:
+ data.append(process_vhost_dump(item[cmd_reply]))
+
+ logger.debug('Vhost-user data:\n{vhost_data}'.format(vhost_data=data))
+ return data
+
+ @staticmethod
+ def tap_dump(node, name=None):
+ """Get all TAP interface data from the given node, or data about
+ a specific TAP interface.
+
+ TODO: Move to Tap.py
+
+ :param node: VPP node to get data from.
+ :param name: Optional name of a specific TAP interface.
+ :type node: dict
+ :type name: str
+ :returns: Dictionary of information about a specific TAP interface, or
+ a List of dictionaries containing all TAP data for the given node.
+ :rtype: dict or list
+ """
+ cmd = 'sw_interface_tap_v2_dump'
+ cmd_reply = 'sw_interface_tap_v2_details'
+ err_msg = 'Failed to get TAP dump on host {host}'.format(
+ host=node['host'])
+ with PapiExecutor(node) as papi_exec:
+ papi_resp = papi_exec.add(cmd).get_dump(err_msg)
+
+ papi_tap_dump = papi_resp.reply[0]['api_reply']
+
+ def process_tap_dump(tap_dump):
+ """Process tap dump.
+
+ :param tap_dump: Tap interface dump.
+ :type tap_dump: dict
+ :returns: Processed tap interface dump.
+ :rtype: dict
+ """
+ tap_dump['dev_name'] = tap_dump['dev_name'].rstrip('\x00')
+ tap_dump['host_if_name'] = tap_dump['host_if_name'].rstrip('\x00')
+ tap_dump['host_namespace'] = \
+ tap_dump['host_namespace'].rstrip('\x00')
+ tap_dump['host_mac_addr'] = \
+ L2Util.bin_to_mac(tap_dump['host_mac_addr'])
+ tap_dump['host_ip4_addr'] = \
+ inet_ntop(AF_INET, tap_dump['host_ip4_addr'])
+ tap_dump['host_ip6_addr'] = \
+ inet_ntop(AF_INET6, tap_dump['host_ip6_addr'])
+ return tap_dump
+
+ data = list() if name is None else dict()
+ for item in papi_tap_dump:
+ if name is None:
+ data.append(process_tap_dump(item[cmd_reply]))
+ elif item[cmd_reply].get('dev_name').rstrip('\x00') == name:
+ data = process_tap_dump(item[cmd_reply])
+ break
+
+ logger.debug('TAP data:\n{tap_data}'.format(tap_data=data))
+ return data
+
+ @staticmethod
+ def create_subinterface(node, interface, sub_id, outer_vlan_id=None,
+ inner_vlan_id=None, type_subif=None):
+ """Create sub-interface on node. It is possible to set required
+ sub-interface type and VLAN tag(s).
+
+ :param node: Node to add sub-interface.
+ :param interface: Interface name on which create sub-interface.
+ :param sub_id: ID of the sub-interface to be created.
+ :param outer_vlan_id: Optional outer VLAN ID.
+ :param inner_vlan_id: Optional inner VLAN ID.
+ :param type_subif: Optional type of sub-interface. Values supported by
+ VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
+ [default_sub]
+ :type node: dict
+ :type interface: str or int
+ :type sub_id: int
+ :type outer_vlan_id: int
+ :type inner_vlan_id: int
+ :type type_subif: str
+ :returns: Name and index of created sub-interface.
+ :rtype: tuple
+ :raises RuntimeError: If it is not possible to create sub-interface.
+ """
+ subif_types = type_subif.split()
+
+ cmd = 'create_subif'
+ args = dict(
+ sw_if_index=InterfaceUtil.get_interface_index(node, interface),
+ sub_id=int(sub_id),
+ no_tags=1 if 'no_tags' in subif_types else 0,
+ one_tag=1 if 'one_tag' in subif_types else 0,
+ two_tags=1 if 'two_tags' in subif_types else 0,
+ dot1ad=1 if 'dot1ad' in subif_types else 0,
+ exact_match=1 if 'exact_match' in subif_types else 0,
+ default_sub=1 if 'default_sub' in subif_types else 0,
+ outer_vlan_id_any=1 if type_subif == 'default_sub' else 0,
+ inner_vlan_id_any=1 if type_subif == 'default_sub' else 0,
+ outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
+ inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0)
+ err_msg = 'Failed to create sub-interface on host {host}'.format(
+ host=node['host'])
+ with PapiExecutor(node) as papi_exec:
+ papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
+ verify_reply(err_msg=err_msg)
+
+ sw_subif_idx = papi_resp['sw_if_index']
+ if_key = Topology.add_new_port(node, 'subinterface')
+ Topology.update_interface_sw_if_index(node, if_key, sw_subif_idx)
+ ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_subif_idx)
+ Topology.update_interface_name(node, if_key, ifc_name)
+
+ return '{ifc}.{s_id}'.format(ifc=interface, s_id=sub_id), sw_subif_idx
+
+ @staticmethod
+ def create_gre_tunnel_interface(node, source_ip, destination_ip):
+ """Create GRE tunnel interface on node.
+
+ :param node: VPP node to add tunnel interface.
+ :param source_ip: Source of the GRE tunnel.
+ :param destination_ip: Destination of the GRE tunnel.
+ :type node: dict
+ :type source_ip: str
+ :type destination_ip: str
+ :returns: Name and index of created GRE tunnel interface.
+ :rtype: tuple
+ :raises RuntimeError: If unable to create GRE tunnel interface.
+ """
+ cmd = 'gre_tunnel_add_del'
+ tunnel = dict(type=0,
+ instance=Constants.BITWISE_NON_ZERO,
+ src=str(source_ip),
+ dst=str(destination_ip),
+ outer_fib_id=0,
+ session_id=0)
+ args = dict(is_add=1,
+ tunnel=tunnel)
+ err_msg = 'Failed to create GRE tunnel interface on host {host}'.format(
+ host=node['host'])
+ with PapiExecutor(node) as papi_exec:
+ papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
+ verify_reply(err_msg=err_msg)
+
+ sw_if_idx = papi_resp['sw_if_index']
+ if_key = Topology.add_new_port(node, 'gre_tunnel')
+ Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
+ ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
+ Topology.update_interface_name(node, if_key, ifc_name)
+
+ return ifc_name, sw_if_idx
+
+ @staticmethod
+ def vpp_create_loopback(node):
+ """Create loopback interface on VPP node.
+
+ :param node: Node to create loopback interface on.
+ :type node: dict
+ :returns: SW interface index.
+ :rtype: int
+ :raises RuntimeError: If it is not possible to create loopback on the
+ node.
+ """
+ cmd = 'create_loopback'
+ args = dict(mac_address=0)
+ err_msg = 'Failed to create loopback interface on host {host}'.format(
+ host=node['host'])
+ with PapiExecutor(node) as papi_exec:
+ papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
+ verify_reply(err_msg=err_msg)
+
+ sw_if_idx = papi_resp['sw_if_index']
+ if_key = Topology.add_new_port(node, 'loopback')
+ Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
+ ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
+ Topology.update_interface_name(node, if_key, ifc_name)
+
+ return sw_if_idx
+
+ @staticmethod
+ def vpp_create_bond_interface(node, mode, load_balance=None, mac=None):
+ """Create bond interface on VPP node.
+
+ :param node: DUT node from topology.
+ :param mode: Link bonding mode.
+ :param load_balance: Load balance (optional, valid for xor and lacp
+ modes, otherwise ignored).
+ :param mac: MAC address to assign to the bond interface (optional).
+ :type node: dict
+ :type mode: str
+ :type load_balance: str
+ :type mac: str
+ :returns: Interface key (name) in topology.
+ :rtype: str
+ :raises RuntimeError: If it is not possible to create bond interface on
+ the node.
+ """
+ cmd = 'bond_create'
+ args = dict(id=int(Constants.BITWISE_NON_ZERO),
+ use_custom_mac=0 if mac is None else 1,
+ mac_address=0 if mac is None else L2Util.mac_to_bin(mac),
+ mode=getattr(LinkBondMode, '{md}'.format(
+ md=mode.replace('-', '_').upper())).value,
+ lb=0 if load_balance is None else getattr(
+ LinkBondLoadBalance, '{lb}'.format(
+ lb=load_balance.upper())).value)
+ err_msg = 'Failed to create bond interface on host {host}'.format(
+ host=node['host'])
+ with PapiExecutor(node) as papi_exec:
+ papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
+ verify_reply(err_msg=err_msg)
+
+ sw_if_idx = papi_resp['sw_if_index']
+ InterfaceUtil.add_eth_interface(node, sw_if_idx=sw_if_idx,
+ ifc_pfx='eth_bond')
+ if_key = Topology.get_interface_by_sw_index(node, sw_if_idx)
+
+ return if_key
+
+ @staticmethod
+ def add_eth_interface(node, ifc_name=None, sw_if_idx=None, ifc_pfx=None):
+ """Add ethernet interface to current topology.
+
+ :param node: DUT node from topology.
+ :param ifc_name: Name of the interface.
+ :param sw_if_idx: SW interface index.
+ :param ifc_pfx: Interface key prefix.
+ :type node: dict
+ :type ifc_name: str
+ :type sw_if_idx: int
+ :type ifc_pfx: str
+ """
+ if_key = Topology.add_new_port(node, ifc_pfx)
+
+ if ifc_name and sw_if_idx is None:
+ sw_if_idx = InterfaceUtil.vpp_get_interface_sw_index(node, ifc_name)
+ Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
+ if sw_if_idx and ifc_name is None:
+ ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
+ Topology.update_interface_name(node, if_key, ifc_name)
+ ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_idx)
+ Topology.update_interface_mac_address(node, if_key, ifc_mac)
+
+ @staticmethod
+ def vpp_create_avf_interface(node, vf_pci_addr, num_rx_queues=None):
+ """Create AVF interface on VPP node.
+
+ :param node: DUT node from topology.
+ :param vf_pci_addr: Virtual Function PCI address.
+ :param num_rx_queues: Number of RX queues.
+ :type node: dict
+ :type vf_pci_addr: str
+ :type num_rx_queues: int
+ :returns: Interface key (name) in topology.
+ :rtype: str
+ :raises RuntimeError: If it is not possible to create AVF interface on
+ the node.
+ """
+ cmd = 'avf_create'
+ args = dict(pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
+ enable_elog=0,
+ rxq_num=int(num_rx_queues) if num_rx_queues else 0,
+ rxq_size=0,
+ txq_size=0)
+ err_msg = 'Failed to create AVF interface on host {host}'.format(
+ host=node['host'])
+ with PapiExecutor(node) as papi_exec:
+ papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
+ verify_reply(err_msg=err_msg)
+
+ sw_if_idx = papi_resp['sw_if_index']
+ InterfaceUtil.add_eth_interface(node, sw_if_idx=sw_if_idx,
+ ifc_pfx='eth_avf')
+ if_key = Topology.get_interface_by_sw_index(node, sw_if_idx)
+
+ return if_key
+
+ @staticmethod
+ def vpp_enslave_physical_interface(node, interface, bond_if):
+ """Enslave physical interface to bond interface on VPP node.
+
+ :param node: DUT node from topology.
+ :param interface: Physical interface key from topology file.
+ :param bond_if: Load balance
+ :type node: dict
+ :type interface: str
+ :type bond_if: str
+ :raises RuntimeError: If it is not possible to enslave physical
+ interface to bond interface on the node.
+ """
+ cmd = 'bond_enslave'
+ args = dict(
+ sw_if_index=Topology.get_interface_sw_index(node, interface),
+ bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
+ is_passive=0,
+ is_long_timeout=0)
+ err_msg = 'Failed to enslave physical interface {ifc} to bond ' \
+ 'interface {bond} on host {host}'.format(ifc=interface,
+ bond=bond_if,
+ host=node['host'])
+ with PapiExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_replies(err_msg).\
+ verify_reply(err_msg=err_msg)
+
    @staticmethod
    def vpp_show_bond_data_on_node(node, details=False):
        """Show (detailed) bond information on VPP node.

        Builds a human-readable report from the bond dump and logs it at
        INFO level; nothing is returned.

        :param node: DUT node from topology.
        :param details: If detailed information is required or not.
        :type node: dict
        :type details: bool
        """
        cmd = 'sw_interface_bond_dump'
        cmd_reply = 'sw_interface_bond_details'
        err_msg = 'Failed to get bond interface dump on host {host}'.format(
            host=node['host'])

        data = ('Bond data on node {host}:\n'.format(host=node['host']))
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd).get_dump(err_msg)

        papi_dump = papi_resp.reply[0]['api_reply']
        for item in papi_dump:
            # Interface name comes NUL-padded from the binary API.
            data += ('{b}\n'.format(b=item[cmd_reply]['interface_name'].
                                    rstrip('\x00')))
            data += (' mode: {m}\n'.
                     format(m=LinkBondMode(item[cmd_reply]['mode']).name.
                            lower()))
            data += (' load balance: {lb}\n'.
                     format(lb=LinkBondLoadBalance(item[cmd_reply]['lb']).name.
                            lower()))
            data += (' number of active slaves: {n}\n'.
                     format(n=item[cmd_reply]['active_slaves']))
            if details:
                # Fetch slave details once per bond; reused for both the
                # active-slaves and all-slaves listings below.
                slave_data = InterfaceUtil.vpp_bond_slave_dump(
                    node, Topology.get_interface_by_sw_index(
                        node, item[cmd_reply]['sw_if_index']))
                for slave in slave_data:
                    # Active slaves are the non-passive entries of the dump.
                    if not slave['is_passive']:
                        data += (' {s}\n'.format(s=slave['interface_name']))
            data += (' number of slaves: {n}\n'.
                     format(n=item[cmd_reply]['slaves']))
            if details:
                for slave in slave_data:
                    data += (' {s}\n'.format(s=slave['interface_name']))
            data += (' interface id: {i}\n'.
                     format(i=item[cmd_reply]['id']))
            data += (' sw_if_index: {i}\n'.
                     format(i=item[cmd_reply]['sw_if_index']))
        logger.info(data)
+
+ @staticmethod
+ def vpp_bond_slave_dump(node, interface):
+ """Get bond interface slave(s) data on VPP node.
+
+ :param node: DUT node from topology.
+ :param interface: Physical interface key from topology file.
+ :type node: dict
+ :type interface: str
+ :returns: Bond slave interface data.
+ :rtype: dict
+ """
+ cmd = 'sw_interface_slave_dump'
+ cmd_reply = 'sw_interface_slave_details'
+ args = dict(sw_if_index=Topology.get_interface_sw_index(
+ node, interface))
+ err_msg = 'Failed to get slave dump on host {host}'.format(
+ host=node['host'])
+
+ with PapiExecutor(node) as papi_exec:
+ papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
+
+ papi_dump = papi_resp.reply[0]['api_reply']
+
+ def process_slave_dump(slave_dump):
+ """Process slave dump.
+
+ :param slave_dump: Slave interface dump.
+ :type slave_dump: dict
+ :returns: Processed slave interface dump.
+ :rtype: dict
+ """
+ slave_dump['interface_name'] = slave_dump['interface_name'].\
+ rstrip('\x00')
+ return slave_dump
+
+ data = list()
+ for item in papi_dump:
+ data.append(process_slave_dump(item[cmd_reply]))
+
+ logger.debug('Slave data:\n{slave_data}'.format(slave_data=data))
+ return data
+
+ @staticmethod
+ def vpp_show_bond_data_on_all_nodes(nodes, details=False):
+ """Show (detailed) bond information on all VPP nodes in DICT__nodes.
+
+ :param nodes: Nodes in the topology.
+ :param details: If detailed information is required or not.
+ :type nodes: dict
+ :type details: bool
+ """
+ for node_data in nodes.values():
+ if node_data['type'] == NodeType.DUT:
+ InterfaceUtil.vpp_show_bond_data_on_node(node_data, details)
+
+ @staticmethod
+ def vpp_enable_input_acl_interface(node, interface, ip_version,
+ table_index):
+ """Enable input acl on interface.
+
+ :param node: VPP node to setup interface for input acl.
+ :param interface: Interface to setup input acl.
+ :param ip_version: Version of IP protocol.
+ :param table_index: Classify table index.
+ :type node: dict
+ :type interface: str or int
+ :type ip_version: str
+ :type table_index: int
+ """
+ cmd = 'input_acl_set_interface'
+ args = dict(
+ sw_if_index=InterfaceUtil.get_interface_index(node, interface),
+ ip4_table_index=table_index if ip_version == 'ip4'
+ else Constants.BITWISE_NON_ZERO,
+ ip6_table_index=table_index if ip_version == 'ip6'
+ else Constants.BITWISE_NON_ZERO,
+ l2_table_index=table_index if ip_version == 'l2'
+ else Constants.BITWISE_NON_ZERO,
+ is_add=1)
+ err_msg = 'Failed to enable input acl on interface {ifc}'.format(
+ ifc=interface)
+ with PapiExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_replies(err_msg).\
+ verify_reply(err_msg=err_msg)
+
+ @staticmethod
+ def get_interface_classify_table(node, interface):
+ """Get name of classify table for the given interface.
+
+ TODO: Move to Classify.py.
+
+ :param node: VPP node to get data from.
+ :param interface: Name or sw_if_index of a specific interface.
+ :type node: dict
+ :type interface: str or int
+ :returns: Classify table name.
+ :rtype: str
+ """
+ if isinstance(interface, basestring):
+ sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
+ else:
+ sw_if_index = interface
+
+ cmd = 'classify_table_by_interface'
+ args = dict(sw_if_index=sw_if_index)
+ err_msg = 'Failed to get classify table name by interface {ifc}'.format(
+ ifc=interface)
+ with PapiExecutor(node) as papi_exec:
+ papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg). \
+ verify_reply(err_msg=err_msg)
+
+ return papi_resp
+
+ @staticmethod
+ def get_sw_if_index(node, interface_name):
+ """Get sw_if_index for the given interface from actual interface dump.
+
+ :param node: VPP node to get interface data from.
+ :param interface_name: Name of the specific interface.
+ :type node: dict
+ :type interface_name: str
+ :returns: sw_if_index of the given interface.
+ :rtype: str
+ """
+ interface_data = InterfaceUtil.vpp_get_interface_data(
+ node, interface=interface_name)
+ return interface_data.get('sw_if_index')
+
+ @staticmethod
+ def vxlan_gpe_dump(node, interface_name=None):
+ """Get VxLAN GPE data for the given interface.
+
+ :param node: VPP node to get interface data from.
+ :param interface_name: Name of the specific interface. If None,
+ information about all VxLAN GPE interfaces is returned.
+ :type node: dict
+ :type interface_name: str
+ :returns: Dictionary containing data for the given VxLAN GPE interface
+ or if interface=None, the list of dictionaries with all VxLAN GPE
+ interfaces.
+ :rtype: dict or list
+ """
+ if interface_name is not None:
+ sw_if_index = InterfaceUtil.get_interface_index(
+ node, interface_name)
+ else:
+ sw_if_index = int(Constants.BITWISE_NON_ZERO)
+
+ cmd = 'vxlan_gpe_tunnel_dump'
+ cmd_reply = 'vxlan_gpe_tunnel_details'
+ args = dict(sw_if_index=sw_if_index)
+ err_msg = 'Failed to get VXLAN-GPE dump on host {host}'.format(
+ host=node['host'])
+ with PapiExecutor(node) as papi_exec:
+ papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
+
+ papi_vxlan_dump = papi_resp.reply[0]['api_reply']
+
+ def process_vxlan_gpe_dump(vxlan_dump):
+ """Process vxlan_gpe dump.
+
+ :param vxlan_dump: Vxlan_gpe nterface dump.
+ :type vxlan_dump: dict
+ :returns: Processed vxlan_gpe interface dump.
+ :rtype: dict
+ """
+ if vxlan_dump['is_ipv6']:
+ vxlan_dump['local'] = \
+ inet_ntop(AF_INET6, vxlan_dump['local'])
+ vxlan_dump['remote'] = \
+ inet_ntop(AF_INET6, vxlan_dump['remote'])
+ else:
+ vxlan_dump['local'] = \
+ inet_ntop(AF_INET, vxlan_dump['local'][0:4])
+ vxlan_dump['remote'] = \
+ inet_ntop(AF_INET, vxlan_dump['remote'][0:4])
+ return vxlan_dump
+
+ data = list() if interface_name is None else dict()
+ for item in papi_vxlan_dump:
+ if interface_name is None:
+ data.append(process_vxlan_gpe_dump(item[cmd_reply]))
+ elif item[cmd_reply]['sw_if_index'] == sw_if_index:
+ data = process_vxlan_gpe_dump(item[cmd_reply])
+ break
+
+ logger.debug('VXLAN-GPE data:\n{vxlan_gpe_data}'.format(
+ vxlan_gpe_data=data))
+ return data
+
+ @staticmethod
+ def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
+ """Assign VPP interface to specific VRF/FIB table.
+
+ :param node: VPP node where the FIB and interface are located.
+ :param interface: Interface to be assigned to FIB.
+ :param table_id: VRF table ID.
+ :param ipv6: Assign to IPv6 table. Default False.
+ :type node: dict
+ :type interface: str or int
+ :type table_id: int
+ :type ipv6: bool
+ """
+ cmd = 'sw_interface_set_table'
+ args = dict(
+ sw_if_index=InterfaceUtil.get_interface_index(node, interface),
+ is_ipv6=1 if ipv6 else 0,
+ vrf_id=int(table_id))
+ err_msg = 'Failed to assign interface {ifc} to FIB table'.format(
+ ifc=interface)
+ with PapiExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_replies(err_msg). \
+ verify_reply(err_msg=err_msg)
+
+ @staticmethod
+ def set_linux_interface_mac(node, interface, mac, namespace=None,
+ vf_id=None):
+ """Set MAC address for interface in linux.
+
+ :param node: Node where to execute command.
+ :param interface: Interface in namespace.
+ :param mac: MAC to be assigned to interface.
+ :param namespace: Execute command in namespace. Optional
+ :param vf_id: Virtual Function id. Optional
+ :type node: dict
+ :type interface: str
+ :type mac: str
+ :type namespace: str
+ :type vf_id: int
+ """
+ mac_str = 'vf {vf_id} mac {mac}'.format(vf_id=vf_id, mac=mac) \
+ if vf_id is not None else 'address {mac}'.format(mac=mac)
+ ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''
+
+ cmd = ('{ns} ip link set {interface} {mac}'.
+ format(ns=ns_str, interface=interface, mac=mac_str))
+ exec_cmd_no_error(node, cmd, sudo=True)
+
+ @staticmethod
+ def set_linux_interface_trust_on(node, interface, namespace=None,
+ vf_id=None):
+ """Set trust on (promisc) for interface in linux.
+
+ :param node: Node where to execute command.
+ :param interface: Interface in namespace.
+ :param namespace: Execute command in namespace. Optional
+ :param vf_id: Virtual Function id. Optional
+ :type node: dict
+ :type interface: str
+ :type namespace: str
+ :type vf_id: int
+ """
+ trust_str = 'vf {vf_id} trust on'.format(vf_id=vf_id) \
+ if vf_id is not None else 'trust on'
+ ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''
+
+ cmd = ('{ns} ip link set dev {interface} {trust}'.
+ format(ns=ns_str, interface=interface, trust=trust_str))
+ exec_cmd_no_error(node, cmd, sudo=True)
+
+ @staticmethod
+ def set_linux_interface_spoof_off(node, interface, namespace=None,
+ vf_id=None):
+ """Set spoof off for interface in linux.
+
+ :param node: Node where to execute command.
+ :param interface: Interface in namespace.
+ :param namespace: Execute command in namespace. Optional
+ :param vf_id: Virtual Function id. Optional
+ :type node: dict
+ :type interface: str
+ :type namespace: str
+ :type vf_id: int
+ """
+ spoof_str = 'vf {vf_id} spoof off'.format(vf_id=vf_id) \
+ if vf_id is not None else 'spoof off'
+ ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''
+
+ cmd = ('{ns} ip link set dev {interface} {spoof}'.
+ format(ns=ns_str, interface=interface, spoof=spoof_str))
+ exec_cmd_no_error(node, cmd, sudo=True)
+
    @staticmethod
    def init_avf_interface(node, ifc_key, numvfs=1, osi_layer='L2'):
        """Init PCI device by creating VFs and bind them to vfio-pci for AVF
        driver testing on DUT.

        :param node: DUT node.
        :param ifc_key: Interface key from topology file.
        :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
        :param osi_layer: OSI Layer type to initialize TG with.
            Default value "L2" sets linux interface spoof off.
        :type node: dict
        :type ifc_key: str
        :type numvfs: int
        :type osi_layer: str
        :returns: Virtual Function topology interface keys.
        :rtype: list
        :raises RuntimeError: If a reason preventing initialization is found.
        """
        # NOTE(review): "ssh" is never used below; the connect() call is
        # presumably kept for its side effect of establishing the SSH
        # session to the node - confirm before removing.
        ssh = SSH()
        ssh.connect(node)

        # Read PCI address and driver.
        pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
        pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
        uio_driver = Topology.get_uio_driver(node)
        kernel_driver = Topology.get_interface_driver(node, ifc_key)
        # AVF is only supported on top of the i40e PF driver.
        if kernel_driver != "i40e":
            raise RuntimeError(
                "AVF needs i40e driver, not {driver} at node {host} ifc {ifc}"\
                .format(driver=kernel_driver, host=node["host"], ifc=ifc_key))
        # Colon must be escaped for the shell command built downstream.
        current_driver = DUTSetup.get_pci_dev_driver(
            node, pf_pci_addr.replace(':', r'\:'))

        VPPUtil.stop_vpp_service(node)
        if current_driver != kernel_driver:
            # PCI device must be re-bound to kernel driver before creating VFs.
            DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
            # Stop VPP to prevent deadlock.
            # Unbind from current driver.
            DUTSetup.pci_driver_unbind(node, pf_pci_addr)
            # Bind to kernel driver.
            DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)

        # Initialize PCI VFs
        DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)

        vf_ifc_keys = []
        # Set MAC address and bind each virtual function to uio driver.
        for vf_id in range(numvfs):
            # VF MAC built from the PF MAC: octet [1] is dropped and the
            # VF id appended as the last octet, keeping six octets total
            # while making each VF MAC unique per VF id.
            vf_mac_addr = ":".join([pf_mac_addr[0], pf_mac_addr[2],
                                    pf_mac_addr[3], pf_mac_addr[4],
                                    pf_mac_addr[5], "{:02x}".format(vf_id)])

            # Backquoted expression resolves the PF netdev name on the DUT
            # at command execution time.
            pf_dev = '`basename /sys/bus/pci/devices/{pci}/net/*`'.\
                format(pci=pf_pci_addr)
            InterfaceUtil.set_linux_interface_trust_on(node, pf_dev,
                                                       vf_id=vf_id)
            if osi_layer == 'L2':
                InterfaceUtil.set_linux_interface_spoof_off(node, pf_dev,
                                                            vf_id=vf_id)
            InterfaceUtil.set_linux_interface_mac(node, pf_dev, vf_mac_addr,
                                                  vf_id=vf_id)

            # Re-bind the VF from its current driver to the uio driver.
            DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
            DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)

            # Add newly created ports into topology file
            vf_ifc_name = '{pf_if_key}_vf'.format(pf_if_key=ifc_key)
            vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
            vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
            Topology.update_interface_name(node, vf_ifc_key,
                                           vf_ifc_name+str(vf_id+1))
            Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
            Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
            vf_ifc_keys.append(vf_ifc_key)

        return vf_ifc_keys
+
+ @staticmethod
+ def vpp_create_multiple_vxlan_ipv4_tunnels(
+ node, node_vxlan_if, node_vlan_if, op_node, op_node_if,
+ n_tunnels, vni_start, src_ip_start, dst_ip_start, ip_step, ip_limit,
+ bd_id_start):
+ """Create multiple VXLAN tunnel interfaces and VLAN sub-interfaces on
+ VPP node.
+
+ Put each pair of VXLAN tunnel interface and VLAN sub-interface to
+ separate bridge-domain.
+
+ :param node: VPP node to create VXLAN tunnel interfaces.
+ :param node_vxlan_if: VPP node interface key to create VXLAN tunnel
+ interfaces.
+ :param node_vlan_if: VPP node interface key to create VLAN
+ sub-interface.
+ :param op_node: Opposite VPP node for VXLAN tunnel interfaces.
+ :param op_node_if: Opposite VPP node interface key for VXLAN tunnel
+ interfaces.
+ :param n_tunnels: Number of tunnel interfaces to create.
+ :param vni_start: VNI start ID.
+ :param src_ip_start: VXLAN tunnel source IP address start.
+ :param dst_ip_start: VXLAN tunnel destination IP address start.
+ :param ip_step: IP address incremental step.
+ :param ip_limit: IP address limit.
+ :param bd_id_start: Bridge-domain ID start.
+ :type node: dict
+ :type node_vxlan_if: str
+ :type node_vlan_if: str
+ :type op_node: dict
+ :type op_node_if: str
+ :type n_tunnels: int
+ :type vni_start: int
+ :type src_ip_start: str
+ :type dst_ip_start: str
+ :type ip_step: int
+ :type ip_limit: str
+ :type bd_id_start: int
+ """
+ # configure IPs, create VXLAN interfaces and VLAN sub-interfaces
+ vxlan_count = InterfaceUtil.vpp_create_vxlan_and_vlan_interfaces(
+ node, node_vxlan_if, node_vlan_if, n_tunnels, vni_start,
+ src_ip_start, dst_ip_start, ip_step, ip_limit)
+
+ # update topology with VXLAN interfaces and VLAN sub-interfaces data
+ # and put interfaces up
+ InterfaceUtil.vpp_put_vxlan_and_vlan_interfaces_up(
+ node, vxlan_count, node_vlan_if)
+
+ # configure bridge domains, ARPs and routes
+ InterfaceUtil.vpp_put_vxlan_and_vlan_interfaces_to_bridge_domain(
+ node, node_vxlan_if, vxlan_count, op_node, op_node_if, dst_ip_start,
+ ip_step, bd_id_start)
+
+ @staticmethod
+ def vpp_create_vxlan_and_vlan_interfaces(
+ node, node_vxlan_if, node_vlan_if, vxlan_count, vni_start,
+ src_ip_start, dst_ip_start, ip_step, ip_limit):
+ """
+ Configure IPs, create VXLAN interfaces and VLAN sub-interfaces on VPP
+ node.
+
+ :param node: VPP node.
+ :param node_vxlan_if: VPP node interface key to create VXLAN tunnel
+ interfaces.
+ :param node_vlan_if: VPP node interface key to create VLAN
+ sub-interface.
+ :param vxlan_count: Number of tunnel interfaces to create.
+ :param vni_start: VNI start ID.
+ :param src_ip_start: VXLAN tunnel source IP address start.
+ :param dst_ip_start: VXLAN tunnel destination IP address start.
+ :param ip_step: IP address incremental step.
+ :param ip_limit: IP address limit.
+ :type node: dict
+ :type node_vxlan_if: str
+ :type node_vlan_if: str
+ :type vxlan_count: int
+ :type vni_start: int
+ :type src_ip_start: str
+ :type dst_ip_start: str
+ :type ip_step: int
+ :type ip_limit: str
+ :returns: Number of created VXLAN interfaces.
+ :rtype: int
+ """
+ try:
+ src_address_start = IPv6Address(unicode(src_ip_start))
+ dst_address_start = IPv6Address(unicode(dst_ip_start))
+ ip_address_limit = IPv6Address(unicode(ip_limit))
+ af_inet = AF_INET6
+ is_ipv6 = 1
+ except (AddressValueError, NetmaskValueError):
+ src_address_start = IPv4Address(unicode(src_ip_start))
+ dst_address_start = IPv4Address(unicode(dst_ip_start))
+ ip_address_limit = IPv4Address(unicode(ip_limit))
+ af_inet = AF_INET
+ is_ipv6 = 0
+
+ with PapiExecutor(node) as papi_exec:
+ for i in xrange(0, vxlan_count):
+ src_ip = src_address_start + i * ip_step
+ dst_ip = dst_address_start + i * ip_step
+ if src_ip > ip_address_limit or dst_ip > ip_address_limit:
+ logger.warn("Can't do more iterations - IP address limit "
+ "has been reached.")
+ vxlan_count = i
+ break
+ cmd = 'sw_interface_add_del_address'
+ args = dict(
+ sw_if_index=InterfaceUtil.get_interface_index(
+ node, node_vxlan_if),
+ is_add=1,
+ is_ipv6=0,
+ del_all=0,
+ address_length=128 if is_ipv6 else 32,
+ address=inet_pton(af_inet, str(src_ip)))
+ papi_exec.add(cmd, **args)
+ cmd = 'vxlan_add_del_tunnel'
+ args = dict(
+ is_add=1,
+ is_ipv6=0,
+ instance=Constants.BITWISE_NON_ZERO,
+ src_address=inet_pton(af_inet, str(src_ip)),
+ dst_address=inet_pton(af_inet, str(dst_ip)),
+ mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
+ encap_vrf_id=0,
+ decap_next_index=Constants.BITWISE_NON_ZERO,
+ vni=int(vni_start)+i)
+ papi_exec.add(cmd, **args)
+ cmd = 'create_vlan_subif'
+ args = dict(
+ sw_if_index=InterfaceUtil.get_interface_index(
+ node, node_vlan_if),
+ vlan_id=i+1)
+ papi_exec.add(cmd, **args)
+ papi_exec.get_replies().verify_replies()
+
+ return vxlan_count
+
    @staticmethod
    def vpp_put_vxlan_and_vlan_interfaces_up(node, vxlan_count, node_vlan_if):
        """
        Update topology with VXLAN interfaces and VLAN sub-interfaces data
        and put interfaces up.

        :param node: VPP node.
        :param vxlan_count: Number of tunnel interfaces.
        :param node_vlan_if: VPP node interface key where VLAN sub-interfaces
            have been created.
        :type node: dict
        :type vxlan_count: int
        :type node_vlan_if: str
        """
        # One interface dump serves the lookups in every iteration below.
        if_data = InterfaceUtil.vpp_get_interface_data(node)

        with PapiExecutor(node) as papi_exec:
            for i in xrange(0, vxlan_count):
                # Expected names follow the creation convention of
                # vpp_create_vxlan_and_vlan_interfaces: tunnel index i,
                # VLAN id i+1.
                vxlan_subif_key = Topology.add_new_port(node, 'vxlan_tunnel')
                vxlan_subif_name = 'vxlan_tunnel{nr}'.format(nr=i)
                vxlan_found = False
                vxlan_subif_idx = None
                vlan_subif_key = Topology.add_new_port(node, 'vlan_subif')
                vlan_subif_name = '{if_name}.{vlan}'.format(
                    if_name=Topology.get_interface_name(
                        node, node_vlan_if), vlan=i+1)
                vlan_found = False
                vlan_idx = None
                # Find sw_if_index of both interfaces in the dump; stop as
                # soon as both were seen.
                for data in if_data:
                    if not vxlan_found \
                            and data['interface_name'] == vxlan_subif_name:
                        vxlan_subif_idx = data['sw_if_index']
                        vxlan_found = True
                    elif not vlan_found \
                            and data['interface_name'] == vlan_subif_name:
                        vlan_idx = data['sw_if_index']
                        vlan_found = True
                    if vxlan_found and vlan_found:
                        break
                Topology.update_interface_sw_if_index(
                    node, vxlan_subif_key, vxlan_subif_idx)
                Topology.update_interface_name(
                    node, vxlan_subif_key, vxlan_subif_name)
                cmd = 'sw_interface_set_flags'
                args1 = dict(sw_if_index=vxlan_subif_idx,
                             admin_up_down=1)
                Topology.update_interface_sw_if_index(
                    node, vlan_subif_key, vlan_idx)
                Topology.update_interface_name(
                    node, vlan_subif_key, vlan_subif_name)
                args2 = dict(sw_if_index=vlan_idx,
                             admin_up_down=1)
                # Queue both admin-up commands; executed in one batch below.
                papi_exec.add(cmd, **args1).add(cmd, **args2)
            papi_exec.get_replies().verify_replies()
+
+ @staticmethod
+ def vpp_put_vxlan_and_vlan_interfaces_to_bridge_domain(
+ node, node_vxlan_if, vxlan_count, op_node, op_node_if, dst_ip_start,
+ ip_step, bd_id_start):
+ """
+ Configure ARPs and routes for VXLAN interfaces and put each pair of
+ VXLAN tunnel interface and VLAN sub-interface to separate bridge-domain.
+
+ :param node: VPP node.
+ :param node_vxlan_if: VPP node interface key where VXLAN tunnel
+ interfaces have been created.
+ :param vxlan_count: Number of tunnel interfaces.
+ :param op_node: Opposite VPP node for VXLAN tunnel interfaces.
+ :param op_node_if: Opposite VPP node interface key for VXLAN tunnel
+ interfaces.
+ :param dst_ip_start: VXLAN tunnel destination IP address start.
+ :param ip_step: IP address incremental step.
+ :param bd_id_start: Bridge-domain ID start.
+ :type node: dict
+ :type node_vxlan_if: str
+ :type vxlan_count: int
+ :type op_node: dict
+ :type op_node_if:
+ :type dst_ip_start: str
+ :type ip_step: int
+ :type bd_id_start: int
+ """
+ try:
+ dst_address_start = IPv6Address(unicode(dst_ip_start))
+ af_inet = AF_INET6
+ is_ipv6 = 1
+ except (AddressValueError, NetmaskValueError):
+ dst_address_start = IPv4Address(unicode(dst_ip_start))
+ af_inet = AF_INET
+ is_ipv6 = 0
+
+ with PapiExecutor(node) as papi_exec:
+ for i in xrange(0, vxlan_count):
+ dst_ip = dst_address_start + i * ip_step
+ neighbor = dict(
+ sw_if_index=Topology.get_interface_sw_index(
+ node, node_vxlan_if),
+ flags=0,
+ mac_address=str(
+ Topology.get_interface_mac(op_node, op_node_if)),
+ ip_address=str(dst_ip))
+ cmd = 'ip_neighbor_add_del'
+ args = dict(
+ is_add=1,
+ neighbor=neighbor)
+ papi_exec.add(cmd, **args)
+ cmd = 'ip_add_del_route'
+ args = dict(
+ next_hop_sw_if_index=Topology.get_interface_sw_index(
+ node, node_vxlan_if),
+ table_id=0,
+ is_add=1,
+ is_ipv6=is_ipv6,
+ next_hop_weight=1,
+ next_hop_proto=1 if is_ipv6 else 0,
+ dst_address_length=128 if is_ipv6 else 32,
+ dst_address=inet_pton(af_inet, str(dst_ip)),
+ next_hop_address=inet_pton(af_inet, str(dst_ip)))
+ papi_exec.add(cmd, **args)
+ cmd = 'sw_interface_set_l2_bridge'
+ args = dict(
+ rx_sw_if_index=Topology.get_interface_sw_index(
+ node, 'vxlan_tunnel{nr}'.format(nr=i+1)),
+ bd_id=int(bd_id_start+i),
+ shg=0,
+ port_type=0,
+ enable=1)
+ papi_exec.add(cmd, **args)
+ args = dict(
+ rx_sw_if_index=Topology.get_interface_sw_index(
+ node, 'vlan_subif{nr}'.format(nr=i+1)),
+ bd_id=int(bd_id_start+i),
+ shg=0,
+ port_type=0,
+ enable=1)
+ papi_exec.add(cmd, **args)
+ papi_exec.get_replies().verify_replies()
+
+ @staticmethod
+ def vpp_sw_interface_rx_placement_dump(node):
+ """Dump VPP interface RX placement on node.
+
+ :param node: Node to run command on.
+ :type node: dict
+ :returns: Thread mapping information as a list of dictionaries.
+ :rtype: list
+ """
+ cmd = 'sw_interface_rx_placement_dump'
+ cmd_reply = 'sw_interface_rx_placement_details'
+ err_msg = "Failed to run '{cmd}' PAPI command on host {host}!".format(
+ cmd=cmd, host=node['host'])
+ with PapiExecutor(node) as papi_exec:
+ for ifc in node['interfaces'].values():
+ if ifc['vpp_sw_index'] is not None:
+ papi_exec.add(cmd, sw_if_index=ifc['vpp_sw_index'])
+ papi_resp = papi_exec.get_dump(err_msg)
+ thr_mapping = [s[cmd_reply] for r in papi_resp.reply
+ for s in r['api_reply']]
+ return sorted(thr_mapping, key=lambda k: k['sw_if_index'])
+
+ @staticmethod
+ def vpp_sw_interface_set_rx_placement(node, sw_if_index, queue_id,
+ worker_id):
+ """Set interface RX placement to worker on node.
+
+ :param node: Node to run command on.
+ :param sw_if_index: VPP SW interface index.
+ :param queue_id: VPP interface queue ID.
+ :param worker_id: VPP worker ID (indexing from 0).
+ :type node: dict
+ :type sw_if_index: int
+ :type queue_id: int
+ :type worker_id: int
+ :raises RuntimeError: If failed to run command on host or if no API
+ reply received.
+ """
+ cmd = 'sw_interface_set_rx_placement'
+ err_msg = "Failed to set interface RX placement to worker on host " \
+ "{host}!".format(host=node['host'])
+ args = dict(sw_if_index=sw_if_index, queue_id=queue_id,
+ worker_id=worker_id)
+ with PapiExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_replies(err_msg).\
+ verify_reply(err_msg=err_msg)
+
+ @staticmethod
+ def vpp_round_robin_rx_placement(node, prefix):
+ """Set Round Robin interface RX placement on all worker threads
+ on node.
+
+ :param node: Topology nodes.
+ :param prefix: Interface name prefix.
+ :type node: dict
+ :type prefix: str
+ """
+ worker_id = 0
+ worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
+ for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
+ for interface in node['interfaces'].values():
+ if placement['sw_if_index'] == interface['vpp_sw_index'] \
+ and prefix in interface['name']:
+ InterfaceUtil.vpp_sw_interface_set_rx_placement(
+ node, placement['sw_if_index'], placement['queue_id'],
+ worker_id % worker_cnt)
+ worker_id += 1
+
+ @staticmethod
+ def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix):
+ """Set Round Robin interface RX placement on all worker threads
+ on all DUTs.
+
+ :param nodes: Topology nodes.
+ :param prefix: Interface name prefix.
+ :type nodes: dict
+ :type prefix: str
+ """
+ for node in nodes.values():
+ if node['type'] == NodeType.DUT:
+ InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)