Update of VPP_STABLE_VER files + quick fix for gre create tunnel
[csit.git] / resources/libraries/python/InterfaceUtil.py
index 290db1d..3f26809 100644
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""Interface util library"""
+"""Interface util library."""
 
 
+from socket import AF_INET, AF_INET6, inet_ntop, inet_pton
+from socket import error as inet_error
 from time import time, sleep
 
 from robot.api import logger
 
-from resources.libraries.python.ssh import SSH
-from resources.libraries.python.IPUtil import convert_ipv4_netmask_prefix
+from resources.libraries.python.Constants import Constants
+from resources.libraries.python.CpuUtils import CpuUtils
 from resources.libraries.python.DUTSetup import DUTSetup
-from resources.libraries.python.ssh import exec_cmd_no_error
+from resources.libraries.python.IPUtil import convert_ipv4_netmask_prefix
+from resources.libraries.python.IPUtil import IPUtil
+from resources.libraries.python.PapiExecutor import PapiExecutor
+from resources.libraries.python.parsers.JsonParser import JsonParser
+from resources.libraries.python.ssh import SSH, exec_cmd_no_error
 from resources.libraries.python.topology import NodeType, Topology
 from resources.libraries.python.VatExecutor import VatExecutor, VatTerminal
 from resources.libraries.python.VatJsonUtil import VatJsonUtil
 from resources.libraries.python.VPPUtil import VPPUtil
-from resources.libraries.python.parsers.JsonParser import JsonParser
-from resources.libraries.python.CpuUtils import CpuUtils
+
 
 class InterfaceUtil(object):
     """General utilities for managing interfaces"""
 
@@ -519,7 +525,7 @@ class InterfaceUtil(object):
                 InterfaceUtil.update_nic_interface_names(node)
 
     @staticmethod
-    def update_tg_interface_data_on_node(node):
+    def update_tg_interface_data_on_node(node, skip_tg_udev=False):
         """Update interface name for TG/linux node in DICT__nodes.
 
         .. note::
         """Update interface name for TG/linux node in DICT__nodes.
 
         .. note::
@@ -530,10 +536,10 @@ class InterfaceUtil(object):
             "52:54:00:e1:8a:0f": "eth2"
             "00:00:00:00:00:00": "lo"
 
             "52:54:00:e1:8a:0f": "eth2"
             "00:00:00:00:00:00": "lo"
 
-        .. note:: TODO: parse lshw -json instead
-
         :param node: Node selected from DICT__nodes.
+        :param skip_tg_udev: Skip udev rename on TG node.
         :type node: dict
+        :type skip_tg_udev: bool
         :raises RuntimeError: If getting of interface name and MAC fails.
         """
         # First setup interface driver specified in yaml file
@@ -558,7 +564,8 @@ class InterfaceUtil(object):
             interface['name'] = name
 
         # Set udev rules for interfaces
-        InterfaceUtil.tg_set_interfaces_udev_rules(node)
+        if not skip_tg_udev:
+            InterfaceUtil.tg_set_interfaces_udev_rules(node)
 
     @staticmethod
     def iface_update_numa_node(node):
@@ -587,15 +594,15 @@ class InterfaceUtil(object):
                             else:
                                 raise ValueError
                     except ValueError:
-                        logger.trace('Reading numa location failed for: {0}'\
-                            .format(if_pci))
+                        logger.trace('Reading numa location failed for: {0}'
+                                     .format(if_pci))
                     else:
                         Topology.set_interface_numa_node(node, if_key,
                                                          numa_node)
                         break
             else:
-                raise RuntimeError('Update numa node failed for: {0}'\
-                    .format(if_pci))
+                raise RuntimeError('Update numa node failed for: {0}'
+                                   .format(if_pci))
 
     @staticmethod
     def update_all_numa_nodes(nodes, skip_tg=False):
@@ -616,6 +623,7 @@ class InterfaceUtil(object):
 
     @staticmethod
     def update_all_interface_data_on_all_nodes(nodes, skip_tg=False,
+                                               skip_tg_udev=False,
                                                numa_node=False):
         """Update interface names on all nodes in DICT__nodes.
 
@@ -623,17 +631,20 @@ class InterfaceUtil(object):
         of all nodes mentioned in the topology dictionary.
 
         :param nodes: Nodes in the topology.
-        :param skip_tg: Skip TG node
+        :param skip_tg: Skip TG node.
+        :param skip_tg_udev: Skip udev rename on TG node.
         :param numa_node: Retrieve numa_node location.
         :type nodes: dict
         :type skip_tg: bool
+        :type skip_tg_udev: bool
         :type numa_node: bool
         """
         for node_data in nodes.values():
             if node_data['type'] == NodeType.DUT:
                 InterfaceUtil.update_vpp_interface_data_on_node(node_data)
             elif node_data['type'] == NodeType.TG and not skip_tg:
-                InterfaceUtil.update_tg_interface_data_on_node(node_data)
+                InterfaceUtil.update_tg_interface_data_on_node(
+                    node_data, skip_tg_udev)
 
             if numa_node:
                 if node_data['type'] == NodeType.DUT:
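
The only behavioural change in this keyword is that the TG udev rename step can now be skipped via skip_tg_udev. A minimal sketch of driving the updated keyword from Python follows; the nodes dictionary stands in for CSIT's DICT__nodes topology and the wrapper function is hypothetical, not part of this commit:

    # Hypothetical caller sketch (illustrative only).
    from resources.libraries.python.InterfaceUtil import InterfaceUtil

    def refresh_interface_data(nodes):
        # Refresh DUT and TG interface data, but leave the TG interface
        # names untouched by skipping the udev rename step.
        InterfaceUtil.update_all_interface_data_on_all_nodes(
            nodes, skip_tg=False, skip_tg_udev=True, numa_node=True)
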
@@ -663,13 +674,13 @@ class InterfaceUtil(object):
                                                sw_if_index=sw_if_index,
                                                vlan=vlan)
         if output[0]["retval"] == 0:
-            sw_subif_idx = output[0]["sw_if_index"]
+            sw_vlan_idx = output[0]["sw_if_index"]
             logger.trace('VLAN subinterface with sw_if_index {} and VLAN ID {} '
-                         'created on node {}'.format(sw_subif_idx,
+                         'created on node {}'.format(sw_vlan_idx,
                                                      vlan, node['host']))
             if_key = Topology.add_new_port(node, "vlan_subif")
-            Topology.update_interface_sw_if_index(node, if_key, sw_subif_idx)
-            ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_subif_idx)
+            Topology.update_interface_sw_if_index(node, if_key, sw_vlan_idx)
+            ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_vlan_idx)
             Topology.update_interface_name(node, if_key, ifc_name)
         else:
             raise RuntimeError('Unable to create VLAN subinterface on node {}'
@@ -678,7 +689,7 @@ class InterfaceUtil(object):
         with VatTerminal(node, False) as vat:
             vat.vat_terminal_exec_cmd('exec show interfaces')
 
-        return '{}.{}'.format(interface, vlan), sw_subif_idx
+        return '{}.{}'.format(interface, vlan), sw_vlan_idx
 
     @staticmethod
     def create_vxlan_interface(node, vni, source_ip, destination_ip):
@@ -840,12 +851,12 @@ class InterfaceUtil(object):
                                                type_subif=type_subif)
 
         if output[0]["retval"] == 0:
-            sw_subif_idx = output[0]["sw_if_index"]
+            sw_vlan_idx = output[0]["sw_if_index"]
             logger.trace('Created subinterface with index {}'
-                         .format(sw_subif_idx))
+                         .format(sw_vlan_idx))
             if_key = Topology.add_new_port(node, "subinterface")
             if_key = Topology.add_new_port(node, "subinterface")
-            Topology.update_interface_sw_if_index(node, if_key, sw_subif_idx)
-            ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_subif_idx)
+            Topology.update_interface_sw_if_index(node, if_key, sw_vlan_idx)
+            ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_vlan_idx)
             Topology.update_interface_name(node, if_key, ifc_name)
         else:
             raise RuntimeError('Unable to create sub-interface on node {}'
@@ -855,7 +866,7 @@ class InterfaceUtil(object):
             vat.vat_terminal_exec_cmd('exec show interfaces')
 
         name = '{}.{}'.format(interface, sub_id)
-        return name, sw_subif_idx
+        return name, sw_vlan_idx
 
     @staticmethod
     def create_gre_tunnel_interface(node, source_ip, destination_ip):
@@ -871,28 +882,38 @@ class InterfaceUtil(object):
         :rtype: tuple
         :raises RuntimeError: If unable to create GRE tunnel interface.
         """
-        output = VatExecutor.cmd_from_template(node, "create_gre.vat",
-                                               src=source_ip,
-                                               dst=destination_ip)
-        output = output[0]
 
-        if output["retval"] == 0:
-            sw_if_idx = output["sw_if_index"]
-
-            vat_executor = VatExecutor()
-            vat_executor.execute_script_json_out("dump_interfaces.vat", node)
-            interface_dump_json = vat_executor.get_script_stdout()
-            name = VatJsonUtil.get_interface_name_from_json(
-                interface_dump_json, sw_if_idx)
-
-            if_key = Topology.add_new_port(node, "gre_tunnel")
-            Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
-            Topology.update_interface_name(node, if_key, name)
+        try:
+            src_address = inet_pton(AF_INET6, source_ip)
+            dst_address = inet_pton(AF_INET6, destination_ip)
+            is_ipv6 = 1
+        except inet_error:
+            src_address = inet_pton(AF_INET, source_ip)
+            dst_address = inet_pton(AF_INET, destination_ip)
+            is_ipv6 = 0
+
+        cmd = 'gre_tunnel_add_del'
+        tunnel = dict(type=0,
+                      instance=Constants.BITWISE_NON_ZERO,
+                      src=src_address,
+                      dst=dst_address,
+                      outer_fib_id=0,
+                      session_id=0)
+        args = dict(is_add=1,
+                    tunnel=tunnel)
+        err_msg = 'Failed to create GRE tunnel interface on host {host}'.format(
+            host=node['host'])
+        with PapiExecutor(node) as papi_exec:
+            papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
+                verify_reply(err_msg=err_msg)
+
+        sw_if_idx = papi_resp['sw_if_index']
+        if_key = Topology.add_new_port(node, 'gre_tunnel')
+        Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
+        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
+        Topology.update_interface_name(node, if_key, ifc_name)
 
-            return name, sw_if_idx
-        else:
-            raise RuntimeError('Unable to create GRE tunnel on node {}.'
-                               .format(node))
+        return ifc_name, sw_if_idx
 
     @staticmethod
     def vpp_create_loopback(node):
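
The rewritten create_gre_tunnel_interface above replaces the VAT template with a PAPI call and decides between IPv4 and IPv6 endpoints by attempting an IPv6 parse first, falling back to IPv4. A standalone sketch of that address handling, with a hypothetical helper name and none of the PAPI plumbing:

    # Sketch of the try/except address detection used above
    # (helper name is illustrative only).
    from socket import AF_INET, AF_INET6, inet_pton
    from socket import error as inet_error

    def pack_tunnel_address(ip_str):
        """Return (packed_address, is_ipv6) for a GRE tunnel endpoint."""
        try:
            return inet_pton(AF_INET6, ip_str), 1
        except inet_error:
            return inet_pton(AF_INET, ip_str), 0

    # pack_tunnel_address('192.0.2.1') returns 4 packed bytes and is_ipv6=0;
    # pack_tunnel_address('2001:db8::1') returns 16 packed bytes and is_ipv6=1.
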
@@ -983,21 +1004,27 @@ class InterfaceUtil(object):
         Topology.update_interface_mac_address(node, if_key, ifc_mac)
 
     @staticmethod
-    def vpp_create_avf_interface(node, vf_pci_addr):
+    def vpp_create_avf_interface(node, vf_pci_addr, num_rx_queues=None):
         """Create AVF interface on VPP node.
 
         :param node: DUT node from topology.
         :param vf_pci_addr: Virtual Function PCI address.
         """Create AVF interface on VPP node.
 
         :param node: DUT node from topology.
         :param vf_pci_addr: Virtual Function PCI address.
+        :param num_rx_queues: Number of RX queues.
         :type node: dict
         :type vf_pci_addr: str
+        :type num_rx_queues: int
         :returns: Interface key (name) in topology.
         :rtype: str
         :raises RuntimeError: If it is not possible to create AVF interface on
             the node.
         """
+        num_rx_queues = 'num-rx-queues {num_rx_queues}'\
+            .format(num_rx_queues=num_rx_queues) if num_rx_queues else ''
+
         with VatTerminal(node, json_param=False) as vat:
             vat.vat_terminal_exec_cmd_from_template('create_avf_interface.vat',
-                                                    vf_pci_addr=vf_pci_addr)
+                                                    vf_pci_addr=vf_pci_addr,
+                                                    num_rx_queues=num_rx_queues)
             output = vat.vat_stdout
 
         if output is not None:
@@ -1312,18 +1339,19 @@ class InterfaceUtil(object):
         exec_cmd_no_error(node, cmd, sudo=True)
 
     @staticmethod
-    def init_avf_interface(node, ifc_key, numvfs=1, topology_type='L2'):
+    def init_avf_interface(node, ifc_key, numvfs=1, osi_layer='L2'):
         """Init PCI device by creating VFs and bind them to vfio-pci for AVF
         driver testing on DUT.
 
         :param node: DUT node.
         """Init PCI device by creating VFs and bind them to vfio-pci for AVF
         driver testing on DUT.
 
         :param node: DUT node.
-        :param iface_key: Interface key from topology file.
+        :param ifc_key: Interface key from topology file.
         :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
         :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
-        :param topology_type: Topology type.
+        :param osi_layer: OSI Layer type to initialize TG with.
+            Default value "L2" sets linux interface spoof off.
         :type node: dict
-        :iface_key: str
+        :type ifc_key: str
         :type numvfs: int
-        :typ topology_type: str
+        :type osi_layer: str
         :returns: Virtual Function topology interface keys.
         :rtype: list
         """
@@ -1335,14 +1363,14 @@ class InterfaceUtil(object):
         pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
         uio_driver = Topology.get_uio_driver(node)
         kernel_driver = Topology.get_interface_driver(node, ifc_key)
-        current_driver = DUTSetup.get_pci_dev_driver(node,\
-            pf_pci_addr.replace(':', r'\:'))
+        current_driver = DUTSetup.get_pci_dev_driver(
+            node, pf_pci_addr.replace(':', r'\:'))
 
+        VPPUtil.stop_vpp_service(node)
         if current_driver != kernel_driver:
             # PCI device must be re-bound to kernel driver before creating VFs.
             DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
             # Stop VPP to prevent deadlock.
-            VPPUtil.stop_vpp_service(node)
             # Unbind from current driver.
             DUTSetup.pci_driver_unbind(node, pf_pci_addr)
             # Bind to kernel driver.
@@ -1362,7 +1390,7 @@ class InterfaceUtil(object):
                 format(pci=pf_pci_addr)
             InterfaceUtil.set_linux_interface_trust_on(node, pf_dev,
                                                        vf_id=vf_id)
-            if topology_type == 'L2':
+            if osi_layer == 'L2':
                 InterfaceUtil.set_linux_interface_spoof_off(node, pf_dev,
                                                             vf_id=vf_id)
             InterfaceUtil.set_linux_interface_mac(node, pf_dev, vf_mac_addr,
@@ -1382,3 +1410,324 @@ class InterfaceUtil(object):
             vf_ifc_keys.append(vf_ifc_key)
 
         return vf_ifc_keys
+
+    @staticmethod
+    def vpp_create_multiple_vxlan_ipv4_tunnels(
+            node, node_vxlan_if, node_vlan_if, op_node, op_node_if,
+            n_tunnels, vni_start, src_ip_start, dst_ip_start, ip_step, ip_limit,
+            bd_id_start):
+        """Create multiple VXLAN tunnel interfaces and VLAN sub-interfaces on
+        VPP node.
+
+        Put each pair of VXLAN tunnel interface and VLAN sub-interface to
+        separate bridge-domain.
+
+        :param node: VPP node to create VXLAN tunnel interfaces.
+        :param node_vxlan_if: VPP node interface key to create VXLAN tunnel
+            interfaces.
+        :param node_vlan_if: VPP node interface key to create VLAN
+            sub-interface.
+        :param op_node: Opposite VPP node for VXLAN tunnel interfaces.
+        :param op_node_if: Opposite VPP node interface key for VXLAN tunnel
+            interfaces.
+        :param n_tunnels: Number of tunnel interfaces to create.
+        :param vni_start: VNI start ID.
+        :param src_ip_start: VXLAN tunnel source IP address start.
+        :param dst_ip_start: VXLAN tunnel destination IP address start.
+        :param ip_step: IP address incremental step.
+        :param ip_limit: IP address limit.
+        :param bd_id_start: Bridge-domain ID start.
+        :type node: dict
+        :type node_vxlan_if: str
+        :type node_vlan_if: str
+        :type op_node: dict
+        :type op_node_if: str
+        :type n_tunnels: int
+        :type vni_start: int
+        :type src_ip_start: str
+        :type dst_ip_start: str
+        :type ip_step: int
+        :type ip_limit: str
+        :type bd_id_start: int
+        """
+        # configure IPs, create VXLAN interfaces and VLAN sub-interfaces
+        vxlan_count = InterfaceUtil.vpp_create_vxlan_and_vlan_interfaces(
+            node, node_vxlan_if, node_vlan_if, n_tunnels, vni_start,
+            src_ip_start, dst_ip_start, ip_step, ip_limit)
+
+        # update topology with VXLAN interfaces and VLAN sub-interfaces data
+        # and put interfaces up
+        InterfaceUtil.vpp_put_vxlan_and_vlan_interfaces_up(
+            node, vxlan_count, node_vlan_if)
+
+        # configure bridge domains, ARPs and routes
+        InterfaceUtil.vpp_put_vxlan_and_vlan_interfaces_to_bridge_domain(
+            node, node_vxlan_if, vxlan_count, op_node, op_node_if, dst_ip_start,
+            ip_step, bd_id_start)
+
+    @staticmethod
+    def vpp_create_vxlan_and_vlan_interfaces(
+            node, node_vxlan_if, node_vlan_if, vxlan_count, vni_start,
+            src_ip_start, dst_ip_start, ip_step, ip_limit):
+        """
+        Configure IPs, create VXLAN interfaces and VLAN sub-interfaces on VPP
+        node.
+
+        :param node: VPP node.
+        :param node_vxlan_if: VPP node interface key to create VXLAN tunnel
+            interfaces.
+        :param node_vlan_if: VPP node interface key to create VLAN
+            sub-interface.
+        :param vxlan_count: Number of tunnel interfaces to create.
+        :param vni_start: VNI start ID.
+        :param src_ip_start: VXLAN tunnel source IP address start.
+        :param dst_ip_start: VXLAN tunnel destination IP address start.
+        :param ip_step: IP address incremental step.
+        :param ip_limit: IP address limit.
+        :type node: dict
+        :type node_vxlan_if: str
+        :type node_vlan_if: str
+        :type vxlan_count: int
+        :type vni_start: int
+        :type src_ip_start: str
+        :type dst_ip_start: str
+        :type ip_step: int
+        :type ip_limit: str
+        :returns: Number of created VXLAN interfaces.
+        :rtype: int
+        """
+        commands = list()
+
+        src_ip_start_int = IPUtil.ip_to_int(src_ip_start)
+        dst_ip_start_int = IPUtil.ip_to_int(dst_ip_start)
+        ip_limit_int = IPUtil.ip_to_int(ip_limit)
+
+        tmp_fn = '/tmp/create_vxlan_interfaces.config'
+        for i in range(0, vxlan_count):
+            src_ip_int = src_ip_start_int + i * ip_step
+            dst_ip_int = dst_ip_start_int + i * ip_step
+            if src_ip_int > ip_limit_int or dst_ip_int > ip_limit_int:
+                logger.warn("Can't do more iterations - IPv4 address limit "
+                            "has been reached.")
+                vxlan_count = i
+                break
+            src_ip = IPUtil.int_to_ip(src_ip_int)
+            dst_ip = IPUtil.int_to_ip(dst_ip_int)
+            commands.append(
+                'sw_interface_add_del_address sw_if_index {sw_idx} {ip}/32\n'
+                .format(sw_idx=Topology.get_interface_sw_index(
+                    node, node_vxlan_if), ip=src_ip))
+            commands.append(
+                'vxlan_add_del_tunnel src {src_ip} dst {dst_ip} vni {vni}\n'
+                .format(src_ip=src_ip, dst_ip=dst_ip, vni=vni_start+i))
+            commands.append(
+                'create_vlan_subif sw_if_index {sw_idx} vlan {vlan}\n'
+                .format(sw_idx=Topology.get_interface_sw_index(
+                    node, node_vlan_if), vlan=i+1))
+
+        VatExecutor().write_and_execute_script(node, tmp_fn, commands)
+
+        return vxlan_count
+
+    @staticmethod
+    def vpp_put_vxlan_and_vlan_interfaces_up(node, vxlan_count, node_vlan_if):
+        """
+        Update topology with VXLAN interfaces and VLAN sub-interfaces data
+        and put interfaces up.
+
+        :param node: VPP node.
+        :param vxlan_count: Number of tunnel interfaces.
+        :param node_vlan_if: VPP node interface key where VLAN sub-interfaces
+            have been created.
+        :type node: dict
+        :type vxlan_count: int
+        :type node_vlan_if: str
+        """
+        with VatTerminal(node) as vat_ter:
+            if_data = vat_ter.vat_terminal_exec_cmd_from_template(
+                'interface_dump.vat')[0]
+
+        tmp_fn = '/tmp/put_subinterfaces_up.config'
+        commands = list()
+        for i in range(0, vxlan_count):
+            vxlan_subif_key = Topology.add_new_port(node, 'vxlan_tunnel')
+            vxlan_subif_name = 'vxlan_tunnel{nr}'.format(nr=i)
+            vxlan_found = False
+            vxlan_subif_idx = None
+            vlan_subif_key = Topology.add_new_port(node, 'vlan_subif')
+            vlan_subif_name = '{if_name}.{vlan}'.format(
+                if_name=Topology.get_interface_name(
+                    node, node_vlan_if), vlan=i+1)
+            vlan_found = False
+            vlan_idx = None
+            for data in if_data:
+                if_name = data['interface_name']
+                if not vxlan_found and if_name == vxlan_subif_name:
+                    vxlan_subif_idx = data['sw_if_index']
+                    vxlan_found = True
+                elif not vlan_found and if_name == vlan_subif_name:
+                    vlan_idx = data['sw_if_index']
+                    vlan_found = True
+                if vxlan_found and vlan_found:
+                    break
+            Topology.update_interface_sw_if_index(
+                node, vxlan_subif_key, vxlan_subif_idx)
+            Topology.update_interface_name(
+                node, vxlan_subif_key, vxlan_subif_name)
+            commands.append(
+                'sw_interface_set_flags sw_if_index {sw_idx} admin-up link-up\n'
+                .format(sw_idx=vxlan_subif_idx))
+            Topology.update_interface_sw_if_index(
+                node, vlan_subif_key, vlan_idx)
+            Topology.update_interface_name(
+                node, vlan_subif_key, vlan_subif_name)
+            commands.append(
+                'sw_interface_set_flags sw_if_index {sw_idx} admin-up link-up\n'
+                .format(sw_idx=vlan_idx))
+
+        VatExecutor().write_and_execute_script(node, tmp_fn, commands)
+
+    @staticmethod
+    def vpp_put_vxlan_and_vlan_interfaces_to_bridge_domain(
+            node, node_vxlan_if, vxlan_count, op_node, op_node_if, dst_ip_start,
+            ip_step, bd_id_start):
+        """
+        Configure ARPs and routes for VXLAN interfaces and put each pair of
+        VXLAN tunnel interface and VLAN sub-interface to separate bridge-domain.
+
+        :param node: VPP node.
+        :param node_vxlan_if: VPP node interface key where VXLAN tunnel
+            interfaces have been created.
+        :param vxlan_count: Number of tunnel interfaces.
+        :param op_node: Opposite VPP node for VXLAN tunnel interfaces.
+        :param op_node_if: Opposite VPP node interface key for VXLAN tunnel
+            interfaces.
+        :param dst_ip_start: VXLAN tunnel destination IP address start.
+        :param ip_step: IP address incremental step.
+        :param bd_id_start: Bridge-domain ID start.
+        :type node: dict
+        :type node_vxlan_if: str
+        :type vxlan_count: int
+        :type op_node: dict
+        :type op_node_if: str
+        :type dst_ip_start: str
+        :type ip_step: int
+        :type bd_id_start: int
+        """
+        sw_idx_vxlan = Topology.get_interface_sw_index(node, node_vxlan_if)
+
+        dst_ip_start_int = IPUtil.ip_to_int(dst_ip_start)
+
+        tmp_fn = '/tmp/configure_routes_and_bridge_domains.config'
+        commands = list()
+        for i in range(0, vxlan_count):
+            dst_ip = IPUtil.int_to_ip(dst_ip_start_int + i * ip_step)
+            commands.append(
+                'ip_neighbor_add_del sw_if_index {sw_idx} dst {ip} mac {mac}\n'
+                .format(sw_idx=sw_idx_vxlan, ip=dst_ip,
+                        mac=Topology.get_interface_mac(op_node, op_node_if)))
+            commands.append(
+                'ip_add_del_route {ip}/32 via {ip} sw_if_index {sw_idx}'
+                ' resolve-attempts 10 count 1\n'.format(
+                    ip=dst_ip, sw_idx=sw_idx_vxlan))
+            bd_id = bd_id_start + i
+            subif_id = i + 1
+            commands.append(
+                'sw_interface_set_l2_bridge sw_if_index {sw_idx} bd_id {bd_id} '
+                'shg 0 enable\n'.format(sw_idx=Topology.get_interface_sw_index(
+                    node, 'vxlan_tunnel{nr}'.format(nr=subif_id)), bd_id=bd_id))
+            commands.append(
+                'sw_interface_set_l2_bridge sw_if_index {sw_idx} bd_id {bd_id} '
+                'shg 0 enable\n'.format(sw_idx=Topology.get_interface_sw_index(
+                    node, 'vlan_subif{nr}'.format(nr=subif_id)), bd_id=bd_id))
+
+        VatExecutor().write_and_execute_script(node, tmp_fn, commands)
+
+    @staticmethod
+    def vpp_sw_interface_rx_placement_dump(node):
+        """Dump VPP interface RX placement on node.
+
+        :param node: Node to run command on.
+        :type node: dict
+        :returns: Thread mapping information as a list of dictionaries.
+        :rtype: list
+        """
+
+        cmd = 'sw_interface_rx_placement_dump'
+        cmd_reply = 'sw_interface_rx_placement_details'
+        err_msg = "Failed to run '{cmd}' PAPI command on host {host}!".format(
+            cmd=cmd, host=node['host'])
+        with PapiExecutor(node) as papi_exec:
+            for ifc in node['interfaces'].values():
+                if ifc['vpp_sw_index'] is not None:
+                    papi_exec.add(cmd, sw_if_index=ifc['vpp_sw_index'])
+            papi_resp = papi_exec.get_dump(err_msg)
+        thr_mapping = [s[cmd_reply] for r in papi_resp.reply
+                       for s in r['api_reply']]
+        return sorted(thr_mapping, key=lambda k: k['sw_if_index'])
+
+    @staticmethod
+    def vpp_sw_interface_set_rx_placement(node, sw_if_index, queue_id,
+                                          worker_id):
+        """Set interface RX placement to worker on node.
+
+        :param node: Node to run command on.
+        :param sw_if_index: VPP SW interface index.
+        :param queue_id: VPP interface queue ID.
+        :param worker_id: VPP worker ID (indexing from 0).
+        :type node: dict
+        :type sw_if_index: int
+        :type queue_id: int
+        :type worker_id: int
+        :raises RuntimeError: If failed to run command on host or if no API
+            reply received.
+        """
+
+        cmd = 'sw_interface_set_rx_placement'
+        cmd_reply = 'sw_interface_set_rx_placement_reply'
+        err_msg = "Failed to run '{cmd}' PAPI command on host {host}!".format(
+            host=node['host'], cmd=cmd)
+        args = dict(sw_if_index=sw_if_index, queue_id=queue_id,
+                    worker_id=worker_id)
+        with PapiExecutor(node) as papi_exec:
+            papi_resp = papi_exec.add(cmd, **args).execute_should_pass(err_msg)
+        data = papi_resp.reply[0]['api_reply'][cmd_reply]
+        if data['retval'] != 0:
+            raise RuntimeError("Failed to set interface RX placement "
+                               "to worker on host {host}".
+                               format(host=node['host']))
+
+    @staticmethod
+    def vpp_round_robin_rx_placement(node, prefix):
+        """Set Round Robin interface RX placement on all worker threads
+        on node.
+
+        :param node: Topology node.
+        :param prefix: Interface name prefix.
+        :type node: dict
+        :type prefix: str
+        """
+        worker_id = 0
+        worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
+        for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
+            for interface in node['interfaces'].values():
+                if placement['sw_if_index'] == interface['vpp_sw_index'] \
+                    and prefix in interface['name']:
+                    InterfaceUtil.vpp_sw_interface_set_rx_placement(
+                        node, placement['sw_if_index'], placement['queue_id'],
+                        worker_id % worker_cnt)
+                    worker_id += 1
+
+    @staticmethod
+    def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix):
+        """Set Round Robin interface RX placement on all worker threads
+        on all DUTs.
+
+        :param nodes: Topology nodes.
+        :param prefix: Interface name prefix.
+        :type nodes: dict
+        :type prefix: str
+        """
+        for node in nodes.values():
+            if node['type'] == NodeType.DUT:
+                InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)
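
The round-robin placement added above spreads RX queues over the available VPP worker threads with a simple modulo counter. A small standalone sketch of that distribution logic; the input data below is illustrative, not taken from a live node:

    # Round-robin assignment logic mirrored from vpp_round_robin_rx_placement,
    # separated from the PAPI and topology plumbing.
    def round_robin_assign(placements, worker_cnt):
        """Yield (sw_if_index, queue_id, worker_id) triples."""
        worker_id = 0
        for placement in placements:
            yield (placement['sw_if_index'], placement['queue_id'],
                   worker_id % worker_cnt)
            worker_id += 1

    placements = [{'sw_if_index': 1, 'queue_id': 0},
                  {'sw_if_index': 1, 'queue_id': 1},
                  {'sw_if_index': 2, 'queue_id': 0}]
    # With two workers the queues land on workers 0, 1 and 0 respectively.
    print(list(round_robin_assign(placements, 2)))
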