CSIT-1411 Implement manual rx-placement override for bug in VPP 69/17169/17
author    Peter Mikus <pmikus@cisco.com>
          Wed, 30 Jan 2019 14:04:40 +0000 (14:04 +0000)
committer Peter Mikus <pmikus@cisco.com>
          Mon, 4 Feb 2019 12:43:02 +0000 (12:43 +0000)
Change-Id: Ie3d2b1f40a607ce5190ccfea6a372bc072d0a3b9
Signed-off-by: Peter Mikus <pmikus@cisco.com>
resources/libraries/python/InterfaceUtil.py
resources/libraries/python/VPPUtil.py
resources/libraries/robot/performance/performance_configuration.robot

diff --git a/resources/libraries/python/InterfaceUtil.py b/resources/libraries/python/InterfaceUtil.py
index cff2c9f..8e18923 100644
@@ -19,6 +19,8 @@ from robot.api import logger
 
 from resources.libraries.python.CpuUtils import CpuUtils
 from resources.libraries.python.DUTSetup import DUTSetup
+from resources.libraries.python.PapiExecutor import PapiExecutor
+from resources.libraries.python.PapiErrors import PapiError
 from resources.libraries.python.IPUtil import convert_ipv4_netmask_prefix
 from resources.libraries.python.IPUtil import IPUtil
 from resources.libraries.python.parsers.JsonParser import JsonParser
@@ -1620,3 +1622,110 @@ class InterfaceUtil(object):
                     node, 'vlan_subif{nr}'.format(nr=subif_id)), bd_id=bd_id))
 
         VatExecutor().write_and_execute_script(node, tmp_fn, commands)
+
+    @staticmethod
+    def vpp_sw_interface_rx_placement_dump(node):
+        """Dump VPP interface RX placement on node.
+
+        :param node: Node to run command on.
+        :type node: dict
+        :returns: Thread mapping information as a list of dictionaries.
+        :rtype: list
+        :raises RuntimeError: If failed to run command on host.
+        :raises PapiError: If no API reply received.
+        """
+        api_data = list()
+        for ifc in node['interfaces'].values():
+            if ifc['vpp_sw_index'] is not None:
+                api = dict(api_name='sw_interface_rx_placement_dump')
+                api_args = dict(sw_if_index=ifc['vpp_sw_index'])
+                api['api_args'] = api_args
+                api_data.append(api)
+
+        with PapiExecutor(node) as papi_executor:
+            papi_executor.execute_papi(api_data)
+            try:
+                papi_executor.papi_should_have_passed()
+                api_reply = papi_executor.get_papi_reply()
+            except AssertionError:
+                raise RuntimeError('Failed to run {api_name} on host '
+                                   '{host}!'.format(host=node['host'], **api))
+
+        if api_reply:
+            thr_mapping = [s['sw_interface_rx_placement_details'] \
+                for r in api_reply for s in r['api_reply']]
+            return sorted(thr_mapping, key=lambda k: k['sw_if_index'])
+        else:
+            raise PapiError('No reply received for {api_name} on host {host}!'.
+                            format(host=node['host'], **api))
+
+    @staticmethod
+    def vpp_sw_interface_set_rx_placement(node, sw_if_index, queue_id,
+                                          worker_id):
+        """Set interface RX placement to worker on node.
+
+        :param node: Node to run command on.
+        :param sw_if_index: VPP SW interface index.
+        :param queue_id: VPP interface queue ID.
+        :param worker_id: VPP worker ID (indexing from 0).
+        :type node: dict
+        :type sw_if_index: int
+        :type queue_id: int
+        :type worker_id: int
+        :raises RuntimeError: If failed to run command on host.
+        :raises PapiError: If no API reply received.
+        """
+        api_data = list()
+        api = dict(api_name='sw_interface_set_rx_placement')
+        api_args = dict(sw_if_index=sw_if_index, queue_id=queue_id,
+                        worker_id=worker_id)
+        api['api_args'] = api_args
+        api_data.append(api)
+
+        with PapiExecutor(node) as papi_executor:
+            papi_executor.execute_papi(api_data)
+            try:
+                papi_executor.papi_should_have_passed()
+                api_reply = papi_executor.get_papi_reply()
+            except AssertionError:
+                raise RuntimeError('Failed to run {api_name} on host '
+                                   '{host}!'.format(host=node['host'], **api))
+
+        if not api_reply:
+            raise PapiError('No reply received for {api_name} on host {host}!'.
+                            format(host=node['host'], **api))
+
+    @staticmethod
+    def vpp_round_robin_rx_placement(node, prefix):
+        """Set Round Robin interface RX placement on all worker threads
+        on node.
+
+        :param node: Topology node.
+        :param prefix: Interface name prefix.
+        :type node: dict
+        :type prefix: str
+        """
+        worker_id = 0
+        worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
+        for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
+            for interface in node['interfaces'].values():
+                if placement['sw_if_index'] == interface['vpp_sw_index'] \
+                    and prefix in interface['name']:
+                    InterfaceUtil.vpp_sw_interface_set_rx_placement(
+                        node, placement['sw_if_index'], placement['queue_id'],
+                        worker_id % worker_cnt)
+                    worker_id += 1
+
+    @staticmethod
+    def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix):
+        """Set Round Robin interface RX placement on all worker threads
+        on all DUTs.
+
+        :param nodes: Topology nodes.
+        :param prefix: Interface name prefix.
+        :type nodes: dict
+        :type prefix: str
+        """
+        for node in nodes.values():
+            if node['type'] == NodeType.DUT:
+                InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)
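The three helpers above form a small PAPI workflow: dump the current RX placement, pin a single queue, or spread the queues of every matching interface across workers. A minimal usage sketch, assuming a CSIT topology 'node' dict (with 'interfaces' entries carrying 'vpp_sw_index' and 'name') is already loaded; the function name and the literal index values are made up for illustration:

    # Sketch only; the reply field names ('sw_if_index', 'queue_id',
    # 'worker_id') follow sw_interface_rx_placement_details as used above.
    from resources.libraries.python.InterfaceUtil import InterfaceUtil

    def rebalance_memif_rx(node):
        # Show where each RX queue currently lives (sorted by sw_if_index).
        for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
            print(placement)
        # Pin one queue explicitly (sw_if_index 1, queue 0 -> worker 0) ...
        InterfaceUtil.vpp_sw_interface_set_rx_placement(
            node, sw_if_index=1, queue_id=0, worker_id=0)
        # ... or let the new helper spread all memif queues round-robin.
        InterfaceUtil.vpp_round_robin_rx_placement(node, prefix='memif')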
diff --git a/resources/libraries/python/VPPUtil.py b/resources/libraries/python/VPPUtil.py
index 63d9aac..82fded3 100644
@@ -317,3 +317,36 @@ class VPPUtil(object):
         for node in nodes.values():
             if node['type'] == NodeType.DUT:
                 VPPUtil.show_event_logger_on_dut(node)
+
+    @staticmethod
+    def vpp_show_threads(node):
+        """Show VPP threads on node.
+
+        :param node: Node to run command on.
+        :type node: dict
+        :returns: VPP thread data.
+        :rtype: list
+        :raises RuntimeError: If failed to run command on host.
+        :raises PapiError: If no API reply received.
+        """
+        api_data = list()
+        api = dict(api_name='show_threads')
+        api_args = dict()
+        api['api_args'] = api_args
+        api_data.append(api)
+
+        with PapiExecutor(node) as papi_executor:
+            papi_executor.execute_papi(api_data)
+            try:
+                papi_executor.papi_should_have_passed()
+                api_reply = papi_executor.get_papi_reply()
+            except AssertionError:
+                raise RuntimeError('Failed to run {api_name} on host '
+                                   '{host}!'.format(host=node['host'], **api))
+
+        if api_reply:
+            return \
+                api_reply[0]['api_reply']['show_threads_reply']['thread_data']
+        else:
+            raise PapiError('No reply received for {api_name} on host {host}!'.
+                            format(host=node['host'], **api))
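A companion sketch of how this thread dump feeds the placement logic: vpp_round_robin_rx_placement derives its worker count as the thread total minus one (the main thread), since RX queues are placed only on workers. The helper name is hypothetical:

    # Sketch only; assumes a CSIT topology 'node' dict as above.
    from resources.libraries.python.VPPUtil import VPPUtil

    def vpp_worker_count(node):
        # thread_data holds one entry per VPP thread (main + workers).
        threads = VPPUtil.vpp_show_threads(node)
        return len(threads) - 1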
diff --git a/resources/libraries/robot/performance/performance_configuration.robot b/resources/libraries/robot/performance/performance_configuration.robot
index 2330cff..be30d37 100644
 | | ... | - perf_qemu_qsz - Virtio Queue Size. Type: int
 | | ... | - use_tuned_cfs - Set True if CFS RR should be used for Qemu SMP.
 | | ... | Type: bool
+| | ... | - auto_scale - Whether to use the same number of RXQs for the vhost
+| | ... | interface in the VM as the vswitch, otherwise use a single RXQ.
+| | ... | Type: boolean
 | | ...
 | | ... | *Note:*
 | | ... | KW uses test variables \${rxq_count_int}, \${thr_count_int} and
 | | ...
 | | [Arguments] | ${dut} | ${sock1} | ${sock2} | ${vm_name} | ${nf_cpus}
 | | ... | ${qemu_id}=${1} | ${jumbo}=${False} | ${perf_qemu_qsz}=${256}
-| | ... | ${use_tuned_cfs}=${False}
+| | ... | ${use_tuned_cfs}=${False} | ${auto_scale}=${True}
 | | ...
 | | ${nf_cpus_count}= | Get Length | ${nf_cpus}
+| | ${rxq}= | Run Keyword If | ${auto_scale} == ${True}
+| | ... | Set Variable | ${rxq_count_int}
+| | ... | ELSE | Set Variable | ${1}
 | | Import Library | resources.libraries.python.QemuUtils | qemu_id=${qemu_id}
 | | ... | WITH NAME | ${vm_name}
 | | Run keyword | ${vm_name}.Qemu Set Node | ${nodes['${dut}']}
 | | Dpdk Testpmd Start | ${vm} | eal_corelist=${testpmd_cpus}
 | | ... | eal_mem_channels=4 | pmd_fwd_mode=io | pmd_disable_hw_vlan=${TRUE}
 | | ... | pmd_rxd=${perf_qemu_qsz} | pmd_txd=${perf_qemu_qsz}
-| | ... | pmd_rxq=${rxq_count_int} | pmd_txq=${rxq_count_int}
-| | ... | pmd_max_pkt_len=${max_pkt_len}
+| | ... | pmd_rxq=${rxq} | pmd_txq=${rxq} | pmd_max_pkt_len=${max_pkt_len}
 | | Return From Keyword | ${vm}
 
 | Configure guest VMs with dpdk-testpmd connected via vhost-user on node
 | | | ... | ${dut} | vm_count=${vm_count} | jumbo=${jumbo}
 | | | ... | perf_qemu_qsz=${perf_qemu_qsz} | use_tuned_cfs=${False}
 | | All VPP Interfaces Ready Wait | ${nodes}
+| | VPP round robin RX placement on all DUTs | ${nodes} | prefix=Virtual
 
 | Configure guest VM with dpdk-testpmd-mac connected via vhost-user
 | | [Documentation]
 | | ... | - perf_qemu_qsz - Virtio Queue Size. Type: int
 | | ... | - use_tuned_cfs - Set True if CFS RR should be used for Qemu SMP.
 | | ... | Type: bool
+| | ... | - auto_scale - Whether to use the same number of RXQs for the vhost
+| | ... | interface in the VM as the vswitch, otherwise use a single RXQ.
+| | ... | Type: boolean
 | | ...
 | | ... | *Note:*
 | | ... | KW uses test variables \${rxq_count_int}, \${thr_count_int} and
 | | [Arguments] | ${dut} | ${sock1} | ${sock2} | ${vm_name}
 | | ... | ${eth0_mac} | ${eth1_mac} | ${nf_cpus} | ${qemu_id}=${1}
 | | ... | ${jumbo}=${False} | ${perf_qemu_qsz}=${256}
-| | ... | ${use_tuned_cfs}=${False}
+| | ... | ${use_tuned_cfs}=${False} | ${auto_scale}=${True}
 | | ...
 | | ${nf_cpus_count}= | Get Length | ${nf_cpus}
+| | ${rxq}= | Run Keyword If | ${auto_scale} == ${True}
+| | ... | Set Variable | ${rxq_count_int}
+| | ... | ELSE | Set Variable | ${1}
 | | Import Library | resources.libraries.python.QemuUtils | qemu_id=${qemu_id}
 | | ... | WITH NAME | ${vm_name}
 | | Run keyword | ${vm_name}.Qemu Set Node | ${nodes['${dut}']}
 | | ... | eal_mem_channels=4 | pmd_fwd_mode=mac | pmd_eth_peer_0=0,${eth0_mac}
 | | ... | pmd_eth_peer_1=1,${eth1_mac} | pmd_disable_hw_vlan=${TRUE}
 | | ... | pmd_rxd=${perf_qemu_qsz} | pmd_txd=${perf_qemu_qsz}
-| | ... | pmd_rxq=${rxq_count_int} | pmd_txq=${rxq_count_int}
-| | ... | pmd_max_pkt_len=${max_pkt_len}
+| | ... | pmd_rxq=${rxq} | pmd_txq=${rxq} | pmd_max_pkt_len=${max_pkt_len}
 | | Return From Keyword | ${vm}
 
 | Configure guest VMs with dpdk-testpmd-mac connected via vhost-user on node
 | | | ... | ${dut} | vm_count=${vm_count} | jumbo=${jumbo}
 | | | ... | perf_qemu_qsz=${perf_qemu_qsz} | use_tuned_cfs=${False}
 | | All VPP Interfaces Ready Wait | ${nodes}
+| | VPP round robin RX placement on all DUTs | ${nodes} | prefix=Virtual
 
 | Configure chain of NFs with dpdk-testpmd-mac connected via vhost-user on node
 | | [Documentation]
 | | ... | - perf_qemu_qsz - Virtio Queue Size. Type: integer
 | | ... | - use_tuned_cfs - Set True if CFS RR should be used for Qemu SMP.
 | | ... | Type: boolean
+| | ... | - auto_scale - Whether to use the same number of RXQs for the vhost
+| | ... | interface in the VM as the vswitch, otherwise use a single RXQ.
+| | ... | Type: boolean
 | | ...
 | | ... | *Example:*
 | | ...
 | | ...
 | | [Arguments] | ${dut} | ${nf_chains}=${1} | ${nf_chain}=${1}
 | | ... | ${nf_nodes}=${1} | ${jumbo}=${False} | ${perf_qemu_qsz}=${256}
-| | ... | ${use_tuned_cfs}=${False}
+| | ... | ${use_tuned_cfs}=${False} | ${auto_scale}=${False}
 | | ...
 | | ${tg_if1_mac}= | Get Interface MAC | ${tg} | ${tg_if1}
 | | ${tg_if2_mac}= | Get Interface MAC | ${tg} | ${tg_if2}
 | | | ... | ${dut} | ${sock1} | ${sock2} | ${nf_name} | ${vif1_mac}
 | | | ... | ${vif2_mac} | ${nf_cpus} | qemu_id=${qemu_id} | jumbo=${jumbo}
 | | | ... | perf_qemu_qsz=${perf_qemu_qsz} | use_tuned_cfs=${use_tuned_cfs}
+| | | ... | auto_scale=${auto_scale}
 | | | Set To Dictionary | ${${dut}_vm_refs} | ${nf_name} | ${vm}
 
 | Configure chain of NFs with dpdk-testpmd-mac connected via vhost-user
 | | ... | - perf_qemu_qsz - Virtio Queue Size. Type: integer
 | | ... | - use_tuned_cfs - Set True if CFS RR should be used for Qemu SMP.
 | | ... | Type: boolean
+| | ... | - auto_scale - Whether to use the same number of RXQs for the vhost
+| | ... | interface in the VM as the vswitch, otherwise use a single RXQ.
+| | ... | Type: boolean
 | | ...
 | | ... | *Example:*
 | | ...
 | | ...
 | | [Arguments] | ${nf_chains}=${1} | ${nf_chain}=${1} | ${nf_nodes}=${1}
 | | ... | ${jumbo}=${False} | ${perf_qemu_qsz}=${256}
-| | ... | ${use_tuned_cfs}=${False}
+| | ... | ${use_tuned_cfs}=${False} | ${auto_scale}=${False}
 | | ...
 | | ${duts}= | Get Matches | ${nodes} | DUT*
 | | :FOR | ${dut} | IN | @{duts}
 | | | ... | ${dut} | nf_chains=${nf_chains} | nf_chain=${nf_chain}
 | | | ... | nf_nodes=${nf_nodes} | jumbo=${jumbo}
 | | | ... | perf_qemu_qsz=${perf_qemu_qsz} | use_tuned_cfs=${False}
+| | | ... | auto_scale=${auto_scale}
 
 | Configure chains of NFs with dpdk-testpmd-mac connected via vhost-user
 | | [Documentation]
 | | ... | - perf_qemu_qsz - Virtio Queue Size. Type: integer
 | | ... | - use_tuned_cfs - Set True if CFS RR should be used for Qemu SMP.
 | | ... | Type: boolean
+| | ... | - auto_scale - Whether to use the same number of RXQs for memif
+| | ... | interfaces in containers as the vswitch, otherwise use a single RXQ.
+| | ... | Type: boolean
 | | ...
 | | ... | *Example:*
 | | ...
 | | ...
 | | [Arguments] | ${nf_chains}=${1} | ${nf_nodes}=${1} | ${jumbo}=${False}
 | | ... | ${perf_qemu_qsz}=${256} | ${use_tuned_cfs}=${False}
+| | ... | ${auto_scale}=${False}
 | | ...
 | | :FOR | ${nf_chain} | IN RANGE | 1 | ${nf_chains}+1
 | | | Configure chain of NFs with dpdk-testpmd-mac connected via vhost-user
 | | | ... | nf_chains=${nf_chains} | nf_chain=${nf_chain} | nf_nodes=${nf_nodes}
 | | | ... | jumbo=${jumbo} | perf_qemu_qsz=${perf_qemu_qsz}
-| | | ... | use_tuned_cfs=${False}
+| | | ... | use_tuned_cfs=${False} | auto_scale=${auto_scale}
 | | All VPP Interfaces Ready Wait | ${nodes}
+| | VPP round robin RX placement on all DUTs | ${nodes} | prefix=Virtual
 
 | Initialize LISP IPv4 forwarding in 3-node circular topology
 | | [Documentation] | Custom setup of IPv4 addresses on all DUT nodes and TG \
 | | | Initialize L2 xconnect with memif pairs on DUT node | ${dut} | ${count}
 | | Set interfaces in path up
 | | Show Memif on all DUTs | ${nodes}
+| | VPP round robin RX placement on all DUTs | ${nodes} | prefix=memif
 
 | Initialize L2 Bridge Domain with memif pairs on DUT node
 | | [Documentation]
 | | | ... | nf_nodes=${nf_nodes} | auto_scale=${auto_scale}
 | | Set interfaces in path up
 | | Show Memif on all DUTs | ${nodes}
+| | VPP round robin RX placement on all DUTs | ${nodes} | prefix=memif
 
 | Initialize L2 Bridge Domain for pipeline with memif pairs
 | | [Documentation]
 | | | ... | auto_scale=${auto_scale}
 | | Set interfaces in path up
 | | Show Memif on all DUTs | ${nodes}
+| | VPP round robin RX placement on all DUTs | ${nodes} | prefix=memif
 
 | Initialize L2 Bridge Domain with memif pairs and VLAN in circular topology
 | | [Documentation]
 | | ... | Add interface to bridge domain | ${dut2} | ${dut2_if2} | ${bd_id2}
 | | ...
 | | Show Memif on all DUTs | ${nodes}
+| | VPP round robin RX placement on all DUTs | ${nodes} | prefix=memif
 
 | Initialize IPv4 routing with memif pairs on DUT node
 | | [Documentation]
 | | | Initialize IPv4 routing with memif pairs on DUT node | ${dut} | ${count}
 | | Set interfaces in path up
 | | Show Memif on all DUTs | ${nodes}
+| | VPP round robin RX placement on all DUTs | ${nodes} | prefix=memif
 
 | Initialize L2 xconnect for single memif
 | | [Documentation]
 | | | ... | ${${dut}-memif-${number}-if1}
 | | Set single interfaces in path up
 | | Show Memif on all DUTs | ${nodes}
+| | VPP round robin RX placement on all DUTs | ${nodes} | prefix=memif
 
 | Initialize L2 Bridge Domain for single memif
 | | [Documentation]
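Taken together, the Robot changes above make each vhost/memif NF interface come up with a single RX queue when auto_scale is off, and the new round-robin keyword then spreads those queues evenly over the VPP workers. A standalone sketch of that distribution in plain Python, with hypothetical queue data:

    # Standalone illustration of the assignment performed by
    # vpp_round_robin_rx_placement(); queue list and worker count are made up.
    def round_robin(queues, worker_cnt):
        """Map (sw_if_index, queue_id) pairs onto workers 0..worker_cnt-1."""
        return {q: i % worker_cnt for i, q in enumerate(queues)}

    queues = [(1, 0), (2, 0), (3, 0), (4, 0)]  # four single-RXQ interfaces
    print(round_robin(queues, worker_cnt=3))
    # -> {(1, 0): 0, (2, 0): 1, (3, 0): 2, (4, 0): 0}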