From 1634e19d9adb70b634c80b760aabac81fd4bfdd1 Mon Sep 17 00:00:00 2001
From: Peter Mikus
Date: Wed, 30 Jan 2019 14:04:40 +0000
Subject: [PATCH] CSIT-1411 Implement manual rx-placement override for bug in VPP

Change-Id: Ie3d2b1f40a607ce5190ccfea6a372bc072d0a3b9
Signed-off-by: Peter Mikus
---
 resources/libraries/python/InterfaceUtil.py      | 109 +++++++++++++++++++++
 resources/libraries/python/VPPUtil.py            |  33 +++++++
 .../performance/performance_configuration.robot  |  44 +++++++--
 3 files changed, 177 insertions(+), 9 deletions(-)

diff --git a/resources/libraries/python/InterfaceUtil.py b/resources/libraries/python/InterfaceUtil.py
index cff2c9f695..8e1892392d 100644
--- a/resources/libraries/python/InterfaceUtil.py
+++ b/resources/libraries/python/InterfaceUtil.py
@@ -19,6 +19,8 @@ from robot.api import logger
 
 from resources.libraries.python.CpuUtils import CpuUtils
 from resources.libraries.python.DUTSetup import DUTSetup
+from resources.libraries.python.PapiExecutor import PapiExecutor
+from resources.libraries.python.PapiErrors import PapiError
 from resources.libraries.python.IPUtil import convert_ipv4_netmask_prefix
 from resources.libraries.python.IPUtil import IPUtil
 from resources.libraries.python.parsers.JsonParser import JsonParser
@@ -1620,3 +1622,110 @@ class InterfaceUtil(object):
                 node, 'vlan_subif{nr}'.format(nr=subif_id)), bd_id=bd_id))
 
         VatExecutor().write_and_execute_script(node, tmp_fn, commands)
+
+    @staticmethod
+    def vpp_sw_interface_rx_placement_dump(node):
+        """Dump VPP interface RX placement on node.
+
+        :param node: Node to run command on.
+        :type node: dict
+        :returns: Thread mapping information as a list of dictionaries.
+        :rtype: list
+        :raises RuntimeError: If failed to run command on host.
+        :raises PapiError: If no API reply received.
+        """
+        api_data = list()
+        for ifc in node['interfaces'].values():
+            if ifc['vpp_sw_index'] is not None:
+                api = dict(api_name='sw_interface_rx_placement_dump')
+                api_args = dict(sw_if_index=ifc['vpp_sw_index'])
+                api['api_args'] = api_args
+                api_data.append(api)
+
+        with PapiExecutor(node) as papi_executor:
+            papi_executor.execute_papi(api_data)
+            try:
+                papi_executor.papi_should_have_passed()
+                api_reply = papi_executor.get_papi_reply()
+            except AssertionError:
+                raise RuntimeError('Failed to run {api_name} on host '
+                                   '{host}!'.format(host=node['host'], **api))
+
+        if api_reply:
+            thr_mapping = [s['sw_interface_rx_placement_details']
+                           for r in api_reply for s in r['api_reply']]
+            return sorted(thr_mapping, key=lambda k: k['sw_if_index'])
+        else:
+            raise PapiError('No reply received for {api_name} on host {host}!'.
+                            format(host=node['host'], **api))
+
+    @staticmethod
+    def vpp_sw_interface_set_rx_placement(node, sw_if_index, queue_id,
+                                          worker_id):
+        """Set interface RX placement to worker on node.
+
+        :param node: Node to run command on.
+        :param sw_if_index: VPP SW interface index.
+        :param queue_id: VPP interface queue ID.
+        :param worker_id: VPP worker ID (indexing from 0).
+        :type node: dict
+        :type sw_if_index: int
+        :type queue_id: int
+        :type worker_id: int
+        :raises RuntimeError: If failed to run command on host.
+        :raises PapiError: If no API reply received.
+        """
+        api_data = list()
+        api = dict(api_name='sw_interface_set_rx_placement')
+        api_args = dict(sw_if_index=sw_if_index, queue_id=queue_id,
+                        worker_id=worker_id)
+        api['api_args'] = api_args
+        api_data.append(api)
+
+        with PapiExecutor(node) as papi_executor:
+            papi_executor.execute_papi(api_data)
+            try:
+                papi_executor.papi_should_have_passed()
+                api_reply = papi_executor.get_papi_reply()
+            except AssertionError:
+                raise RuntimeError('Failed to run {api_name} on host '
+                                   '{host}!'.format(host=node['host'], **api))
+
+        if not api_reply:
+            raise PapiError('No reply received for {api_name} on host {host}!'.
+                            format(host=node['host'], **api))
+
+    @staticmethod
+    def vpp_round_robin_rx_placement(node, prefix):
+        """Set Round Robin interface RX placement on all worker threads
+        on node.
+
+        :param node: Topology node.
+        :param prefix: Interface name prefix.
+        :type node: dict
+        :type prefix: str
+        """
+        worker_id = 0
+        worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
+        for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
+            for interface in node['interfaces'].values():
+                if placement['sw_if_index'] == interface['vpp_sw_index'] \
+                        and prefix in interface['name']:
+                    InterfaceUtil.vpp_sw_interface_set_rx_placement(
+                        node, placement['sw_if_index'], placement['queue_id'],
+                        worker_id % worker_cnt)
+                    worker_id += 1
+
+    @staticmethod
+    def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix):
+        """Set Round Robin interface RX placement on all worker threads
+        on all DUTs.
+
+        :param nodes: Topology nodes.
+        :param prefix: Interface name prefix.
+        :type nodes: dict
+        :type prefix: str
+        """
+        for node in nodes.values():
+            if node['type'] == NodeType.DUT:
+                InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)
diff --git a/resources/libraries/python/VPPUtil.py b/resources/libraries/python/VPPUtil.py
index 63d9aaca86..82fded30b4 100644
--- a/resources/libraries/python/VPPUtil.py
+++ b/resources/libraries/python/VPPUtil.py
@@ -317,3 +317,36 @@ class VPPUtil(object):
         for node in nodes.values():
             if node['type'] == NodeType.DUT:
                 VPPUtil.show_event_logger_on_dut(node)
+
+    @staticmethod
+    def vpp_show_threads(node):
+        """Show VPP threads on node.
+
+        :param node: Node to run command on.
+        :type node: dict
+        :returns: VPP thread data.
+        :rtype: list
+        :raises RuntimeError: If failed to run command on host.
+        :raises PapiError: If no API reply received.
+        """
+        api_data = list()
+        api = dict(api_name='show_threads')
+        api_args = dict()
+        api['api_args'] = api_args
+        api_data.append(api)
+
+        with PapiExecutor(node) as papi_executor:
+            papi_executor.execute_papi(api_data)
+            try:
+                papi_executor.papi_should_have_passed()
+                api_reply = papi_executor.get_papi_reply()
+            except AssertionError:
+                raise RuntimeError('Failed to run {api_name} on host '
+                                   '{host}!'.format(host=node['host'], **api))
+
+        if api_reply:
+            return \
+                api_reply[0]['api_reply']['show_threads_reply']['thread_data']
+        else:
+            raise PapiError('No reply received for {api_name} on host {host}!'.
+                            format(host=node['host'], **api))
diff --git a/resources/libraries/robot/performance/performance_configuration.robot b/resources/libraries/robot/performance/performance_configuration.robot
index 2330cffd8c..be30d3706c 100644
--- a/resources/libraries/robot/performance/performance_configuration.robot
+++ b/resources/libraries/robot/performance/performance_configuration.robot
@@ -2307,6 +2307,8 @@
 | | ... | - perf_qemu_qsz - Virtio Queue Size. Type: int
 | | ... | - use_tuned_cfs - Set True if CFS RR should be used for Qemu SMP.
 | | ... | Type: bool
+| | ... | - auto_scale - Whether the vhost interface in the VM uses the same
+| | ... | number of RXQs as the vswitch, otherwise a single RXQ. Type: boolean
 | | ...
 | | ... | *Note:*
 | | ... | KW uses test variables \${rxq_count_int}, \${thr_count_int} and
@@ -2320,9 +2322,12 @@
 | | ...
 | | [Arguments] | ${dut} | ${sock1} | ${sock2} | ${vm_name} | ${nf_cpus}
 | | ... | ${qemu_id}=${1} | ${jumbo}=${False} | ${perf_qemu_qsz}=${256}
-| | ... | ${use_tuned_cfs}=${False}
+| | ... | ${use_tuned_cfs}=${False} | ${auto_scale}=${True}
 | | ...
 | | ${nf_cpus_count}= | Get Length | ${nf_cpus}
+| | ${rxq}= | Run Keyword If | '${auto_scale}' == '${True}'
+| | ... | Set Variable | ${rxq_count_int}
+| | ... | ELSE | Set Variable | ${1}
 | | Import Library | resources.libraries.python.QemuUtils | qemu_id=${qemu_id}
 | | ... | WITH NAME | ${vm_name}
 | | Run keyword | ${vm_name}.Qemu Set Node | ${nodes['${dut}']}
@@ -2356,8 +2361,7 @@
 | | Dpdk Testpmd Start | ${vm} | eal_corelist=${testpmd_cpus}
 | | ... | eal_mem_channels=4 | pmd_fwd_mode=io | pmd_disable_hw_vlan=${TRUE}
 | | ... | pmd_rxd=${perf_qemu_qsz} | pmd_txd=${perf_qemu_qsz}
-| | ... | pmd_rxq=${rxq_count_int} | pmd_txq=${rxq_count_int}
-| | ... | pmd_max_pkt_len=${max_pkt_len}
+| | ... | pmd_rxq=${rxq} | pmd_txq=${rxq} | pmd_max_pkt_len=${max_pkt_len}
 | | Return From Keyword | ${vm}
 
 Configure guest VMs with dpdk-testpmd connected via vhost-user on node
@@ -2424,6 +2428,7 @@
 | | | ... | ${dut} | vm_count=${vm_count} | jumbo=${jumbo}
 | | | ... | perf_qemu_qsz=${perf_qemu_qsz} | use_tuned_cfs=${False}
 | | All VPP Interfaces Ready Wait | ${nodes}
+| | VPP round robin RX placement on all DUTs | ${nodes} | prefix=Virtual
 
 Configure guest VM with dpdk-testpmd-mac connected via vhost-user
 | | [Documentation]
@@ -2447,6 +2452,8 @@
 | | ... | - perf_qemu_qsz - Virtio Queue Size. Type: int
 | | ... | - use_tuned_cfs - Set True if CFS RR should be used for Qemu SMP.
 | | ... | Type: bool
+| | ... | - auto_scale - Whether the vhost interface in the VM uses the same
+| | ... | number of RXQs as the vswitch, otherwise a single RXQ. Type: boolean
 | | ...
 | | ... | *Note:*
 | | ... | KW uses test variables \${rxq_count_int}, \${thr_count_int} and
@@ -2461,9 +2468,12 @@
 | | [Arguments] | ${dut} | ${sock1} | ${sock2} | ${vm_name}
 | | ... | ${eth0_mac} | ${eth1_mac} | ${nf_cpus} | ${qemu_id}=${1}
 | | ... | ${jumbo}=${False} | ${perf_qemu_qsz}=${256}
-| | ... | ${use_tuned_cfs}=${False}
+| | ... | ${use_tuned_cfs}=${False} | ${auto_scale}=${True}
 | | ...
 | | ${nf_cpus_count}= | Get Length | ${nf_cpus}
+| | ${rxq}= | Run Keyword If | '${auto_scale}' == '${True}'
+| | ... | Set Variable | ${rxq_count_int}
+| | ... | ELSE | Set Variable | ${1}
 | | Import Library | resources.libraries.python.QemuUtils | qemu_id=${qemu_id}
 | | ... | WITH NAME | ${vm_name}
 | | Run keyword | ${vm_name}.Qemu Set Node | ${nodes['${dut}']}
@@ -2498,8 +2508,7 @@
 | | ... | eal_mem_channels=4 | pmd_fwd_mode=mac | pmd_eth_peer_0=0,${eth0_mac}
 | | ... | pmd_eth_peer_1=1,${eth1_mac} | pmd_disable_hw_vlan=${TRUE}
 | | ... | pmd_rxd=${perf_qemu_qsz} | pmd_txd=${perf_qemu_qsz}
-| | ... | pmd_rxq=${rxq_count_int} | pmd_txq=${rxq_count_int}
-| | ... | pmd_max_pkt_len=${max_pkt_len}
+| | ... | pmd_rxq=${rxq} | pmd_txq=${rxq} | pmd_max_pkt_len=${max_pkt_len}
 | | Return From Keyword | ${vm}
 
 Configure guest VMs with dpdk-testpmd-mac connected via vhost-user on node
@@ -2571,6 +2580,7 @@
 | | | ... | ${dut} | vm_count=${vm_count} | jumbo=${jumbo}
 | | | ... | perf_qemu_qsz=${perf_qemu_qsz} | use_tuned_cfs=${False}
 | | All VPP Interfaces Ready Wait | ${nodes}
+| | VPP round robin RX placement on all DUTs | ${nodes} | prefix=Virtual
 
 Configure chain of NFs with dpdk-testpmd-mac connected via vhost-user on node
 | | [Documentation]
@@ -2588,6 +2598,8 @@
 | | ... | - perf_qemu_qsz - Virtio Queue Size. Type: integer
 | | ... | - use_tuned_cfs - Set True if CFS RR should be used for Qemu SMP.
 | | ... | Type: boolean
+| | ... | - auto_scale - Whether the vhost interface in the VM uses the same
+| | ... | number of RXQs as the vswitch, otherwise a single RXQ. Type: boolean
 | | ...
 | | ... | *Example:*
 | | ...
@@ -2596,7 +2608,7 @@
 | | ...
 | | [Arguments] | ${dut} | ${nf_chains}=${1} | ${nf_chain}=${1}
 | | ... | ${nf_nodes}=${1} | ${jumbo}=${False} | ${perf_qemu_qsz}=${256}
-| | ... | ${use_tuned_cfs}=${False}
+| | ... | ${use_tuned_cfs}=${False} | ${auto_scale}=${False}
 | | ...
 | | ${tg_if1_mac}= | Get Interface MAC | ${tg} | ${tg_if1}
 | | ${tg_if2_mac}= | Get Interface MAC | ${tg} | ${tg_if2}
@@ -2625,6 +2637,7 @@
 | | | ... | ${dut} | ${sock1} | ${sock2} | ${nf_name} | ${vif1_mac}
 | | | ... | ${vif2_mac} | ${nf_cpus} | qemu_id=${qemu_id} | jumbo=${jumbo}
 | | | ... | perf_qemu_qsz=${perf_qemu_qsz} | use_tuned_cfs=${use_tuned_cfs}
+| | | ... | auto_scale=${auto_scale}
 | | | Set To Dictionary | ${${dut}_vm_refs} | ${nf_name} | ${vm}
 
 Configure chain of NFs with dpdk-testpmd-mac connected via vhost-user
 | | [Documentation]
@@ -2642,6 +2655,8 @@
 | | ... | - perf_qemu_qsz - Virtio Queue Size. Type: integer
 | | ... | - use_tuned_cfs - Set True if CFS RR should be used for Qemu SMP.
 | | ... | Type: boolean
+| | ... | - auto_scale - Whether the vhost interface in the VM uses the same
+| | ... | number of RXQs as the vswitch, otherwise a single RXQ. Type: boolean
 | | ...
 | | ... | *Example:*
 | | ...
@@ -2650,7 +2665,7 @@
 | | ...
 | | [Arguments] | ${nf_chains}=${1} | ${nf_chain}=${1} | ${nf_nodes}=${1}
 | | ... | ${jumbo}=${False} | ${perf_qemu_qsz}=${256}
-| | ... | ${use_tuned_cfs}=${False}
+| | ... | ${use_tuned_cfs}=${False} | ${auto_scale}=${False}
 | | ...
 | | ${duts}= | Get Matches | ${nodes} | DUT*
 | | :FOR | ${dut} | IN | @{duts}
@@ -2658,6 +2673,7 @@
 | | | ... | ${dut} | nf_chains=${nf_chains} | nf_chain=${nf_chain}
 | | | ... | nf_nodes=${nf_nodes} | jumbo=${jumbo}
 | | | ... | perf_qemu_qsz=${perf_qemu_qsz} | use_tuned_cfs=${False}
+| | | ... | auto_scale=${auto_scale}
 
 Configure chains of NFs with dpdk-testpmd-mac connected via vhost-user
 | | [Documentation]
@@ -2673,6 +2689,8 @@
 | | ... | - perf_qemu_qsz - Virtio Queue Size. Type: integer
 | | ... | - use_tuned_cfs - Set True if CFS RR should be used for Qemu SMP.
 | | ... | Type: boolean
+| | ... | - auto_scale - Whether the vhost interface in the VM uses the same
+| | ... | number of RXQs as the vswitch, otherwise a single RXQ. Type: boolean
 | | ...
 | | ... | *Example:*
 | | ...
@@ -2681,13 +2699,15 @@
 | | ...
 | | [Arguments] | ${nf_chains}=${1} | ${nf_nodes}=${1} | ${jumbo}=${False}
 | | ... | ${perf_qemu_qsz}=${256} | ${use_tuned_cfs}=${False}
+| | ... | ${auto_scale}=${False}
 | | ...
 | | :FOR | ${nf_chain} | IN RANGE | 1 | ${nf_chains}+1
 | | | Configure chain of NFs with dpdk-testpmd-mac connected via vhost-user
 | | | ... | nf_chains=${nf_chains} | nf_chain=${nf_chain} | nf_nodes=${nf_nodes}
 | | | ... | jumbo=${jumbo} | perf_qemu_qsz=${perf_qemu_qsz}
-| | | ... | use_tuned_cfs=${False}
+| | | ... | use_tuned_cfs=${False} | auto_scale=${auto_scale}
 | | All VPP Interfaces Ready Wait | ${nodes}
+| | VPP round robin RX placement on all DUTs | ${nodes} | prefix=Virtual
 
 Initialize LISP IPv4 forwarding in 3-node circular topology
 | | [Documentation] | Custom setup of IPv4 addresses on all DUT nodes and TG \
@@ -3020,6 +3040,7 @@
 | | | Initialize L2 xconnect with memif pairs on DUT node | ${dut} | ${count}
 | | Set interfaces in path up
 | | Show Memif on all DUTs | ${nodes}
+| | VPP round robin RX placement on all DUTs | ${nodes} | prefix=memif
 
 Initialize L2 Bridge Domain with memif pairs on DUT node
 | | [Documentation]
@@ -3115,6 +3136,7 @@
 | | | ... | nf_nodes=${nf_nodes} | auto_scale=${auto_scale}
 | | Set interfaces in path up
 | | Show Memif on all DUTs | ${nodes}
+| | VPP round robin RX placement on all DUTs | ${nodes} | prefix=memif
 
 Initialize L2 Bridge Domain for pipeline with memif pairs
 | | [Documentation]
@@ -3185,6 +3207,7 @@
 | | | ... | auto_scale=${auto_scale}
 | | Set interfaces in path up
 | | Show Memif on all DUTs | ${nodes}
+| | VPP round robin RX placement on all DUTs | ${nodes} | prefix=memif
 
 Initialize L2 Bridge Domain with memif pairs and VLAN in circular topology
 | | [Documentation]
@@ -3261,6 +3284,7 @@
 | | ... | Add interface to bridge domain | ${dut2} | ${dut2_if2} | ${bd_id2}
 | | ...
 | | Show Memif on all DUTs | ${nodes}
+| | VPP round robin RX placement on all DUTs | ${nodes} | prefix=memif
 
 Initialize IPv4 routing with memif pairs on DUT node
 | | [Documentation]
@@ -3405,6 +3429,7 @@
 | | | Initialize IPv4 routing with memif pairs on DUT node | ${dut} | ${count}
 | | Set interfaces in path up
 | | Show Memif on all DUTs | ${nodes}
+| | VPP round robin RX placement on all DUTs | ${nodes} | prefix=memif
 
 Initialize L2 xconnect for single memif
 | | [Documentation]
@@ -3438,6 +3463,7 @@
 | | | ... | ${${dut}-memif-${number}-if1}
 | | Set single interfaces in path up
 | | Show Memif on all DUTs | ${nodes}
+| | VPP round robin RX placement on all DUTs | ${nodes} | prefix=memif
 
 Initialize L2 Bridge Domain for single memif
 | | [Documentation]
-- 
2.16.6
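
The core of the change is the queue-to-worker mapping in
vpp_round_robin_rx_placement(): RX queues reported by
sw_interface_rx_placement_dump are spread over the VPP workers reported by
show_threads (one less than the thread count, excluding the main thread), and
each new assignment is pushed back via sw_interface_set_rx_placement. The
standalone sketch below is illustrative only, not part of the patch; the
interface data and worker count are invented, and the patch's interface-name
prefix filter is omitted for brevity.

    # rr_placement_sketch.py - illustrative sketch, not CSIT code.

    def round_robin_assignments(placements, worker_cnt):
        """Spread RX queues over VPP workers in round-robin order.

        :param placements: Dicts with 'sw_if_index' and 'queue_id' keys,
            one per (interface, queue) pair, as the rx-placement dump
            reports them.
        :param worker_cnt: Number of VPP worker threads (main excluded).
        :returns: List of (sw_if_index, queue_id, worker_id) triples.
        """
        if worker_cnt < 1:
            # The patch divides by worker_cnt, so at least one worker
            # thread is assumed to exist.
            raise ValueError('At least one VPP worker thread is required!')
        assignments = []
        worker_id = 0
        for placement in sorted(placements, key=lambda k: k['sw_if_index']):
            assignments.append((placement['sw_if_index'],
                                placement['queue_id'],
                                worker_id % worker_cnt))
            worker_id += 1
        return assignments

    if __name__ == '__main__':
        # Two 2-queue vhost interfaces spread over 3 workers:
        # the 4 queues land on workers 0, 1, 2, 0 in turn.
        placements = [
            {'sw_if_index': 5, 'queue_id': 0},
            {'sw_if_index': 5, 'queue_id': 1},
            {'sw_if_index': 6, 'queue_id': 0},
            {'sw_if_index': 6, 'queue_id': 1},
        ]
        for ifc, queue, worker in round_robin_assignments(placements, 3):
            print('sw_if_index {0} queue {1} -> worker {2}'.format(
                ifc, queue, worker))

In the patch itself, worker_id advances only for interfaces whose name matches
the given prefix ('Virtual' for vhost, 'memif' for memif), so other interfaces
keep the placement VPP assigned at startup.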