perf: hoststack iperf3 test tuning 55/24455/17
author Dave Wallace <dwallacelf@gmail.com>
Tue, 21 Jan 2020 19:02:14 +0000 (19:02 +0000)
committer Vratko Polak <vrpolak@cisco.com>
Tue, 28 Jan 2020 14:35:46 +0000 (14:35 +0000)
Change-Id: I53425f57fe9ecef9cff2c94642cc7cb24537a961
Signed-off-by: Dave Wallace <dwallacelf@gmail.com>
12 files changed:
resources/libraries/python/Constants.py
resources/libraries/python/DUTSetup.py
resources/libraries/python/HoststackUtil.py
resources/libraries/python/VppConfigGenerator.py
resources/libraries/python/autogen/Regenerator.py
resources/libraries/python/autogen/Testcase.py
resources/libraries/robot/hoststack/hoststack.robot
resources/libraries/robot/shared/default.robot
resources/libraries/robot/shared/interfaces.robot
resources/libraries/robot/shared/suite_teardown.robot
resources/tools/testbed-setup/ansible/roles/cleanup/tasks/sut.yaml
tests/vpp/perf/hoststack/10ge2p1x710-eth-ip4tcp-ldpreload-iperf3-bps.robot

index 4e60009..5ffc7c0 100644 (file)
@@ -152,7 +152,7 @@ class Constants:
     RESOURCES_TP_WRK_WWW = u"resources/traffic_profiles/wrk/www"
 
     # VPP Communications Library LD_PRELOAD library
-    VCL_LDPRELOAD_LIBRARY=u"/usr/lib/x86_64-linux-gnu/libvcl_ldpreload.so"
+    VCL_LDPRELOAD_LIBRARY = u"/usr/lib/x86_64-linux-gnu/libvcl_ldpreload.so"
 
     # OpenVPP VAT binary name
     VAT_BIN_NAME = u"vpp_api_test"
index 0c0e831..ec0a796 100644 (file)
@@ -175,24 +175,24 @@ class DUTSetup:
         else:
             shell_cmd = f"ip netns exec {namespace} sh -c"
 
-        pgrep_cmd = f"{shell_cmd} \'pgrep {program}\'"
-        ret_code, _, _ = exec_cmd(node, pgrep_cmd, timeout=cmd_timeout,
-                                  sudo=True)
-        if ret_code == 0:
+        pgrep_cmd = f"{shell_cmd} \'pgrep -c {program}\'"
+        _, stdout, _ = exec_cmd(node, pgrep_cmd, timeout=cmd_timeout,
+                                sudo=True)
+        if int(stdout) == 0:
             logger.trace(f"{program} is not running on {host}")
             return
-        ret_code, _, _ = exec_cmd(node, f"{shell_cmd} \'pkill {program}\'",
-                                  timeout=cmd_timeout, sudo=True)
+        exec_cmd(node, f"{shell_cmd} \'pkill {program}\'",
+                 timeout=cmd_timeout, sudo=True)
         for attempt in range(5):
-            ret_code, _, _ = exec_cmd(node, pgrep_cmd, timeout=cmd_timeout,
-                                      sudo=True)
-            if ret_code != 0:
+            _, stdout, _ = exec_cmd(node, pgrep_cmd, timeout=cmd_timeout,
+                                    sudo=True)
+            if int(stdout) == 0:
                 logger.trace(f"Attempt {attempt}: {program} is dead on {host}")
                 return
             sleep(1)
         logger.trace(f"SIGKILLing {program} on {host}")
-        ret_code, _, _ = exec_cmd(node, f"{shell_cmd} \'pkill -9 {program}\'",
-                                  timeout=cmd_timeout, sudo=True)
+        exec_cmd(node, f"{shell_cmd} \'pkill -9 {program}\'",
+                 timeout=cmd_timeout, sudo=True)
 
     @staticmethod
     def verify_program_installed(node, program):
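
Note: the kill logic now keys off the process count printed by `pgrep -c` rather than pgrep's exit code, and no longer captures return codes it never used. A minimal standalone sketch of the resulting detect/terminate/escalate pattern (plain `subprocess` stands in for CSIT's `exec_cmd`; names are illustrative):

```python
import subprocess
import time

def kill_program(program, attempts=5):
    """Kill a program by name, escalating to SIGKILL if needed (sketch)."""
    def running_count():
        # pgrep -c prints the number of matching processes (0 if none).
        out = subprocess.run(
            ["pgrep", "-c", program], capture_output=True, text=True
        ).stdout.strip()
        return int(out) if out else 0

    if running_count() == 0:
        return  # Nothing to kill.
    subprocess.run(["pkill", program])  # Polite SIGTERM first.
    for _ in range(attempts):
        if running_count() == 0:
            return
        time.sleep(1)
    subprocess.run(["pkill", "-9", program])  # Escalate to SIGKILL.
```

`pgrep -c` prints `0` when nothing matches, which is what lets the same command serve as both the pre-check and the post-kill verification.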
index ad95d51..670a81b 100644 (file)
@@ -94,9 +94,9 @@ class HoststackUtil():
             if u"parallel" in iperf3_attributes:
                 iperf3_cmd[u"args"] += \
                     f" --parallel {iperf3_attributes[u'parallel']}"
-            if u"bytes" in iperf3_attributes:
+            if u"time" in iperf3_attributes:
                 iperf3_cmd[u"args"] += \
-                    f" --bytes {iperf3_attributes[u'bytes']}"
+                    f" --time {iperf3_attributes[u'time']}"
         return iperf3_cmd
 
     @staticmethod
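
Note: the iperf3 client is now driven by a fixed test duration (`--time`) instead of a byte-count target (`--bytes`), so runtimes stay bounded regardless of achieved throughput. A sketch of the argument assembly, mirroring the branch above:

```python
def build_iperf3_args(attrs):
    """Assemble iperf3 client args as get_iperf3_command does (sketch)."""
    args = ""
    if "parallel" in attrs:
        args += f" --parallel {attrs['parallel']}"
    if "time" in attrs:
        # Duration-based run replaces the old --bytes volume target.
        args += f" --time {attrs['time']}"
    return args

# With the new suite defaults this yields " --parallel 1 --time 20":
print(build_iperf3_args({"parallel": 1, "time": 20}))
```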
@@ -162,20 +162,22 @@ class HoststackUtil():
         return stdout_log, stderr_log
 
     @staticmethod
-    def start_hoststack_test_program(node, namespace, program):
+    def start_hoststack_test_program(node, namespace, core_list, program):
         """Start the specified HostStack test program.
 
         :param node: DUT node.
         :param namespace: Net Namespace to run program in.
+        :param core_list: List of CPUs to pass to taskset, pinning the test
+            program to a different set of cores on the same NUMA node as VPP.
         :param program: Test program.
         :type node: dict
         :type namespace: str
+        :type core_list: str
         :type program: dict
         :returns: Process ID
         :rtype: int
         :raises RuntimeError: If node subtype is not a DUT or startup failed.
         """
-        # TODO: Pin test program to core(s) on same numa node as VPP.
         if node[u"type"] != u"DUT":
             raise RuntimeError(u"Node type is not a DUT!")
 
@@ -189,8 +191,8 @@ class HoststackUtil():
 
         env_vars = f"{program[u'env_vars']} " if u"env_vars" in program else u""
         args = program[u"args"]
-        cmd = f"nohup {shell_cmd} \'{env_vars}{program_name} {args} " \
-            f">/tmp/{program_name}_stdout.log " \
+        cmd = f"nohup {shell_cmd} \'{env_vars}taskset --cpu-list {core_list} " \
+            f"{program_name} {args} >/tmp/{program_name}_stdout.log " \
             f"2>/tmp/{program_name}_stderr.log &\'"
         try:
             exec_cmd_no_error(node, cmd, sudo=True)
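
Note: the pinning itself is just a `taskset --cpu-list` prefix on the test program, wrapped in the same `nohup ... &` shell command as before. A sketch reproducing the command string built above (the example invocation is illustrative):

```python
def hoststack_cmd(shell_cmd, env_vars, core_list, program_name, args):
    """Reproduce the nohup/taskset command line from the diff (sketch)."""
    return (
        f"nohup {shell_cmd} '{env_vars}taskset --cpu-list {core_list} "
        f"{program_name} {args} >/tmp/{program_name}_stdout.log "
        f"2>/tmp/{program_name}_stderr.log &'"
    )

# e.g. pin an iperf3 server to core 3 in the default namespace:
print(hoststack_cmd("sh -c", "", "3", "iperf3", "-s"))
```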
index 3318f57..7a687ae 100644 (file)
@@ -495,6 +495,15 @@ class VppConfigGenerator:
         path = [u"nat"]
         self.add_config_item(self._nodeconfig, value, path)
 
+    def add_tcp_congestion_control_algorithm(self, value=u"cubic"):
+        """Add TCP congestion control algorithm.
+
+        :param value: The congestion control algorithm to use. Example: cubic
+        :type value: str
+        """
+        path = [u"tcp", u"cc-algo"]
+        self.add_config_item(self._nodeconfig, value, path)
+
     def add_tcp_preallocated_connections(self, value):
         """Add TCP pre-allocated connections.
 
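
Note: `add_config_item` stores the value under the nested `tcp` / `cc-algo` path, which the generator later renders as a `tcp { cc-algo ... }` stanza in startup.conf. A simplified stand-in for the nested-dict mechanics (the rendering comment is approximate):

```python
def add_config_item(config, value, path):
    """Store value under a nested path, as VppConfigGenerator does (sketch)."""
    node = config
    for key in path[:-1]:
        node = node.setdefault(key, {})
    node[path[-1]] = value

config = {}
add_config_item(config, "cubic", ["tcp", "cc-algo"])
# Renders in startup.conf roughly as:
#   tcp {
#     cc-algo cubic
#   }
print(config)  # {'tcp': {'cc-algo': 'cubic'}}
```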
index cb0d332..5b112f5 100644 (file)
@@ -280,8 +280,8 @@ def write_default_files(in_filename, in_prolog, kwargs_list):
                 iface, suite_id, suite_tag = get_iface_and_suite_ids(
                     out_filename
                 )
-                # The next replace is probably a noop, but it is safer to maintain
-                # the same structure as for other edits.
+                # The next replace is probably a noop, but it is safer to
+                # maintain the same structure as for other edits.
                 out_prolog = replace_defensively(
                     out_prolog, old_suite_tag, suite_tag, 1,
                     f"Perf suite tag {old_suite_tag} should appear once.",
@@ -466,27 +466,23 @@ class Regenerator:
              u"streams": 1, u"bytes_str": u"1G"}
         ]
         hoststack_wrk_kwargs_list = [
-            {u"phy_cores": i, u"frame_size": 0, u"clients": 1,
+            {u"frame_size": 0, u"phy_cores": i, u"clients": 1,
              u"streams": 1, u"bytes_str": u"1G"} for i in (1, 2, 4)
         ]
         hoststack_iperf3_kwargs_list = [
-            {u"phy_cores": 1, u"frame_size": 0, u"clients": 1,
+            {u"frame_size": 0, u"phy_cores": 1, u"clients": 1,
              u"streams": 1, u"bytes_str": u"1G"},
-            {u"phy_cores": 1, u"frame_size": 0, u"clients": 1,
-             u"streams": 10, u"bytes_str": u"10G"},
-            {u"phy_cores": 2, u"frame_size": 0, u"clients": 1,
-             u"streams": 10, u"bytes_str": u"10G"},
-            {u"phy_cores": 4, u"frame_size": 0, u"clients": 1,
-             u"streams": 10, u"bytes_str": u"10G"},
+            {u"frame_size": 0, u"phy_cores": 1, u"clients": 1,
+             u"streams": 10, u"bytes_str": u"1G"},
         ]
         hoststack_quic_kwargs_list = [
-            {u"phy_cores": 1, u"frame_size": 0, u"clients": 1,
+            {u"frame_size": 0, u"phy_cores": 1, u"clients": 1,
              u"streams": 1, u"bytes_str": u"100M"},
-            {u"phy_cores": 1, u"frame_size": 0, u"clients": 1,
+            {u"frame_size": 0, u"phy_cores": 1, u"clients": 1,
              u"streams": 10, u"bytes_str": u"100M"},
-            {u"phy_cores": 1, u"frame_size": 0, u"clients": 10,
+            {u"frame_size": 0, u"phy_cores": 1, u"clients": 10,
              u"streams": 1, u"bytes_str": u"100M"},
-            {u"phy_cores": 1, u"frame_size": 0, u"clients": 10,
+            {u"frame_size": 0, u"phy_cores": 1, u"clients": 10,
              u"streams": 10, u"bytes_str": u"100M"},
         ]
 
@@ -516,7 +512,7 @@ class Regenerator:
             elif in_filename[-10:] in (u"-cps.robot", u"-rps.robot"):
                 write_tcp_files(in_filename, in_prolog,
                                 hoststack_wrk_kwargs_list)
-            elif in_filename[-10:] in (u"-bps.robot"):
+            elif in_filename[-10:] in u"-bps.robot":
                 write_tcp_files(in_filename, in_prolog,
                                 hoststack_iperf3_kwargs_list if u"iperf3"
                                 in in_filename else hoststack_quic_kwargs_list)
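
Note on the `-bps.robot` branch: `(u"-bps.robot")` was never a tuple, just a parenthesized string, so both the old and new forms are substring tests. Because `in_filename[-10:]` is exactly ten characters for any sufficiently long filename, an equal-length substring test degenerates to equality, which is why the check behaves like `== u"-bps.robot"`:

```python
name = "10ge2p1x710-eth-ip4tcp-ldpreload-iperf3-bps.robot"
suffix = name[-10:]                  # "-bps.robot", always 10 chars here
print(suffix in "-bps.robot")        # True: equal-length substring == equality
print("x-bps.robo" in "-bps.robot")  # False: any other 10-char slice fails
```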
index 6b4cfb2..c540d8c 100644 (file)
@@ -122,9 +122,15 @@ class Testcase:
 | | [Tags] | ${{cores_str}}C
 | | phy_cores=${{cores_num}}
 '''
+        elif u"iperf3" in suite_id:
+            template_string = f'''
+| ${{tc_num}}-9000B-${{cores_str}}c-{suite_id}
+| | [Tags] | ${{cores_str}}C | ${{clients_str}}CLIENT | ${{streams_str}}STREAM
+| | phy_cores=${{cores_num}} | clients=${{clients_num}}'''
+            template_string += f" | streams=${{streams_num}}\n"
         else:
             template_string = f'''
-| ${{tc_num}}-IMIX-${{cores_str}}c-{suite_id}
+| ${{tc_num}}-9000B-${{cores_str}}c-{suite_id}
 | | [Tags] | ${{cores_str}}C | ${{clients_str}}CLIENT | ${{streams_str}}STREAM
 | | phy_cores=${{cores_num}} | clients=${{clients_num}}'''
             template_string += f" | streams=${{streams_num}}" \
index cdf35f2..ed1e7b7 100644 (file)
 | ${quic_crypto_engine}= | nocrypto
 | ${quic_fifo_size}= | 4M
 | &{vpp_hoststack_attr}=
-| ... | rxq=${None}
+| ... | rxq=${1}
+| ... | rxd=${256}
+| ... | txd=${256}
 | ... | phy_cores=${1}
 | ... | vpp_api_socket=${SOCKSVR_PATH}
 | ... | api_seg_global_size=2G
 | ... | api_seg_api_size=1G
-| ... | sess_evt_q_seg_size=4G
-| ... | sess_evt_q_length=4000000
-| ... | sess_prealloc_sess=4000000
-| ... | sess_v4_tbl_buckets=2000000
-| ... | sess_v4_tbl_mem=2G
-| ... | sess_v4_hopen_buckets=5000000
-| ... | sess_v4_hopen_mem=3G
-| ... | sess_lendpt_buckets=5000000
-| ... | sess_lendpt_mem=3G
+| ... | tcp_cc_algo=cubic
+| ... | sess_evt_q_seg_size=64M
+| ... | sess_evt_q_length=2048
+| ... | sess_prealloc_sess=128
+| ... | sess_v4_tbl_buckets=20000
+| ... | sess_v4_tbl_mem=64M
+| ... | sess_v4_hopen_buckets=20000
+| ... | sess_v4_hopen_mem=64M
+| ... | sess_lendpt_buckets=250000
+| ... | sess_lendpt_mem=512M
 | &{vpp_echo_server_attr}=
 | ... | role=server
+| ... | cpu_cnt=${1}
 | ... | cfg_vpp_feature=${None}
 | ... | namespace=default
 | ... | vpp_api_socket=${vpp_hoststack_attr}[vpp_api_socket]
@@ -59,6 +63,7 @@
 | ... | tx_results_diff=${False}
 | &{vpp_echo_client_attr}=
 | ... | role=client
+| ... | cpu_cnt=${1}
 | ... | cfg_vpp_feature=${None}
 | ... | namespace=default
 | ... | vpp_api_socket=${vpp_hoststack_attr}[vpp_api_socket]
@@ -76,6 +81,7 @@
 | ... | tx_results_diff=${False}
 | &{iperf3_server_attr}=
 | ... | role=server
+| ... | cpu_cnt=${1}
 | ... | cfg_vpp_feature=${Empty}
 | ... | namespace=default
 | ... | vcl_config=vcl_iperf3.conf
@@ -85,6 +91,7 @@
 | ... | ip_version=${4}
 | &{iperf3_client_attr}=
 | ... | role=client
+| ... | cpu_cnt=${1}
 | ... | cfg_vpp_feature=${Empty}
 | ... | namespace=default
 | ... | vcl_config=vcl_iperf3.conf
 | ... | ip_version=${4}
 | ... | ip_address=${EMPTY}
 | ... | parallel=${1}
-| ... | bytes=1G
+| ... | time=${20}
 
 *** Keywords ***
 | Set VPP Hoststack Attributes
 | | ... | Set the VPP HostStack attributes in the vpp_hoststack_attr dictionary.
 | |
 | | ... | *Arguments:*
-| | ... | - ${rxq} - Type: int
+| | ... | - ${rxq} - Number of Rx Queues Type: int
+| | ... | - ${rxd} - Number of Rx Descriptors Type: int
+| | ... | - ${txd} - Number of Tx Descriptors Type: int
 | | ... | - ${phy_cores} - Number of cores for workers Type: int
 | | ... | - ${vpp_api_socket} - Path to VPP api socket file Type: string
 | | ... | - ${api_seg_global_size} - Global API segment size Type: string
 | | ... | - ${api_seg_api_size} - API segment API fifo size Type: string
+| | ... | - ${tcp_cc_algo} - TCP congestion control algorithm Type: string
 | | ... | - ${sess_evt_q_seg_size} - Session event queue segment size
 | | ... | Type: string
 | | ... | - ${sess_evt_q_length} - Session event queue length Type: string
 | |
 | | [Arguments]
 | | ... | ${rxq}=${vpp_hoststack_attr}[rxq]
+| | ... | ${rxd}=${vpp_hoststack_attr}[rxd]
+| | ... | ${txd}=${vpp_hoststack_attr}[txd]
 | | ... | ${phy_cores}=${vpp_hoststack_attr}[phy_cores]
 | | ... | ${vpp_api_socket}=${vpp_hoststack_attr}[vpp_api_socket]
 | | ... | ${api_seg_global_size}=${vpp_hoststack_attr}[api_seg_global_size]
 | | ... | ${api_seg_api_size}=${vpp_hoststack_attr}[api_seg_api_size]
+| | ... | ${tcp_cc_algo}=${vpp_hoststack_attr}[tcp_cc_algo]
 | | ... | ${sess_evt_q_seg_size}=${vpp_hoststack_attr}[sess_evt_q_seg_size]
 | | ... | ${sess_evt_q_length}=${vpp_hoststack_attr}[sess_evt_q_length]
 | | ... | ${sess_prealloc_sess}=${vpp_hoststack_attr}[sess_prealloc_sess]
 | | ... | ${sess_lendpt_mem}=${vpp_hoststack_attr}[sess_lendpt_mem]
 | |
 | | Set To Dictionary | ${vpp_hoststack_attr} | rxq | ${rxq}
+| | Set To Dictionary | ${vpp_hoststack_attr} | rxd | ${rxd}
+| | Set To Dictionary | ${vpp_hoststack_attr} | txd | ${txd}
 | | Set To Dictionary | ${vpp_hoststack_attr} | phy_cores | ${phy_cores}
 | | Set To Dictionary | ${vpp_hoststack_attr}
 | | ... | vpp_api_socket | ${vpp_api_socket}
 | | Set To Dictionary | ${vpp_hoststack_attr}
 | | ... | api_seg_api_size | ${api_seg_api_size}
 | | Set To Dictionary | ${vpp_hoststack_attr}
+| | ... | tcp_cc_algo | ${tcp_cc_algo}
+| | Set To Dictionary | ${vpp_hoststack_attr}
 | | ... | sess_evt_q_seg_size | ${sess_evt_q_seg_size}
 | | Set To Dictionary | ${vpp_hoststack_attr}
 | | ... | sess_evt_q_length | ${sess_evt_q_length}
 | |
 | | ... | *Arguments:*
 | | ... | - ${vcl_config} - VCL configuration file name Type: string
-| | ... | - ${ld_preload} - Use the VCL LD_PRELOAD library Type: bool
-| | ... | - ${transparent_tls} - Use VCL Transparent-TLS mode Type: bool
+| | ... | - ${ld_preload} - Use the VCL LD_PRELOAD library Type: boolean
+| | ... | - ${transparent_tls} - Use VCL Transparent-TLS mode Type: boolean
 | | ... | - ${ip_version} - IP version (4 or 6) Type: int
 | |
 | | ... | *Example:*
 | |
 | | ... | *Arguments:*
 | | ... | - ${vcl_config} - VCL configuration file name Type: string
-| | ... | - ${ld_preload} - Use the VCL LD_PRELOAD library Type: bool
-| | ... | - ${transparent_tls} - Use VCL Transparent-TLS mode Type: bool
+| | ... | - ${ld_preload} - Use the VCL LD_PRELOAD library Type: boolean
+| | ... | - ${transparent_tls} - Use VCL Transparent-TLS mode Type: boolean
 | | ... | - ${ip_version} - IP version (4 or 6) Type: int
 | | ... | - ${parallel} - Number of parallel streams Type: int
-| | ... | - ${bytes} - Number of bytes to send Type: string
 | |
 | | ... | *Example:*
 | |
 | | ... | ${transparent_tls}=${iperf3_client_attr}[transparent_tls]
 | | ... | ${ip_version}=${iperf3_client_attr}[ip_version]
 | | ... | ${parallel}=${iperf3_client_attr}[parallel]
-| | ... | ${bytes}=${iperf3_client_attr}[bytes]
 | |
 | | Set To Dictionary | ${iperf3_client_attr} | vcl_config | ${vcl_config}
 | | Set To Dictionary | ${iperf3_client_attr} | ld_preload | ${ld_preload}
 | | ... | ${transparent_tls}
 | | Set To Dictionary | ${iperf3_client_attr} | ip_version | ${ip_version}
 | | Set To Dictionary | ${iperf3_client_attr} | parallel | ${parallel}
-| | Set To Dictionary | ${iperf3_client_attr} | bytes | ${bytes}
 
 | Run hoststack test program on DUT
 | | [Documentation]
 | | ... | - ${namespace} - Network namespace to run test program in Type: string
 | | ... | - ${cfg_vpp_feature} - VPP hoststack feature requiring
 | | ... | additional VPP configuration Type: string
+| | ... | - ${core_list} - CPU core affinity list Type: string
 | | ... | - ${test_program} - Host Stack test program Type: dict
 | |
 | | ... | *Example:*
 | | ... | \| quic \| ${vpp_echo_server} \|
 | |
 | | [Arguments] | ${node} | ${intf} | ${ip4_addr} | ${ip4_mask}
-| | | ... | ${namespace} | ${cfg_vpp_feature} | ${test_program}
+| | | ... | ${namespace} | ${core_list} | ${cfg_vpp_feature}
+| | | ... | ${test_program}
 | |
 | | Run Keyword If | ${vpp_nsim_attr}[output_feature_enable]
 | | ... | Configure VPP NSIM | ${node} | ${vpp_nsim_attr} | ${intf}
 | | ... | ${ip4_mask}
 | | Vpp Node Interfaces Ready Wait | ${node}
 | | ${hoststack_test_program_pid}= | Start Hoststack Test Program
-| | ... | ${node} | ${namespace} | ${test_program}
+| | ... | ${node} | ${namespace} | ${core_list} | ${test_program}
 | | Return From Keyword | ${hoststack_test_program_pid}
 
 | Additional VPP Config For Feature quic
 | | Set Max Rate And Jumbo
 | | Add worker threads to all DUTs
 | | ... | ${vpp_hoststack_attr}[phy_cores] | ${vpp_hoststack_attr}[rxq]
+| | ... | ${vpp_hoststack_attr}[rxd] | ${vpp_hoststack_attr}[txd]
 | | Pre-initialize layer driver | ${nic_driver}
 | | FOR | ${dut} | IN | @{duts}
 | | | Import Library | resources.libraries.python.VppConfigGenerator
 | | | Run keyword | ${dut}.Add api segment api size
 | | | ... | ${vpp_hoststack_attr}[api_seg_api_size]
 | | | Run keyword | ${dut}.Add api segment gid | testuser
+| | | Run keyword | ${dut}.Add tcp congestion control algorithm
+| | | ... | ${vpp_hoststack_attr}[tcp_cc_algo]
 | | | Run keyword | ${dut}.Add session enable
 | | | Run keyword | ${dut}.Add session event queues memfd segment
 | | | Run keyword | ${dut}.Add session event queues segment size
 | | ... | ${dut2_if1_ip4_addr}
 | | Configure VPP Hoststack Attributes on all DUTs
 | | ${vpp_echo_server}= | Get VPP Echo Command | ${vpp_echo_server_attr}
+| | ${skip_cnt}= | Evaluate
+| | ... | ${CPU_CNT_SYSTEM} + ${CPU_CNT_MAIN} + ${vpp_hoststack_attr}[phy_cores]
+| | ${numa}= | Get interfaces numa node | ${dut2} | ${dut2_if1}
+| | ${core_list}= | Cpu list per node str | ${dut2} | ${numa}
+| | ... | skip_cnt=${skip_cnt} | cpu_cnt=${vpp_echo_server_attr}[cpu_cnt]
 | | ${server_pid}= | Run hoststack test program on DUT
 | | ... | ${dut2} | ${dut2_if1} | ${dut2_if1_ip4_addr} | ${dut2_if1_ip4_prefix}
-| | ... | ${vpp_echo_server_attr}[namespace]
+| | ... | ${vpp_echo_server_attr}[namespace] | ${core_list}
 | | ... | ${vpp_echo_server_attr}[cfg_vpp_feature] | ${vpp_echo_server}
 | | ${vpp_echo_client}= | Get VPP Echo Command | ${vpp_echo_client_attr}
+| | ${numa}= | Get interfaces numa node | ${dut1} | ${dut1_if1}
+| | ${core_list}= | Cpu list per node str | ${dut1} | ${numa}
+| | ... | skip_cnt=${skip_cnt} | cpu_cnt=${vpp_echo_client_attr}[cpu_cnt]
 | | ${client_pid}= | Run hoststack test program on DUT
 | | ... | ${dut1} | ${dut1_if1} | ${dut1_if1_ip4_addr} | ${dut1_if1_ip4_prefix}
-| | ... | ${vpp_echo_client_attr}[namespace]
+| | ... | ${vpp_echo_client_attr}[namespace] | ${core_list}
 | | ... | ${vpp_echo_client_attr}[cfg_vpp_feature] | ${vpp_echo_client}
 | | When Hoststack Test Program Finished | ${dut1} | ${client_pid}
 | | ${client_no_results} | ${client_output}=
 | | ... | ${dut2_if1_ip4_addr}
 | | Configure VPP Hoststack Attributes on all DUTs
 | | ${iperf3_server}= | Get Iperf3 Command | ${iperf3_server_attr}
+| | ${skip_cnt}= | Evaluate
+| | ... | ${CPU_CNT_SYSTEM} + ${CPU_CNT_MAIN} + ${vpp_hoststack_attr}[phy_cores]
+| | ${numa}= | Get interfaces numa node | ${dut2} | ${dut2_if1}
+| | ${core_list}= | Cpu list per node str | ${dut2} | ${numa}
+| | ... | skip_cnt=${skip_cnt} | cpu_cnt=${iperf3_server_attr}[cpu_cnt]
 | | ${server_pid}= | Run hoststack test program on DUT
 | | ... | ${dut2} | ${dut2_if1} | ${dut2_if1_ip4_addr} | ${dut2_if1_ip4_prefix}
-| | ... | ${iperf3_server_attr}[namespace]
+| | ... | ${iperf3_server_attr}[namespace] | ${core_list}
 | | ... | ${iperf3_server_attr}[cfg_vpp_feature] | ${iperf3_server}
 | | ${iperf3_client}= | Get Iperf3 Command | ${iperf3_client_attr}
+| | ${numa}= | Get interfaces numa node | ${dut1} | ${dut1_if1}
+| | ${core_list}= | Cpu list per node str | ${dut1} | ${numa}
+| | ... | skip_cnt=${skip_cnt} | cpu_cnt=${iperf3_client_attr}[cpu_cnt]
 | | ${client_pid}= | Run hoststack test program on DUT
 | | ... | ${dut1} | ${dut1_if1} | ${dut1_if1_ip4_addr} | ${dut1_if1_ip4_prefix}
-| | ... | ${iperf3_client_attr}[namespace]
+| | ... | ${iperf3_client_attr}[namespace] | ${core_list}
 | | ... | ${iperf3_client_attr}[cfg_vpp_feature] | ${iperf3_client}
 | | When Hoststack Test Program Finished | ${dut1} | ${client_pid}
 | | ${client_no_results} | ${client_output}=
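
Note: in both the vpp_echo and iperf3 keywords, test-program cores come from the NUMA node of the DUT's interface, after skipping the system core, VPP's main core, and VPP's `phy_cores` workers; `cpu_cnt` then selects how many cores to hand to taskset. A rough Python sketch of that arithmetic (helper and constant names here are illustrative, not CSIT's actual API):

```python
def pick_test_program_cores(numa_cores, cpu_cnt_system, cpu_cnt_main,
                            phy_cores, cpu_cnt):
    """Pick test-program cores after skipping system/VPP cores (sketch)."""
    skip_cnt = cpu_cnt_system + cpu_cnt_main + phy_cores
    picked = numa_cores[skip_cnt:skip_cnt + cpu_cnt]
    return ",".join(str(c) for c in picked)  # taskset --cpu-list format

# NUMA node 0 has cores 0-7; skip 1 system + 1 main + 1 worker, take 1 core:
print(pick_test_program_cores(list(range(8)), 1, 1, 1, 1))  # "3"
```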
index 04238d7..64365f3 100644 (file)
 | | | ${rxq_count_int}= | Run keyword if | ${rxq_count_int} == 0
 | | | ... | Set variable | ${1}
 | | | ... | ELSE | Set variable | ${rxq_count_int}
-| | | Run keyword if | ${cpu_count_int} > 0
-| | | ... | ${dut}.Add CPU Main Core | ${cpu_main}
+| | | Run Keyword | ${dut}.Add CPU Main Core | ${cpu_main}
 | | | Run keyword if | ${cpu_count_int} > 0
 | | | ... | ${dut}.Add CPU Corelist Workers | ${cpu_wt}
 | | | Run keyword if | ${smt_used}
index dd6ee56..1639488 100644 (file)
@@ -15,6 +15,9 @@
 | Library | resources.libraries.python.NodePath
 | Library | resources.libraries.python.VhostUser
 
+*** Variables ***
+| ${dpdk_no_tx_checksum_offload}= | ${True}
+
 *** Keywords ***
 | Set interfaces in path up
 | | [Documentation]
 | |
 | | Add DPDK pci devices to all DUTs
 | | FOR | ${dut} | IN | @{duts}
-| | | Run Keyword | ${dut}.Add DPDK No Tx Checksum Offload
+| | | Run Keyword If | ${dpdk_no_tx_checksum_offload}
+| | | ... | ${dut}.Add DPDK No Tx Checksum Offload
 | | | Run Keyword | ${dut}.Add DPDK Log Level | debug
 | | | Run Keyword | ${dut}.Add DPDK Uio Driver | vfio-pci
 | | | Run Keyword | ${dut}.Add DPDK Dev Default RXQ | ${rxq_count_int}
index 881f796..f000679 100644 (file)
@@ -16,6 +16,7 @@
 *** Settings ***
 | Library | resources.libraries.python.DPDK.DPDKTools
 | Library | resources.libraries.python.TrafficGenerator
+| Library | resources.libraries.python.DUTSetup
 |
 | Documentation | Suite teardown keywords.
 
 | | ... | ${tg} | ${tg['interfaces']['${tg_if1}']['pci_address']}
 | | Run Keyword And Ignore Error | PCI Driver Unbind
 | | ... | ${tg} | ${tg['interfaces']['${tg_if2}']['pci_address']}
+
+| Additional Suite Tear Down Action For hoststack
+| | [Documentation]
+| | ... | Additional teardown for suites that use hoststack test programs.
+| | ... | Ensure all hoststack test programs are no longer running on all DUTs.
+| |
+| | FOR | ${dut} | IN | @{duts}
+| | | Kill Program | ${nodes['${dut}']} | iperf3
+| | | Kill Program | ${nodes['${dut}']} | vpp_echo
index 45183e3..b3f8fff 100644 (file)
     process: "testpmd"
   tags: kill-process
 
+- name: Kill processes - iperf3
+  import_tasks: kill_process.yaml
+  vars:
+    process: "iperf3"
+  tags: kill-process
+
+- name: Kill processes - vpp_echo
+  import_tasks: kill_process.yaml
+  vars:
+    process: "vpp_echo"
+  tags: kill-process
+
 - name: Remove file or dir - Core zip file
   file:
     state: absent
index 81b5d05..a907c61 100644 (file)
 | Resource | resources/libraries/robot/hoststack/hoststack.robot
 |
 | Force Tags | 3_NODE_SINGLE_LINK_TOPO | PERFTEST | HW_ENV
-| ... | TCP | NIC_Intel-X710 | DRV_VFIO_PCI
-| ... | eth-ip4tcp-ldpreload-iperf3
+| ... | TCP | NIC_Intel-X710 | DRV_VFIO_PCI | HOSTSTACK
+| ... | LDPRELOAD | IPERF3 | eth-ip4tcp-ldpreload-iperf3
 |
 | Suite Setup | Setup suite single link no tg
-| Suite Teardown | Tear down suite
+| Suite Teardown | Tear down suite | hoststack
 | Test Setup | Setup test
 | Test Teardown | Tear down test
 |
 | ${nic_name}= | Intel-X710
 | ${nic_driver}= | vfio-pci
 | ${overhead}= | ${0}
-| ${frame_size}= | IMIX_v4_1
+| ${frame_size}= | ${9000}
 | ${crypto_type}= | ${None}
 
 *** Keywords ***
 | Local template
-| | [Arguments] | ${phy_cores} | ${clients} | ${streams} | ${bytes}
+| | [Arguments] | ${phy_cores} | ${clients} | ${streams}
 | |
+| | Set Test Variable | ${dpdk_no_tx_checksum_offload} | ${False}
 | | Set VPP Hoststack Attributes | phy_cores=${phy_cores}
-| | Set Iperf3 Client Attributes | parallel=${streams} | bytes=${bytes}
+| | Set Iperf3 Client Attributes | parallel=${streams}
 | | ${no_results}= | Get Test Results From Hoststack Iperf3 Test
 | | Run Keyword If | ${no_results}==True | FAIL
 | | ... | No Test Results From Iperf3 client
 
 *** Test Cases ***
-| tc01-IMIX-1c-eth-ip4tcp-ldpreload-iperf3-bps
+| tc01-9000B-1c-eth-ip4tcp-ldpreload-iperf3-bps
 | | [Tags] | 1C | 1CLIENT | 1STREAM
-| | phy_cores=${1} | clients=${1} | streams=${1} | bytes=1G
+| | phy_cores=${1} | clients=${1} | streams=${1}
 
-| tc02-IMIX-1c-eth-ip4tcp-ldpreload-iperf3-bps
+| tc02-9000B-1c-eth-ip4tcp-ldpreload-iperf3-bps
 | | [Tags] | 1C | 1CLIENT | 10STREAM
-| | phy_cores=${1} | clients=${1} | streams=${10} | bytes=10G
-
-| tc03-IMIX-2c-eth-ip4tcp-ldpreload-iperf3-bps
-| | [Tags] | 2C | 1CLIENT | 10STREAM
-| | phy_cores=${2} | clients=${1} | streams=${10} | bytes=10G
-
-| tc04-IMIX-4c-eth-ip4tcp-ldpreload-iperf3-bps
-| | [Tags] | 4C | 1CLIENT | 10STREAM
-| | phy_cores=${4} | clients=${1} | streams=${10} | bytes=10G
+| | phy_cores=${1} | clients=${1} | streams=${10}