feat(core): Core allocation (change ref 67/39267/5)
author    pmikus <peter.mikus@protonmail.ch>
          Tue, 25 Jul 2023 10:47:12 +0000
committer Peter Mikus <peter.mikus@protonmail.ch>
          Thu, 27 Jul 2023 08:25:02 +0000
Signed-off-by: pmikus <peter.mikus@protonmail.ch>
Change-Id: I782b87190dbee6e0a12c97f616b80539cd6614bd

resources/libraries/python/CpuUtils.py
resources/libraries/python/DPDK/L3fwdTest.py
resources/libraries/python/DPDK/TestpmdTest.py
resources/libraries/python/IPsecUtil.py
resources/libraries/python/InterfaceUtil.py
resources/libraries/robot/crypto/ipsec.robot
resources/libraries/robot/hoststack/hoststack.robot
resources/libraries/robot/shared/default.robot
resources/libraries/robot/shared/vm.robot
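
In short: CpuUtils.get_affinity_vswitch now iterates over all DUT nodes
itself and publishes per-node results under node-prefixed keys (e.g.
DUT1_cpu_dp), so the DPDK test wrappers, IPsecUtil, InterfaceUtil and the
shared Robot keywords no longer pass an explicit node or worker list.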

resources/libraries/python/CpuUtils.py
index 1e306f0..5f43e21 100644
@@ -17,7 +17,7 @@ from robot.libraries.BuiltIn import BuiltIn
 
 from resources.libraries.python.Constants import Constants
 from resources.libraries.python.ssh import exec_cmd_no_error
-from resources.libraries.python.topology import Topology
+from resources.libraries.python.topology import Topology, NodeType
 
 __all__ = [u"CpuUtils"]
 
@@ -499,17 +499,15 @@ class CpuUtils:
 
     @staticmethod
     def get_affinity_vswitch(
-            nodes, node, phy_cores, rx_queues=None, rxd=None, txd=None):
-        """Get affinity for vswitch.
+            nodes, phy_cores, rx_queues=None, rxd=None, txd=None):
+        """Get affinity for vswitch on all DUTs.
 
         :param nodes: Topology nodes.
-        :param node: Topology node string.
         :param phy_cores: Number of physical cores to allocate.
         :param rx_queues: Number of RX queues. (Optional, Default: None)
         :param rxd: Number of RX descriptors. (Optional, Default: None)
         :param txd: Number of TX descriptors. (Optional, Default: None)
         :type nodes: dict
-        :type node: str
         :type phy_cores: int
         :type rx_queues: int
         :type rxd: int
@@ -517,76 +515,82 @@ class CpuUtils:
         :returns: Compute resource information dictionary.
         :rtype: dict
         """
-        # Number of Data Plane physical cores.
-        dp_cores_count = BuiltIn().get_variable_value(
-            f"${{dp_cores_count}}", phy_cores
-        )
-        # Number of Feature Plane physical cores.
-        fp_cores_count = BuiltIn().get_variable_value(
-            f"${{fp_cores_count}}", phy_cores - dp_cores_count
-        )
-        # Ratio between RX queues and data plane threads.
-        rxq_ratio = BuiltIn().get_variable_value(
-            f"${{rxq_ratio}}", 1
-        )
-
-        dut_pf_keys = BuiltIn().get_variable_value(
-            f"${{{node}_pf_keys}}"
-        )
-        # SMT override in case of non standard test cases.
-        smt_used = BuiltIn().get_variable_value(
-            f"${{smt_used}}", CpuUtils.is_smt_enabled(nodes[node][u"cpuinfo"])
-        )
-
-        cpu_node = Topology.get_interfaces_numa_node(nodes[node], *dut_pf_keys)
-        skip_cnt = Constants.CPU_CNT_SYSTEM
-        cpu_main = CpuUtils.cpu_list_per_node_str(
-            nodes[node], cpu_node,
-            skip_cnt=skip_cnt,
-            cpu_cnt=Constants.CPU_CNT_MAIN,
-            smt_used=False
-        )
-        skip_cnt += Constants.CPU_CNT_MAIN
-        cpu_dp = CpuUtils.cpu_list_per_node_str(
-            nodes[node], cpu_node,
-            skip_cnt=skip_cnt,
-            cpu_cnt=int(dp_cores_count),
-            smt_used=smt_used
-        ) if int(dp_cores_count) else u""
-        skip_cnt = skip_cnt + int(dp_cores_count)
-        cpu_fp = CpuUtils.cpu_list_per_node_str(
-            nodes[node], cpu_node,
-            skip_cnt=skip_cnt,
-            cpu_cnt=int(fp_cores_count),
-            smt_used=smt_used
-        ) if int(fp_cores_count) else u""
-
-        fp_count_int = \
-            int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
-            else int(fp_cores_count)
-        dp_count_int = \
-            int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
-            else int(dp_cores_count)
-
-        rxq_count_int = rx_queues if rx_queues else int(dp_count_int/rxq_ratio)
-        rxq_count_int = 1 if not rxq_count_int else rxq_count_int
-
         compute_resource_info = dict()
-        compute_resource_info[u"buffers_numa"] = 215040 if smt_used else 107520
-        compute_resource_info[u"smt_used"] = smt_used
-        compute_resource_info[u"cpu_main"] = cpu_main
-        compute_resource_info[u"cpu_dp"] = cpu_dp
-        compute_resource_info[u"cpu_fp"] = cpu_fp
-        compute_resource_info[u"cpu_wt"] = \
-            u",".join(filter(None, [cpu_dp, cpu_fp]))
-        compute_resource_info[u"cpu_alloc_str"] = \
-            u",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
-        compute_resource_info[u"cpu_count_int"] = \
-            int(dp_cores_count) + int(fp_cores_count)
-        compute_resource_info[u"rxd_count_int"] = rxd
-        compute_resource_info[u"txd_count_int"] = txd
-        compute_resource_info[u"rxq_count_int"] = rxq_count_int
-        compute_resource_info[u"fp_count_int"] = fp_count_int
-        compute_resource_info[u"dp_count_int"] = dp_count_int
+        for node_name, node in nodes.items():
+            if node["type"] != NodeType.DUT:
+                continue
+            # Number of Data Plane physical cores.
+            dp_cores_count = BuiltIn().get_variable_value(
+                f"${{dp_cores_count}}", phy_cores
+            )
+            # Number of Feature Plane physical cores.
+            fp_cores_count = BuiltIn().get_variable_value(
+                f"${{fp_cores_count}}", phy_cores - dp_cores_count
+            )
+            # Ratio between RX queues and data plane threads.
+            rxq_ratio = BuiltIn().get_variable_value(
+                f"${{rxq_ratio}}", 1
+            )
+
+            dut_pf_keys = BuiltIn().get_variable_value(
+                f"${{{node_name}_pf_keys}}"
+            )
+            # SMT override in case of non standard test cases.
+            smt_used = BuiltIn().get_variable_value(
+                f"${{smt_used}}", CpuUtils.is_smt_enabled(node["cpuinfo"])
+            )
+
+            cpu_node = Topology.get_interfaces_numa_node(node, *dut_pf_keys)
+            skip_cnt = Constants.CPU_CNT_SYSTEM
+            cpu_main = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=Constants.CPU_CNT_MAIN,
+                smt_used=False
+            )
+            skip_cnt += Constants.CPU_CNT_MAIN
+            cpu_dp = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=int(dp_cores_count),
+                smt_used=smt_used
+            ) if int(dp_cores_count) else ""
+            skip_cnt = skip_cnt + int(dp_cores_count)
+            cpu_fp = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=int(fp_cores_count),
+                smt_used=smt_used
+            ) if int(fp_cores_count) else ""
+
+            fp_count_int = \
+                int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+                else int(fp_cores_count)
+            dp_count_int = \
+                int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+                else int(dp_cores_count)
+
+            rxq_count_int = \
+                int(rx_queues) if rx_queues \
+                else int(dp_count_int/rxq_ratio)
+            rxq_count_int = 1 if not rxq_count_int else rxq_count_int
+
+            compute_resource_info["buffers_numa"] = \
+                215040 if smt_used else 107520
+            compute_resource_info["smt_used"] = smt_used
+            compute_resource_info[f"{node_name}_cpu_main"] = cpu_main
+            compute_resource_info[f"{node_name}_cpu_dp"] = cpu_dp
+            compute_resource_info[f"{node_name}_cpu_fp"] = cpu_fp
+            compute_resource_info[f"{node_name}_cpu_wt"] = \
+                ",".join(filter(None, [cpu_dp, cpu_fp]))
+            compute_resource_info[f"{node_name}_cpu_alloc_str"] = \
+                ",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
+            compute_resource_info["cpu_count_int"] = \
+                int(dp_cores_count) + int(fp_cores_count)
+            compute_resource_info["rxd_count_int"] = rxd
+            compute_resource_info["txd_count_int"] = txd
+            compute_resource_info["rxq_count_int"] = rxq_count_int
+            compute_resource_info["fp_count_int"] = fp_count_int
+            compute_resource_info["dp_count_int"] = dp_count_int
 
         return compute_resource_info
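
A minimal usage sketch of the refactored keyword (node names and core
lists are illustrative; assumes a CSIT checkout and the usual topology
dict in `nodes`):

    from resources.libraries.python.CpuUtils import CpuUtils

    # One call now covers every DUT; the per-node loop lives inside
    # get_affinity_vswitch instead of in each caller.
    info = CpuUtils.get_affinity_vswitch(nodes, phy_cores=2)

    # Per-DUT results come back under node-prefixed keys.
    cpu_main = info["DUT1_cpu_main"]        # e.g. "1"
    cpu_wt = info["DUT1_cpu_wt"]            # e.g. "2,3" (dp + fp threads)
    alloc = info["DUT2_cpu_alloc_str"]      # e.g. "1,2,3"

    # Keys without a node prefix (buffers_numa, smt_used, cpu_count_int,
    # rxq_count_int, rxd/txd_count_int, fp/dp_count_int) are overwritten
    # on each pass, so they hold the values computed for the last DUT.
    rxq = info["rxq_count_int"]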

resources/libraries/python/DPDK/L3fwdTest.py
index 265806c..178c747 100644
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -56,12 +56,11 @@ class L3fwdTest:
         cpu_count_int = dp_count_int = int(phy_cores)
         dp_cores = cpu_count_int+1
         tg_flip = topology_info[f"tg_if1_pci"] > topology_info[f"tg_if2_pci"]
-        for node in nodes:
-            if u"DUT" in node:
-                compute_resource_info = CpuUtils.get_affinity_vswitch(
-                    nodes, node, phy_cores, rx_queues=rx_queues,
-                    rxd=rxd, txd=txd
-                )
+        compute_resource_info = CpuUtils.get_affinity_vswitch(
+            nodes, phy_cores, rx_queues=rx_queues, rxd=rxd, txd=txd
+        )
+        for node_name, node in nodes.items():
+            if node["type"] == NodeType.DUT:
                 if dp_count_int > 1:
                     BuiltIn().set_tags('MTHREAD')
                 else:
@@ -70,12 +69,12 @@ class L3fwdTest:
                     f"{dp_count_int}T{cpu_count_int}C"
                 )
 
-                cpu_dp = compute_resource_info[u"cpu_dp"]
-                rxq_count_int = compute_resource_info[u"rxq_count_int"]
-                if1 = topology_info[f"{node}_pf1"][0]
-                if2 = topology_info[f"{node}_pf2"][0]
+                cpu_dp = compute_resource_info[f"{node_name}_cpu_dp"]
+                rxq_count_int = compute_resource_info["rxq_count_int"]
+                if1 = topology_info[f"{node_name}_pf1"][0]
+                if2 = topology_info[f"{node_name}_pf2"][0]
                 L3fwdTest.start_l3fwd(
-                    nodes, nodes[node], if1=if1, if2=if2, lcores_list=cpu_dp,
+                    nodes, node, if1=if1, if2=if2, lcores_list=cpu_dp,
                     nb_cores=dp_count_int, queue_nums=rxq_count_int,
                     jumbo_frames=jumbo_frames, tg_flip=tg_flip
                 )
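
Note the DUT selection also moves from name matching to type matching; a
small sketch of the difference (illustrative):

    from resources.libraries.python.topology import NodeType

    # Old: substring match on the node name (dict key).
    duts = [name for name in nodes if "DUT" in name]

    # New: explicit type check on the node object, independent of naming.
    duts = {name: node for name, node in nodes.items()
            if node["type"] == NodeType.DUT}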

resources/libraries/python/DPDK/TestpmdTest.py
index ca98da2..3baba30 100644
@@ -60,12 +60,11 @@ class TestpmdTest:
 
         cpu_count_int = dp_count_int = int(phy_cores)
         dp_cores = cpu_count_int+1
-        for node in nodes:
-            if u"DUT" in node:
-                compute_resource_info = CpuUtils.get_affinity_vswitch(
-                    nodes, node, phy_cores, rx_queues=rx_queues,
-                    rxd=rxd, txd=txd
-                )
+        compute_resource_info = CpuUtils.get_affinity_vswitch(
+            nodes, phy_cores, rx_queues=rx_queues, rxd=rxd, txd=txd
+        )
+        for node_name, node in nodes.items():
+            if node["type"] == NodeType.DUT:
                 if dp_count_int > 1:
                     BuiltIn().set_tags('MTHREAD')
                 else:
@@ -74,12 +73,12 @@ class TestpmdTest:
                     f"{dp_count_int}T{cpu_count_int}C"
                 )
 
-                cpu_dp = compute_resource_info[u"cpu_dp"]
-                rxq_count_int = compute_resource_info[u"rxq_count_int"]
-                if1 = topology_info[f"{node}_pf1"][0]
-                if2 = topology_info[f"{node}_pf2"][0]
+                cpu_dp = compute_resource_info[f"{node_name}_cpu_dp"]
+                rxq_count_int = compute_resource_info["rxq_count_int"]
+                if1 = topology_info[f"{node_name}_pf1"][0]
+                if2 = topology_info[f"{node_name}_pf2"][0]
                 TestpmdTest.start_testpmd(
-                    nodes[node], if1=if1, if2=if2, lcores_list=cpu_dp,
+                    node, if1=if1, if2=if2, lcores_list=cpu_dp,
                     nb_cores=dp_count_int, queue_nums=rxq_count_int,
                     jumbo_frames=jumbo_frames, rxq_size=nic_rxq_size,
                     txq_size=nic_txq_size
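
TestpmdTest mirrors the L3fwdTest change above: the affinity computation
is hoisted out of the per-DUT loop, per-node values are read back through
the node-prefixed keys, and start_testpmd receives the node object
directly instead of nodes[node].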

resources/libraries/python/IPsecUtil.py
index 363558d..39c6a4c 100644
@@ -22,6 +22,8 @@ from ipaddress import ip_network, ip_address
 from random import choice
 from string import ascii_letters
 
+from robot.libraries.BuiltIn import BuiltIn
+
 from resources.libraries.python.Constants import Constants
 from resources.libraries.python.IncrementUtil import ObjIncrement
 from resources.libraries.python.InterfaceUtil import InterfaceUtil, \
@@ -358,25 +360,26 @@ class IPsecUtil:
 
     @staticmethod
     def vpp_ipsec_crypto_sw_scheduler_set_worker_on_all_duts(
-            nodes, workers, crypto_enable=False):
+            nodes, crypto_enable=False):
         """Enable or disable crypto on specific vpp worker threads.
 
         :param nodes: Topology nodes to enable or disable crypto workers on.
-        :param workers: List of VPP thread numbers.
         :param crypto_enable: Disable or enable crypto work.
         :type nodes: dict
-        :type workers: Iterable[int]
         :type crypto_enable: bool
         :raises RuntimeError: If failed to enable or disable crypto for worker
             thread or if no API reply received.
         """
-        for node in nodes.values():
-            if node[u"type"] == NodeType.DUT:
+        for node_name, node in nodes.items():
+            if node["type"] == NodeType.DUT:
                 thread_data = VPPUtil.vpp_show_threads(node)
                 worker_cnt = len(thread_data) - 1
                 if not worker_cnt:
                     return None
                 worker_ids = list()
+                workers = BuiltIn().get_variable_value(
+                    f"${{{node_name}_cpu_dp}}"
+                )
                 for item in thread_data:
                     if str(item.cpu_id) in workers.split(u","):
                         worker_ids.append(item.id)
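
The keyword no longer takes a workers argument; it reads the per-node
dataplane core list from Robot's variable scope, as set earlier by
"Create compute resources variables". A sketch (variable value
illustrative):

    from robot.libraries.BuiltIn import BuiltIn

    node_name = "DUT1"
    workers = BuiltIn().get_variable_value(
        f"${{{node_name}_cpu_dp}}"
    )                                    # e.g. "2,3"
    worker_cpus = workers.split(",")     # ["2", "3"]

One behavioral detail worth noting: the pre-existing `return None` for a
DUT without worker threads now sits inside the per-node loop, so the
first worker-less DUT ends the iteration for all remaining DUTs.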

resources/libraries/python/InterfaceUtil.py
index 42474b4..7d9164d 100644
@@ -18,6 +18,7 @@ from enum import IntEnum
 
 from ipaddress import ip_address
 from robot.api import logger
+from robot.libraries.BuiltIn import BuiltIn
 
 from resources.libraries.python.Constants import Constants
 from resources.libraries.python.DUTSetup import DUTSetup
@@ -2013,7 +2014,7 @@ class InterfaceUtil:
 
     @staticmethod
     def vpp_round_robin_rx_placement_on_all_duts(
-            nodes, prefix, workers=None):
+            nodes, prefix, use_dp_cores=False):
         """Set Round Robin interface RX placement on worker threads
         on all DUTs.
 
@@ -2024,14 +2025,18 @@ class InterfaceUtil:
 
         :param nodes: Topology nodes.
         :param prefix: Interface name prefix.
-        :param workers: Comma separated worker index numbers intended for
-            dataplane work.
+        :param use_dp_cores: Limit placement to dataplane cores.
         :type nodes: dict
         :type prefix: str
-        :type workers: str
+        :type use_dp_cores: bool
         """
-        for node in nodes.values():
-            if node[u"type"] == NodeType.DUT:
+        for node_name, node in nodes.items():
+            if node["type"] == NodeType.DUT:
+                workers = None
+                if use_dp_cores:
+                    workers = BuiltIn().get_variable_value(
+                        f"${{{node_name}_cpu_dp}}"
+                    )
                 InterfaceUtil.vpp_round_robin_rx_placement(
                     node, prefix, workers
                 )
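
Callers now opt in with a boolean instead of passing a core list; a
usage sketch:

    from resources.libraries.python.InterfaceUtil import InterfaceUtil

    # With use_dp_cores=True the per-DUT ${<node>_cpu_dp} variable is
    # resolved and passed down; with the default False, workers stays
    # None and RX placement round-robins over all worker threads.
    InterfaceUtil.vpp_round_robin_rx_placement_on_all_duts(
        nodes, prefix="", use_dp_cores=True
    )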

resources/libraries/robot/crypto/ipsec.robot
index 2e58a9c..64563de 100644
 | | ... | on all DUT nodes (leaving feature plane workers disabled).
 | |
 | | VPP Round Robin Rx Placement on all DUTs
-| | ... | ${nodes} | prefix=${EMPTY} | workers=${cpu_dp}
+| | ... | ${nodes} | prefix=${EMPTY} | use_dp_cores=${True}
 | | VPP IPSec Crypto SW Scheduler Set Worker on all DUTs
-| | ... | ${nodes} | workers=${cpu_dp} | crypto_enable=${False}
+| | ... | ${nodes} | crypto_enable=${False}
 
 | Enable SPD flow cache IPv4 Inbound
 | | [Documentation]

resources/libraries/robot/hoststack/hoststack.robot
index 9dd3987..2661ea1 100644
 | | ${core_list}= | Cpu list per node str | ${dut} | ${numa}
 | | ... | skip_cnt=${skip_cnt} | cpu_cnt=${nginx_server_attr}[cpu_cnt]
 | | ${cpu_idle_list}= | Get cpu idle list | ${dut} | ${numa}
-| | ... | ${smt_used} | ${cpu_alloc_str}
+| | ... | ${smt_used} | ${${dut}_cpu_alloc_str}
 | | ${nginx_server}= | Get Nginx Command | ${nginx_server_attr}
 | | ... | ${nginx_version} | ${packages_dir}
 | | ${server_pid}= | Start Hoststack Test Program
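
The hoststack keyword switches to Robot's nested variable syntax,
${${dut}_cpu_alloc_str}, to select the allocation string of the current
DUT. In Python terms (illustrative):

    from robot.libraries.BuiltIn import BuiltIn

    dut = BuiltIn().get_variable_value("${dut}")  # e.g. "DUT1"
    # The inner ${dut} resolves first, yielding ${DUT1_cpu_alloc_str}.
    alloc = BuiltIn().get_variable_value(f"${{{dut}_cpu_alloc_str}}")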

resources/libraries/robot/shared/default.robot
index fabb440..44ade6d 100644
 | | [Arguments] | ${phy_cores} | ${rx_queues}=${None} | ${rxd}=${None}
 | | ... | ${txd}=${None}
 | |
+| | Create compute resources variables
+| | ... | ${phy_cores} | rx_queues=${rx_queues} | rxd=${rxd} | txd=${txd}
 | | FOR | ${dut} | IN | @{duts}
-| | | &{compute_resource_info}= | Get Affinity Vswitch
-| | | ... | ${nodes} | ${dut} | ${phy_cores} | rx_queues=${rx_queues}
-| | | ... | rxd=${rxd} | txd=${txd}
-| | | Set Test Variable | &{compute_resource_info}
-| | | Create compute resources variables
-| | | Run Keyword | ${dut}.Add CPU Main Core | ${cpu_main}
+| | | Run Keyword | ${dut}.Add CPU Main Core | ${${dut}_cpu_main}
 | | | Run Keyword If | ${cpu_count_int} > 0
-| | | ... | ${dut}.Add CPU Corelist Workers | ${cpu_wt}
+| | | ... | ${dut}.Add CPU Corelist Workers | ${${dut}_cpu_wt}
 | | | Run Keyword | ${dut}.Add Buffers Per Numa | ${buffers_numa}
 | | END
 
 | | ... | _NOTE:_ This KW sets various suite variables based on computed
 | | ... | resources.
 | |
+| | ... | *Arguments:*
+| | ... | - phy_cores - Number of physical cores to use. Type: integer
+| | ... | - rx_queues - Number of RX queues. Type: integer
+| | ... | - rxd - Number of RX descriptors. Type: integer
+| | ... | - txd - Number of TX descriptors. Type: integer
+| |
+| | ... | *Example:*
+| |
+| | ... | \| Create compute resources variables \| ${1} \| ${1} \|
+| |
+| | [Arguments] | ${phy_cores} | ${rx_queues}=${None}
+| | ... | ${rxd}=${None} | ${txd}=${None}
+| |
+| | &{compute_resource_info}= | Get Affinity Vswitch
+| | ... | ${nodes} | ${phy_cores} | rx_queues=${rx_queues}
+| | ... | rxd=${rxd} | txd=${txd}
 | | ${variables}= | Get Dictionary Keys | ${compute_resource_info}
 | | FOR | ${variable} | IN | @{variables}
 | | | ${value}= | Get From Dictionary | ${compute_resource_info} | ${variable}
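
In effect, "Create compute resources variables" computes the dictionary
once and exports every key as a test variable. A Python sketch of the
loop (the Set Test Variable step falls outside the diff window above and
is assumed here):

    info = CpuUtils.get_affinity_vswitch(
        nodes, phy_cores, rx_queues=rx_queues, rxd=rxd, txd=txd
    )
    for name, value in info.items():
        # Exposes e.g. ${DUT1_cpu_wt} and ${rxq_count_int} to the
        # keywords that follow in the test.
        BuiltIn().set_test_variable(f"${{{name}}}", value)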

resources/libraries/robot/shared/vm.robot
index 029956c..e8618dc 100644
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 | | ... | rxq_count_int=${rxq_count_int}
 | | ... | virtio_feature_mask=${virtio_feature_mask} | page_size=${page_size}
 | | ${cpu_wt}= | Run Keyword | vnf_manager.Start All VMs | pinning=${pinning}
-| | ${cpu_alloc_str}= | Catenate | SEPARATOR=, | ${cpu_alloc_str} | ${cpu_wt}
-| | Set Test Variable | ${cpu_alloc_str}
+| | ${${node}_cpu_alloc_str}= | Catenate | SEPARATOR=,
+| | ... | ${${node}_cpu_alloc_str} | ${cpu_wt}
+| | Set Test Variable | ${${node}_cpu_alloc_str}
 | | Run Keyword If | ${validate}
 | | ... | All VPP Interfaces Ready Wait | ${nodes} | retries=${300}
 | | VPP round robin RX placement on all DUTs | ${nodes} | prefix=Virtual
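
Finally, vm.robot appends the VM worker cores returned by the VNF
manager to the per-node allocation string. A Python equivalent of the
Catenate and Set Test Variable steps above (values illustrative):

    from robot.libraries.BuiltIn import BuiltIn

    node = "DUT1"      # per-node variable in the keyword
    cpu_wt = "10,11"   # returned by vnf_manager.Start All VMs
    key = f"${{{node}_cpu_alloc_str}}"
    alloc = BuiltIn().get_variable_value(key)
    BuiltIn().set_test_variable(key, f"{alloc},{cpu_wt}")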