Revert "fix(jobspec): Delete ipsec nfv density tests"
[csit.git] / resources / libraries / python / TrafficGenerator.py
index 30be3b9..936cb3a 100644
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 
 """Performance testing traffic generator library."""
 
+import math
 import time
 
+from typing import Callable, List, Optional, Union
+
 from robot.api import logger
 from robot.libraries.BuiltIn import BuiltIn
 
 from .Constants import Constants
-from .CpuUtils import CpuUtils
 from .DropRateSearch import DropRateSearch
-from .MLRsearch.AbstractMeasurer import AbstractMeasurer
-from .MLRsearch.MultipleLossRatioSearch import MultipleLossRatioSearch
-from .MLRsearch.ReceiveRateMeasurement import ReceiveRateMeasurement
+from .MLRsearch import (
+    AbstractMeasurer, Config, GoalResult, MeasurementResult,
+    MultipleLossRatioSearch, SearchGoal,
+)
 from .PLRsearch.PLRsearch import PLRsearch
 from .OptionString import OptionString
 from .ssh import exec_cmd_no_error, exec_cmd
 from .topology import NodeType
 from .topology import NodeSubTypeTG
 from .topology import Topology
+from .TRexConfigGenerator import TrexConfig
+from .DUTSetup import DUTSetup as DS
 
 __all__ = [u"TGDropRateSearchImpl", u"TrafficGenerator", u"OptimizedSearch"]
 
@@ -127,22 +132,18 @@ class TrexMode:
     STL = u"STL"
 
 
-# TODO: Pylint says too-many-instance-attributes.
 class TrafficGenerator(AbstractMeasurer):
     """Traffic Generator."""
 
-    # TODO: Remove "trex" from lines which could work with other TGs.
-
     # Use one instance of TrafficGenerator for all tests in test suite
     ROBOT_LIBRARY_SCOPE = u"TEST SUITE"
 
     def __init__(self):
-        # TODO: Separate into few dataclasses/dicts.
-        #       Pylint dislikes large unstructured state, and it is right.
         self._node = None
         self._mode = None
         # TG interface order mapping
         self._ifaces_reordered = False
+        self._ifaces = []
         # Result holding fields, to be removed.
         self._result = None
         self._loss = None
@@ -177,8 +178,7 @@ class TrafficGenerator(AbstractMeasurer):
         self.ramp_up_duration = None
         self.state_timeout = None
         # Transient data needed for async measurements.
-        self._xstats = (None, None)
-        # TODO: Rename "xstats" to something opaque, so T-Rex is not privileged?
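+        # Opaque per-port xstat snapshots from the last async start,
+        # consumed by the corresponding stop keyword.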
+        self._xstats = []
 
     @property
     def node(self):
@@ -251,101 +251,111 @@ class TrafficGenerator(AbstractMeasurer):
             f"{self._node[u'subtype']} not running in {expected_mode} mode!"
         )
 
-    # TODO: pylint says disable=too-many-locals.
-    def initialize_traffic_generator(
-            self, tg_node, tg_if1, tg_if2, tg_if1_adj_node, tg_if1_adj_if,
-            tg_if2_adj_node, tg_if2_adj_if, osi_layer, tg_if1_dst_mac=None,
-            tg_if2_dst_mac=None):
-        """TG initialization.
+    @staticmethod
+    def get_tg_type(tg_node):
+        """Log and return the installed traffic generator type.
 
-        TODO: Document why do we need (and how do we use) _ifaces_reordered.
+        :param tg_node: Node from topology file.
+        :type tg_node: dict
+        :returns: Traffic generator type string.
+        :rtype: str
+        :raises RuntimeError: If node type or subtype is not supported.
+        """
+        return str(check_subtype(tg_node))
 
-        :param tg_node: Traffic generator node.
-        :param tg_if1: TG - name of first interface.
-        :param tg_if2: TG - name of second interface.
-        :param tg_if1_adj_node: TG if1 adjecent node.
-        :param tg_if1_adj_if: TG if1 adjecent interface.
-        :param tg_if2_adj_node: TG if2 adjecent node.
-        :param tg_if2_adj_if: TG if2 adjecent interface.
-        :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
-        :param tg_if1_dst_mac: Interface 1 destination MAC address.
-        :param tg_if2_dst_mac: Interface 2 destination MAC address.
+    @staticmethod
+    def get_tg_version(tg_node):
+        """Log and return the installed traffic generator version.
+
+        :param tg_node: Node from topology file.
         :type tg_node: dict
-        :type tg_if1: str
-        :type tg_if2: str
-        :type tg_if1_adj_node: dict
-        :type tg_if1_adj_if: str
-        :type tg_if2_adj_node: dict
-        :type tg_if2_adj_if: str
-        :type osi_layer: str
-        :type tg_if1_dst_mac: str
-        :type tg_if2_dst_mac: str
-        :returns: nothing
-        :raises RuntimeError: In case of issue during initialization.
+        :returns: Traffic generator version string.
+        :rtype: str
+        :raises RuntimeError: If command returns nonzero return code.
         """
         subtype = check_subtype(tg_node)
         if subtype == NodeSubTypeTG.TREX:
-            self._node = tg_node
-            self._mode = TrexMode.ASTF if osi_layer == u"L7" else TrexMode.STL
-            if1 = dict()
-            if2 = dict()
-            if1[u"pci"] = Topology().get_interface_pci_addr(self._node, tg_if1)
-            if2[u"pci"] = Topology().get_interface_pci_addr(self._node, tg_if2)
-            if1[u"addr"] = Topology().get_interface_mac(self._node, tg_if1)
-            if2[u"addr"] = Topology().get_interface_mac(self._node, tg_if2)
-
-            if osi_layer == u"L2":
-                if1[u"adj_addr"] = if2[u"addr"]
-                if2[u"adj_addr"] = if1[u"addr"]
-            elif osi_layer in (u"L3", u"L7"):
-                if1[u"adj_addr"] = Topology().get_interface_mac(
-                    tg_if1_adj_node, tg_if1_adj_if
-                )
-                if2[u"adj_addr"] = Topology().get_interface_mac(
-                    tg_if2_adj_node, tg_if2_adj_if
-                )
-            else:
-                raise ValueError(u"Unknown OSI layer!")
+            command = f"cat {Constants.TREX_INSTALL_DIR}/VERSION"
+            message = u"Get T-Rex version failed!"
+            stdout, _ = exec_cmd_no_error(tg_node, command, message=message)
+            return stdout.strip()
+        return "none"
 
-            # in case of switched environment we can override MAC addresses
-            if tg_if1_dst_mac is not None and tg_if2_dst_mac is not None:
-                if1[u"adj_addr"] = tg_if1_dst_mac
-                if2[u"adj_addr"] = tg_if2_dst_mac
+    def initialize_traffic_generator(self, osi_layer, pfs=2):
+        """TG initialization.
 
-            if min(if1[u"pci"], if2[u"pci"]) != if1[u"pci"]:
-                if1, if2 = if2, if1
-                self._ifaces_reordered = True
+        :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
+        :param pfs: Number of physical interfaces to configure.
+        :type osi_layer: str
+        :type pfs: int
+        :raises ValueError: If OSI layer is unknown.
+        """
+        if osi_layer not in ("L2", "L3", "L7"):
+            raise ValueError("Unknown OSI layer!")
 
-            master_thread_id, latency_thread_id, socket, threads = \
-                CpuUtils.get_affinity_trex(
-                    self._node, tg_if1, tg_if2,
-                    tg_dtc=Constants.TREX_CORE_COUNT)
+        topology = BuiltIn().get_variable_value("&{topology_info}")
+        self._node = topology["TG"]
+        subtype = check_subtype(self._node)
 
-            if osi_layer in (u"L2", u"L3", u"L7"):
-                exec_cmd_no_error(
-                    self._node,
-                    f"sh -c 'cat << EOF > /etc/trex_cfg.yaml\n"
-                    f"- version: 2\n"
-                    f"  c: {len(threads)}\n"
-                    f"  limit_memory: {Constants.TREX_LIMIT_MEMORY}\n"
-                    f"  interfaces: [\"{if1[u'pci']}\",\"{if2[u'pci']}\"]\n"
-                    f"  port_info:\n"
-                    f"      - dest_mac: \'{if1[u'adj_addr']}\'\n"
-                    f"        src_mac: \'{if1[u'addr']}\'\n"
-                    f"      - dest_mac: \'{if2[u'adj_addr']}\'\n"
-                    f"        src_mac: \'{if2[u'addr']}\'\n"
-                    f"  platform :\n"
-                    f"      master_thread_id: {master_thread_id}\n"
-                    f"      latency_thread_id: {latency_thread_id}\n"
-                    f"      dual_if:\n"
-                    f"          - socket: {socket}\n"
-                    f"            threads: {threads}\n"
-                    f"EOF'",
-                    sudo=True, message=u"T-Rex config generation!"
-                )
-            else:
-                raise ValueError(u"Unknown OSI layer!")
+        if subtype == NodeSubTypeTG.TREX:
+            trex_topology = list()
+            self._mode = TrexMode.ASTF if osi_layer == "L7" else TrexMode.STL
+
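+            # Interfaces are processed in pairs (pf1+pf2, pf3+pf4, ...),
+            # each pair forming one TG-side link.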
+            for link in range(1, pfs, 2):
+                tg_if1_adj_addr = topology[f"TG_pf{link+1}_mac"][0]
+                tg_if2_adj_addr = topology[f"TG_pf{link}_mac"][0]
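+                # The defaults above (peer TG port MACs) suit L2 topologies;
+                # for L3/L7 with DUTs, adjacent DUT MACs are used instead.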
+                if osi_layer in ("L3", "L7") and "DUT1" in topology.keys():
+                    ifl = BuiltIn().get_variable_value("${int}")
+                    last = topology["duts_count"]
+                    tg_if1_adj_addr = Topology().get_interface_mac(
+                        topology["DUT1"],
+                        BuiltIn().get_variable_value(
+                            f"${{DUT1_{ifl}{link}}}[0]"
+                        )
+                    )
+                    tg_if2_adj_addr = Topology().get_interface_mac(
+                        topology[f"DUT{last}"],
+                        BuiltIn().get_variable_value(
+                            f"${{DUT{last}_{ifl}{link+1}}}[0]"
+                        )
+                    )
 
+                if1_pci = topology[f"TG_pf{link}_pci"][0]
+                if2_pci = topology[f"TG_pf{link+1}_pci"][0]
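+                # DPDK (and thus T-Rex) numbers ports in ascending PCI
+                # address order, so list interfaces in that order and keep
+                # the resulting T-Rex port ids (in logical pf order) in
+                # self._ifaces.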
+                if min(if1_pci, if2_pci) != if1_pci:
+                    self._ifaces.append(str(link))
+                    self._ifaces.append(str(link-1))
+                    trex_topology.append(
+                        dict(
+                            interface=topology[f"TG_pf{link+1}"][0],
+                            dst_mac=tg_if2_adj_addr
+                        )
+                    )
+                    trex_topology.append(
+                        dict(
+                            interface=topology[f"TG_pf{link}"][0],
+                            dst_mac=tg_if1_adj_addr
+                        )
+                    )
+                else:
+                    self._ifaces.append(str(link-1))
+                    self._ifaces.append(str(link))
+                    trex_topology.append(
+                        dict(
+                            interface=topology[f"TG_pf{link}"][0],
+                            dst_mac=tg_if1_adj_addr
+                        )
+                    )
+                    trex_topology.append(
+                        dict(
+                            interface=topology[f"TG_pf{link+1}"][0],
+                            dst_mac=tg_if2_adj_addr
+                        )
+                    )
+
+            TrexConfig.add_startup_configuration(
+                self._node, trex_topology
+            )
             TrafficGenerator.startup_trex(
                 self._node, osi_layer, subtype=subtype
             )
@@ -368,32 +378,41 @@ class TrafficGenerator(AbstractMeasurer):
         if subtype == NodeSubTypeTG.TREX:
             for _ in range(0, 3):
                 # Kill TRex only if it is already running.
-                cmd = u"sh -c \"pgrep t-rex && pkill t-rex && sleep 3 || true\""
+                cmd = "sh -c \"pgrep t-rex && pkill t-rex && sleep 3 || true\""
                 exec_cmd_no_error(
-                    tg_node, cmd, sudo=True, message=u"Kill TRex failed!"
+                    tg_node, cmd, sudo=True, message="Kill TRex failed!"
                 )
 
-                # Configure TRex.
-                ports = ''
-                for port in tg_node[u"interfaces"].values():
-                    if u'Mellanox' not in port.get(u'model'):
-                        ports += f" {port.get(u'pci_address')}"
-
-                cmd = f"sh -c \"cd {Constants.TREX_INSTALL_DIR}/scripts/ && " \
-                    f"./dpdk_nic_bind.py -u {ports} || true\""
-                exec_cmd_no_error(
-                    tg_node, cmd, sudo=True,
-                    message=u"Unbind PCI ports from driver failed!"
-                )
+                # Prepare interfaces for TRex.
+                tg_port_drv = Constants.TREX_PORT_DRIVER
+                mlx_driver = ""
+                for port in tg_node["interfaces"].values():
+                    if "Mellanox" in port.get("model"):
+                        mlx_driver = port.get("driver")
+                        pci_addr = port.get("pci_address")
+                        cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
+                        if cur_driver == mlx_driver:
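+                            # Port is already bound to the desired driver.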
+                            pass
+                        elif not cur_driver:
+                            DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
+                        else:
+                            DS.pci_driver_unbind(tg_node, pci_addr)
+                            DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
+                    else:
+                        pci_addr = port.get("pci_address")
+                        cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
+                        if cur_driver:
+                            DS.pci_driver_unbind(tg_node, pci_addr)
+                        DS.pci_driver_bind(tg_node, pci_addr, tg_port_drv)
 
                 # Start TRex.
                 cd_cmd = f"cd '{Constants.TREX_INSTALL_DIR}/scripts/'"
-                trex_cmd = OptionString([u"nohup", u"./t-rex-64"])
-                trex_cmd.add(u"-i")
-                trex_cmd.add(u"--prefix $(hostname)")
-                trex_cmd.add(u"--hdrh")
-                trex_cmd.add(u"--no-scapy-server")
-                trex_cmd.add_if(u"--astf", osi_layer == u"L7")
+                trex_cmd = OptionString(["nohup", "./t-rex-64"])
+                trex_cmd.add("-i")
+                trex_cmd.add("--prefix $(hostname)")
+                trex_cmd.add("--hdrh")
+                trex_cmd.add("--no-scapy-server")
+                trex_cmd.add_if("--astf", osi_layer == "L7")
                 # OptionString does not create double space if extra is empty.
                 trex_cmd.add(f"{Constants.TREX_EXTRA_CMDLINE}")
                 inner_command = f"{cd_cmd} && {trex_cmd} > /tmp/trex.log 2>&1 &"
@@ -401,33 +420,33 @@ class TrafficGenerator(AbstractMeasurer):
                 try:
                     exec_cmd_no_error(tg_node, cmd, sudo=True)
                 except RuntimeError:
-                    cmd = u"sh -c \"cat /tmp/trex.log\""
+                    cmd = "sh -c \"cat /tmp/trex.log\""
                     exec_cmd_no_error(
                         tg_node, cmd, sudo=True,
-                        message=u"Get TRex logs failed!"
+                        message="Get TRex logs failed!"
                     )
-                    raise RuntimeError(u"Start TRex failed!")
+                    raise RuntimeError("Start TRex failed!")
 
                 # Test T-Rex API responsiveness.
                 cmd = f"python3 {Constants.REMOTE_FW_DIR}/GPL/tools/trex/"
-                if osi_layer in (u"L2", u"L3"):
-                    cmd += u"trex_stl_assert.py"
-                elif osi_layer == u"L7":
-                    cmd += u"trex_astf_assert.py"
+                if osi_layer in ("L2", "L3"):
+                    cmd += "trex_stl_assert.py"
+                elif osi_layer == "L7":
+                    cmd += "trex_astf_assert.py"
                 else:
-                    raise ValueError(u"Unknown OSI layer!")
+                    raise ValueError("Unknown OSI layer!")
                 try:
                     exec_cmd_no_error(
                         tg_node, cmd, sudo=True,
-                        message=u"T-Rex API is not responding!", retries=20
+                        message="T-Rex API is not responding!", retries=20
                     )
                 except RuntimeError:
                     continue
                 return
             # After max retries TRex is still not responding to the API,
             # so report a critical error.
-            exec_cmd(tg_node, u"cat /tmp/trex.log", sudo=True)
-            raise RuntimeError(u"Start T-Rex failed after multiple retries!")
+            exec_cmd(tg_node, "cat /tmp/trex.log", sudo=True)
+            raise RuntimeError("Start T-Rex failed after multiple retries!")
 
     @staticmethod
     def is_trex_running(node):
@@ -438,7 +457,7 @@ class TrafficGenerator(AbstractMeasurer):
         :returns: True if T-Rex is running otherwise False.
         :rtype: bool
         """
-        ret, _, _ = exec_cmd(node, u"pgrep t-rex", sudo=True)
+        ret, _, _ = exec_cmd(node, "pgrep t-rex", sudo=True)
         return bool(int(ret) == 0)
 
     @staticmethod
@@ -471,17 +490,17 @@ class TrafficGenerator(AbstractMeasurer):
         :type node: dict
         :raises RuntimeError: If stop traffic script fails.
         """
-        command_line = OptionString().add(u"python3")
+        command_line = OptionString().add("python3")
         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
         command_line.add(f"'{dirname}/trex_astf_stop.py'")
-        command_line.change_prefix(u"--")
-        for index, value in enumerate(self._xstats):
+        command_line.add("--xstat")
+        for value in self._xstats:
             if value is not None:
-                value = value.replace(u"'", u"\"")
-                command_line.add_equals(f"xstat{index}", f"'{value}'")
+                value = value.replace("'", "\"")
+                command_line.add(f"'{value}'")
         stdout, _ = exec_cmd_no_error(
             node, command_line,
-            message=u"T-Rex ASTF runtime error!"
+            message="T-Rex ASTF runtime error!"
         )
         self._parse_traffic_results(stdout)
 
@@ -495,17 +514,17 @@ class TrafficGenerator(AbstractMeasurer):
         :type node: dict
         :raises RuntimeError: If stop traffic script fails.
         """
-        command_line = OptionString().add(u"python3")
+        command_line = OptionString().add("python3")
         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
         command_line.add(f"'{dirname}/trex_stl_stop.py'")
-        command_line.change_prefix(u"--")
-        for index, value in enumerate(self._xstats):
+        command_line.add("--xstat")
+        for value in self._xstats:
             if value is not None:
-                value = value.replace(u"'", u"\"")
-                command_line.add_equals(f"xstat{index}", f"'{value}'")
+                value = value.replace("'", "\"")
+                command_line.add(f"'{value}'")
         stdout, _ = exec_cmd_no_error(
             node, command_line,
-            message=u"T-Rex STL runtime error!"
+            message="T-Rex STL runtime error!"
         )
         self._parse_traffic_results(stdout)
 
@@ -513,7 +532,7 @@ class TrafficGenerator(AbstractMeasurer):
         """Stop all traffic on TG.
 
         :returns: Structure containing the result of the measurement.
-        :rtype: ReceiveRateMeasurement
+        :rtype: MeasurementResult
         :raises ValueError: If TG traffic profile is not supported.
         """
         subtype = check_subtype(self._node)
@@ -533,7 +552,7 @@ class TrafficGenerator(AbstractMeasurer):
         """Compute duration for profile driver.
 
         The final result is influenced by transaction scale and duration limit.
-        It is assumed a higher level function has already set those to self.
+        It is assumed a higher level function has already set those on self.
         The duration argument is the target value from search point of view,
         before the overrides are applied here.
 
@@ -559,7 +578,6 @@ class TrafficGenerator(AbstractMeasurer):
             # so we can compare with what telemetry suggests
             # the real duration was.
             logger.debug(f"Expected duration {computed_duration}")
-            computed_duration += 0.1115
         if not self.duration_limit:
             return computed_duration, True
         limited_duration = min(computed_duration, self.duration_limit)
@@ -611,8 +629,6 @@ class TrafficGenerator(AbstractMeasurer):
         if not isinstance(duration, (float, int)):
             duration = float(duration)
 
-        # TODO: Refactor the code so duration is computed only once,
-        # and both the initial and the computed durations are logged.
         computed_duration, _ = self._compute_duration(duration, multiplier)
 
         command_line = OptionString().add(u"python3")
@@ -625,6 +641,9 @@ class TrafficGenerator(AbstractMeasurer):
         )
         command_line.add_with_value(u"duration", f"{computed_duration!r}")
         command_line.add_with_value(u"frame_size", self.frame_size)
+        command_line.add_with_value(
+            u"n_data_frames", Constants.ASTF_N_DATA_FRAMES
+        )
         command_line.add_with_value(u"multiplier", multiplier)
         command_line.add_with_value(u"port_0", p_0)
         command_line.add_with_value(u"port_1", p_1)
@@ -634,6 +653,9 @@ class TrafficGenerator(AbstractMeasurer):
         command_line.add_if(u"async_start", async_call)
         command_line.add_if(u"latency", self.use_latency)
         command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
+        command_line.add_with_value(
+            u"delay", Constants.PERF_TRIAL_ASTF_DELAY
+        )
 
         self._start_time = time.monotonic()
         self._rate = multiplier
@@ -650,7 +672,7 @@ class TrafficGenerator(AbstractMeasurer):
             self._sent = None
             self._loss = None
             self._latency = None
-            xstats = [None, None]
+            xstats = []
             self._l7_data = dict()
             self._l7_data[u"client"] = dict()
             self._l7_data[u"client"][u"active_flows"] = None
@@ -683,10 +705,8 @@ class TrafficGenerator(AbstractMeasurer):
             index = 0
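+            # Collect every per-port Xstats snapshot present in the output.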
             for line in stdout.splitlines():
                 if f"Xstats snapshot {index}: " in line:
-                    xstats[index] = line[19:]
+                    xstats.append(line[19:])
                     index += 1
-                if index == 2:
-                    break
             self._xstats = tuple(xstats)
         else:
             self._target_duration = duration
@@ -714,40 +734,36 @@ class TrafficGenerator(AbstractMeasurer):
         :raises RuntimeError: In case of T-Rex driver issue.
         """
         self.check_mode(TrexMode.STL)
-        p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
         if not isinstance(duration, (float, int)):
             duration = float(duration)
 
-        # TODO: Refactor the code so duration is computed only once,
-        # and both the initial and the computed durations are logged.
         duration, _ = self._compute_duration(duration=duration, multiplier=rate)
 
-        command_line = OptionString().add(u"python3")
+        command_line = OptionString().add("python3")
         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
         command_line.add(f"'{dirname}/trex_stl_profile.py'")
-        command_line.change_prefix(u"--")
+        command_line.change_prefix("--")
         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
         command_line.add_with_value(
-            u"profile", f"'{dirname}/{self.traffic_profile}.py'"
+            "profile", f"'{dirname}/{self.traffic_profile}.py'"
         )
-        command_line.add_with_value(u"duration", f"{duration!r}")
-        command_line.add_with_value(u"frame_size", self.frame_size)
-        command_line.add_with_value(u"rate", f"{rate!r}")
-        command_line.add_with_value(u"port_0", p_0)
-        command_line.add_with_value(u"port_1", p_1)
+        command_line.add_with_value("duration", f"{duration!r}")
+        command_line.add_with_value("frame_size", self.frame_size)
+        command_line.add_with_value("rate", f"{rate!r}")
+        command_line.add_with_value("ports", " ".join(self._ifaces))
         command_line.add_with_value(
-            u"traffic_directions", self.traffic_directions
+            "traffic_directions", self.traffic_directions
         )
-        command_line.add_if(u"async_start", async_call)
-        command_line.add_if(u"latency", self.use_latency)
-        command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
+        command_line.add_if("async_start", async_call)
+        command_line.add_if("latency", self.use_latency)
+        command_line.add_if("force", Constants.TREX_SEND_FORCE)
+        command_line.add_with_value("delay", Constants.PERF_TRIAL_STL_DELAY)
 
-        # TODO: This is ugly. Handle parsing better.
         self._start_time = time.monotonic()
-        self._rate = float(rate[:-3]) if u"pps" in rate else float(rate)
+        self._rate = float(rate[:-3]) if "pps" in rate else float(rate)
         stdout, _ = exec_cmd_no_error(
             self._node, command_line, timeout=int(duration) + 60,
-            message=u"T-Rex STL runtime error"
+            message="T-Rex STL runtime error"
         )
 
         if async_call:
@@ -759,14 +775,12 @@ class TrafficGenerator(AbstractMeasurer):
             self._loss = None
             self._latency = None
 
-            xstats = [None, None]
+            xstats = []
             index = 0
             for line in stdout.splitlines():
                 if f"Xstats snapshot {index}: " in line:
-                    xstats[index] = line[19:]
+                    xstats.append(line[19:])
                     index += 1
-                if index == 2:
-                    break
             self._xstats = tuple(xstats)
         else:
             self._target_duration = duration
@@ -789,7 +803,7 @@ class TrafficGenerator(AbstractMeasurer):
             use_latency=False,
             ramp_up_rate=None,
             ramp_up_duration=None,
-            state_timeout=300.0,
+            state_timeout=240.0,
             ramp_up_only=False,
         ):
         """Send traffic from all configured interfaces on TG.
@@ -856,7 +870,7 @@ class TrafficGenerator(AbstractMeasurer):
         :type state_timeout: float
         :type ramp_up_only: bool
         :returns: TG results.
-        :rtype: ReceiveRateMeasurement or None
+        :rtype: MeasurementResult or None
         :raises ValueError: If TG traffic profile is not supported.
         """
         self.set_rate_provider_defaults(
@@ -903,7 +917,7 @@ class TrafficGenerator(AbstractMeasurer):
         :type rate: float
         :type async_call: bool
         :returns: TG results.
-        :rtype: ReceiveRateMeasurement or None
+        :rtype: MeasurementResult or None
         :raises ValueError: If TG traffic profile is not supported.
         """
         subtype = check_subtype(self._node)
@@ -914,7 +928,6 @@ class TrafficGenerator(AbstractMeasurer):
                 )
             elif u"trex-stl" in self.traffic_profile:
                 unit_rate_str = str(rate) + u"pps"
-                # TODO: Suport transaction_scale et al?
                 self.trex_stl_start_remote_exec(
                     duration, unit_rate_str, async_call
                 )
@@ -955,15 +968,14 @@ class TrafficGenerator(AbstractMeasurer):
         :type async_call: bool
         :type ramp_up_only: bool
         :returns: TG results.
-        :rtype: ReceiveRateMeasurement or None
+        :rtype: MeasurementResult or None
         :raises ValueError: If TG traffic profile is not supported.
         """
         complete = False
         if self.ramp_up_rate:
             # Figure out whether we need to insert a ramp-up trial.
-            # TODO: Give up on async_call=True?
-            if self.ramp_up_start is None:
-                # We never ramped up yet.
+            if ramp_up_only or self.ramp_up_start is None:
+                # We never ramped up yet (at least not in this test case).
                 ramp_up_needed = True
             else:
                 # We ramped up before, but maybe it was too long ago.
@@ -1011,7 +1023,7 @@ class TrafficGenerator(AbstractMeasurer):
         trial_end = time.monotonic()
         if self.ramp_up_rate:
             # Optimization: No loss acts as a good ramp-up, if it was complete.
-            if complete and result is not None and result.loss_count == 0:
+            if complete and result is not None and result.loss_ratio == 0.0:
                 logger.debug(u"Good trial acts as a ramp-up")
                 self.ramp_up_start = trial_start
                 self.ramp_up_stop = trial_end
@@ -1033,14 +1045,12 @@ class TrafficGenerator(AbstractMeasurer):
     def fail_if_no_traffic_forwarded(self):
         """Fail if no traffic forwarded.
 
-        TODO: Check number of passed transactions instead.
-
         :returns: nothing
         :raises Exception: If no traffic forwarded.
         """
         if self._received is None:
             raise RuntimeError(u"The traffic generation has not been issued")
-        if self._received == u"0":
+        if self._received == 0:
             raise RuntimeError(u"No traffic forwarded")
 
     def partial_traffic_loss_accepted(
@@ -1185,21 +1195,20 @@ class TrafficGenerator(AbstractMeasurer):
                         int(self._result.get(u"server_tcp_rx_bytes", 0))
 
     def _get_measurement_result(self):
-        """Return the result of last measurement as ReceiveRateMeasurement.
+        """Return the result of last measurement as MeasurementResult.
 
         Separate function, as measurements can end either by time
         or by explicit call, this is the common block at the end.
 
-        The target_tr field of ReceiveRateMeasurement is in
+        The intended_load field of MeasurementResult is in
         transactions per second. Transmit count and loss count units
         depend on the transaction type. Usually they are in transactions
-        per second, or aggregate packets per second.
-
-        TODO: Fail on running or already reported measurement.
+        per second, or aggregated packets per second.
 
         :returns: Structure containing the result of the measurement.
-        :rtype: ReceiveRateMeasurement
+        :rtype: MeasurementResult
         """
+        duration_with_overheads = time.monotonic() - self._start_time
         try:
             # Client duration seems to include a setup period
             # where TRex does not send any packets yet.
@@ -1226,18 +1235,29 @@ class TrafficGenerator(AbstractMeasurer):
         if not target_duration:
             target_duration = approximated_duration
         transmit_rate = self._rate
+        unsent = 0
         if self.transaction_type == u"packet":
             partial_attempt_count = self._sent
-            expected_attempt_count = self._sent
-            fail_count = self._loss
+            packet_rate = transmit_rate * self.ppta
+            # We have a float. TRex way of rounding it is not obvious.
+            # The biggest source of mismatch is Inter Stream Gap.
+            # So the code tolerates 10 usec of missing packets.
+            expected_attempt_count = (target_duration - 1e-5) * packet_rate
+            expected_attempt_count = math.ceil(expected_attempt_count)
+            # TRex can send more.
+            expected_attempt_count = max(expected_attempt_count, self._sent)
+            unsent = expected_attempt_count - self._sent
+            pass_count = self._received
+            loss_count = self._loss
         elif self.transaction_type == u"udp_cps":
             if not self.transaction_scale:
                 raise RuntimeError(u"Add support for no-limit udp_cps.")
             partial_attempt_count = self._l7_data[u"client"][u"sent"]
             # We do not care whether TG is slow, it should have attempted all.
             expected_attempt_count = self.transaction_scale
+            unsent = expected_attempt_count - partial_attempt_count
             pass_count = self._l7_data[u"client"][u"received"]
-            fail_count = expected_attempt_count - pass_count
+            loss_count = partial_attempt_count - pass_count
         elif self.transaction_type == u"tcp_cps":
             if not self.transaction_scale:
                 raise RuntimeError(u"Add support for no-limit tcp_cps.")
@@ -1245,17 +1265,19 @@ class TrafficGenerator(AbstractMeasurer):
             partial_attempt_count = ctca
             # We do not care whether TG is slow, it should have attempted all.
             expected_attempt_count = self.transaction_scale
+            unsent = expected_attempt_count - partial_attempt_count
             # From TCP point of view, server/connects counts full connections,
             # but we are testing NAT session so client/connects counts that
             # (half connections from TCP point of view).
             pass_count = self._l7_data[u"client"][u"tcp"][u"connects"]
-            fail_count = expected_attempt_count - pass_count
+            loss_count = partial_attempt_count - pass_count
         elif self.transaction_type == u"udp_pps":
             if not self.transaction_scale:
                 raise RuntimeError(u"Add support for no-limit udp_pps.")
             partial_attempt_count = self._sent
             expected_attempt_count = self.transaction_scale * self.ppta
-            fail_count = self._loss + (expected_attempt_count - self._sent)
+            unsent = expected_attempt_count - self._sent
+            loss_count = self._loss
         elif self.transaction_type == u"tcp_pps":
             if not self.transaction_scale:
                 raise RuntimeError(u"Add support for no-limit tcp_pps.")
@@ -1268,26 +1290,31 @@ class TrafficGenerator(AbstractMeasurer):
             # A simple workaround is to add absolute difference.
             # Probability of retransmissions exactly cancelling
             # packets unsent due to duration stretching is quite low.
-            fail_count = self._loss + abs(expected_attempt_count - self._sent)
+            unsent = abs(expected_attempt_count - self._sent)
+            loss_count = self._loss
         else:
             raise RuntimeError(f"Unknown parsing {self.transaction_type!r}")
-        if fail_count < 0 and not self.negative_loss:
-            fail_count = 0
-        measurement = ReceiveRateMeasurement(
-            duration=target_duration,
-            target_tr=transmit_rate,
-            transmit_count=expected_attempt_count,
-            loss_count=fail_count,
-            approximated_duration=approximated_duration,
-            partial_transmit_count=partial_attempt_count,
+        if unsent and isinstance(self._approximated_duration, float):
+            # Do not report unsent for "manual".
+            logger.debug(f"Unsent packets/transactions: {unsent}")
+        if loss_count < 0 and not self.negative_loss:
+            loss_count = 0
+        measurement = MeasurementResult(
+            intended_duration=target_duration,
+            intended_load=transmit_rate,
+            offered_count=partial_attempt_count,
+            loss_count=loss_count,
+            offered_duration=approximated_duration,
+            duration_with_overheads=duration_with_overheads,
+            intended_count=expected_attempt_count,
         )
         measurement.latency = self.get_latency_int()
         return measurement
 
-    def measure(self, duration, transmit_rate):
+    def measure(self, intended_duration, intended_load):
         """Run trial measurement, parse and return results.
 
-        The input rate is for transactions. Stateles bidirectional traffic
+        The intended load is for transactions. Stateless bidirectional traffic
         is understood as a sequence of (asynchronous) transactions,
         two packets each.
 
@@ -1295,35 +1322,32 @@ class TrafficGenerator(AbstractMeasurer):
         the count either transactions or packets (aggregated over directions).
 
         Optionally, this method sleeps if measurement finished before
-        the time specified as duration.
+        the time specified as intended_duration (PLRsearch needs time for math).
 
-        :param duration: Trial duration [s].
-        :param transmit_rate: Target rate in transactions per second.
-        :type duration: float
-        :type transmit_rate: float
+        :param intended_duration: Trial duration [s].
+        :param intended_load: Target rate in transactions per second.
+        :type intended_duration: float
+        :type intended_load: float
         :returns: Structure containing the result of the measurement.
-        :rtype: ReceiveRateMeasurement
+        :rtype: MeasurementResult
         :raises RuntimeError: If TG is not set or if node is not TG
             or if subtype is not specified.
         :raises NotImplementedError: If TG is not supported.
         """
-        duration = float(duration)
+        intended_duration = float(intended_duration)
         time_start = time.monotonic()
-        time_stop = time_start + duration
+        time_stop = time_start + intended_duration
         if self.resetter:
             self.resetter()
         result = self._send_traffic_on_tg_with_ramp_up(
-            duration=duration,
-            rate=transmit_rate,
+            duration=intended_duration,
+            rate=intended_load,
             async_call=False,
         )
         logger.debug(f"trial measurement result: {result!r}")
         # In PLRsearch, computation needs the specified time to complete.
         if self.sleep_till_duration:
-            sleeptime = time_stop - time.monotonic()
-            if sleeptime > 0.0:
-                # TODO: Sometimes we have time to do additional trials here,
-                # adapt PLRsearch to accept all the results.
+            while (sleeptime := time_stop - time.monotonic()) > 0.0:
                 time.sleep(sleeptime)
         return result
 
@@ -1343,7 +1367,7 @@ class TrafficGenerator(AbstractMeasurer):
             use_latency=False,
             ramp_up_rate=None,
             ramp_up_duration=None,
-            state_timeout=300.0,
+            state_timeout=240.0,
         ):
         """Store values accessed by measure().
 
@@ -1364,7 +1388,6 @@ class TrafficGenerator(AbstractMeasurer):
         :param transaction_type: An identifier specifying which counters
             and formulas to use when computing attempted and failed
             transactions. Default: "packet".
-            TODO: Does this also specify parsing for the measured duration?
         :param duration_limit: Zero or maximum limit for computed (or given)
             duration.
         :param negative_loss: If false, negative loss is reported as zero loss.
@@ -1394,7 +1417,7 @@ class TrafficGenerator(AbstractMeasurer):
         self.frame_size = frame_size
         self.traffic_profile = str(traffic_profile)
         self.resetter = resetter
-        self.ppta = ppta
+        self.ppta = int(ppta)
         self.traffic_directions = int(traffic_directions)
         self.transaction_duration = float(transaction_duration)
         self.transaction_scale = int(transaction_scale)
@@ -1412,59 +1435,59 @@ class OptimizedSearch:
     """Class to be imported as Robot Library, containing search keywords.
 
     Aside of setting up measurer and forwarding arguments,
-    the main business is to translate min/max rate from unidir to aggregate.
+    the main business is to translate min/max rate from unidir to aggregated.
     """
 
     @staticmethod
-    def perform_optimized_ndrpdr_search(
-            frame_size,
-            traffic_profile,
-            minimum_transmit_rate,
-            maximum_transmit_rate,
-            packet_loss_ratio=0.005,
-            final_relative_width=0.005,
-            final_trial_duration=30.0,
-            initial_trial_duration=1.0,
-            number_of_intermediate_phases=2,
-            timeout=720.0,
-            doublings=1,
-            ppta=1,
-            resetter=None,
-            traffic_directions=2,
-            transaction_duration=0.0,
-            transaction_scale=0,
-            transaction_type=u"packet",
-            use_latency=False,
-            ramp_up_rate=None,
-            ramp_up_duration=None,
-            state_timeout=300.0,
-    ):
+    def perform_mlr_search(
+        frame_size: Union[int, str],
+        traffic_profile: str,
+        min_load: float,
+        max_load: float,
+        loss_ratio: float = 0.005,
+        relative_width: float = 0.005,
+        initial_trial_duration: float = 1.0,
+        final_trial_duration: float = 1.0,
+        duration_sum: float = 21.0,
+        expansion_coefficient: int = 2,
+        preceding_targets: int = 2,
+        search_duration_max: float = 1200.0,
+        ppta: int = 1,
+        resetter: Optional[Callable[[], None]] = None,
+        traffic_directions: int = 2,
+        transaction_duration: float = 0.0,
+        transaction_scale: int = 0,
+        transaction_type: str = "packet",
+        use_latency: bool = False,
+        ramp_up_rate: float = 0.0,
+        ramp_up_duration: float = 0.0,
+        state_timeout: float = 240.0,
+    ) -> List[GoalResult]:
         """Setup initialized TG, perform optimized search, return intervals.
 
-        If transaction_scale is nonzero, all non-init trial durations
-        are set to 2.0 (as they do not affect the real trial duration)
+        If transaction_scale is nonzero, all init and non-init trial durations
+        are set to 1.0 (as they do not affect the real trial duration)
         and a single preceding target is used.
-        The initial phase still uses 1.0 seconds, to force remeasurement.
-        That makes initial phase act as a warmup.
+        This way no re-measurement happens.
+        Warmup has to be handled via resetter or ramp-up mechanisms.
 
         :param frame_size: Frame size identifier or value [B].
         :param traffic_profile: Module name as a traffic profile identifier.
             See GPL/traffic_profiles/trex for implemented modules.
-        :param minimum_transmit_rate: Minimal load in transactions per second.
-        :param maximum_transmit_rate: Maximal load in transactions per second.
-        :param packet_loss_ratio: Fraction of packets lost, for PDR [1].
-        :param final_relative_width: Final lower bound transmit rate
+        :param min_load: Minimal load in transactions per second.
+        :param max_load: Maximal load in transactions per second.
+        :param loss_ratio: Ratio of packets lost, for PDR [1].
+        :param relative_width: Final lower bound intended load
             cannot be more distant than this multiple of upper bound [1].
-        :param final_trial_duration: Trial duration for the final phase [s].
         :param initial_trial_duration: Trial duration for the initial phase
             and also for the first intermediate phase [s].
-        :param number_of_intermediate_phases: Number of intermediate phases
+        :param final_trial_duration: Trial duration for the final phase [s].
+        :param duration_sum: Max sum of duration for deciding [s].
+        :param expansion_coefficient: In external search multiply width by this.
+        :param preceding_targets: Number of intermediate phases
             to perform before the final phase [1].
-        :param timeout: The search will fail itself when not finished
-            before this overall time [s].
-        :param doublings: How many doublings to do in external search step.
-            Default 1 is suitable for fairly stable tests,
-            less stable tests might get better overal duration with 2 or more.
+        :param search_duration_max: The search will fail itself
+            when not finished before this overall time [s].
         :param ppta: Packets per transaction, aggregated over directions.
             Needed for udp_pps which does not have a good transaction counter,
             so we need to compute expected number of packets.
@@ -1485,15 +1508,16 @@ class OptimizedSearch:
         :param state_timeout: Time of life of DUT state [s].
         :type frame_size: str or int
         :type traffic_profile: str
-        :type minimum_transmit_rate: float
-        :type maximum_transmit_rate: float
-        :type packet_loss_ratio: float
-        :type final_relative_width: float
-        :type final_trial_duration: float
+        :type min_load: float
+        :type max_load: float
+        :type loss_ratio: float
+        :type relative_width: float
         :type initial_trial_duration: float
-        :type number_of_intermediate_phases: int
-        :type timeout: float
-        :type doublings: int
+        :type final_trial_duration: float
+        :type duration_sum: float
+        :type expansion_coefficient: int
+        :type preceding_targets: int
+        :type search_duration_max: float
         :type ppta: int
         :type resetter: Optional[Callable[[], None]]
         :type traffic_directions: int
@@ -1504,10 +1528,12 @@ class OptimizedSearch:
         :type ramp_up_rate: float
         :type ramp_up_duration: float
         :type state_timeout: float
-        :returns: Structure containing narrowed down NDR and PDR intervals
-            and their measurements.
-        :rtype: NdrPdrResult
-        :raises RuntimeError: If total duration is larger than timeout.
+        :returns: Goal result (based on unidirectional tps) for each goal.
+            The result contains both the offered load for stat trial,
+            and the conditional throughput for display.
+        :rtype: List[GoalResult]
+        :raises RuntimeError: If search duration exceeds search_duration_max
+            or if min load becomes an upper bound for any search goal.
         """
         # we need instance of TrafficGenerator instantiated by Robot Framework
         # to be able to use trex_stl-*()
@@ -1515,13 +1541,12 @@ class OptimizedSearch:
             u"resources.libraries.python.TrafficGenerator"
         )
         # Overrides for fixed transaction amount.
-        # TODO: Move to robot code? We have two call sites, so this saves space,
-        #       even though this is surprising for log readers.
         if transaction_scale:
             initial_trial_duration = 1.0
-            final_trial_duration = 2.0
-            number_of_intermediate_phases = 0
-            timeout += transaction_scale * 3e-4
+            final_trial_duration = 1.0
+            preceding_targets = 1
+            # TODO: Move the value to Constants.py?
+            search_duration_max += transaction_scale * 3e-4
         tg_instance.set_rate_provider_defaults(
             frame_size=frame_size,
             traffic_profile=traffic_profile,
@@ -1537,28 +1562,43 @@ class OptimizedSearch:
             ramp_up_duration=ramp_up_duration,
             state_timeout=state_timeout,
         )
-        algorithm = MultipleLossRatioSearch(
-            measurer=tg_instance,
-            final_trial_duration=final_trial_duration,
-            final_relative_width=final_relative_width,
-            number_of_intermediate_phases=number_of_intermediate_phases,
-            initial_trial_duration=initial_trial_duration,
-            timeout=timeout,
-            doublings=doublings,
-        )
-        result = algorithm.narrow_down_ndr_and_pdr(
-            min_rate=minimum_transmit_rate,
-            max_rate=maximum_transmit_rate,
-            packet_loss_ratio=packet_loss_ratio,
-        )
-        return result
+        if loss_ratio:
+            loss_ratios = [0.0, loss_ratio]
+            exceed_ratio = 0.5
+        else:
+            # Happens in reconf tests.
+            loss_ratios = [0.0]
+            exceed_ratio = 0.0
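+        # With a nonzero loss ratio, two goals are searched: NDR (zero loss)
+        # and PDR (the given ratio). Exceed ratio 0.5 means a load still
+        # passes if at most half of the trial time shows loss above the goal.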
+        goals = [
+            SearchGoal(
+                loss_ratio=loss_ratio,
+                exceed_ratio=exceed_ratio,
+                relative_width=relative_width,
+                initial_trial_duration=initial_trial_duration,
+                final_trial_duration=final_trial_duration,
+                duration_sum=duration_sum,
+                preceding_targets=preceding_targets,
+                expansion_coefficient=expansion_coefficient,
+                fail_fast=True,
+            )
+            for loss_ratio in loss_ratios
+        ]
+        config = Config()
+        config.goals = goals
+        config.min_load = min_load
+        config.max_load = max_load
+        config.search_duration_max = search_duration_max
+        config.warmup_duration = 1.0
+        algorithm = MultipleLossRatioSearch(config)
+        results = algorithm.search(measurer=tg_instance, debug=logger.debug)
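+        # Results keep goal order: with a nonzero loss_ratio the first item
+        # is the NDR (zero loss) result and the second is the PDR result.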
+        return [results[goal] for goal in goals]
 
     @staticmethod
     def perform_soak_search(
             frame_size,
             traffic_profile,
-            minimum_transmit_rate,
-            maximum_transmit_rate,
+            min_load,
+            max_load,
             plr_target=1e-7,
             tdpt=0.1,
             initial_count=50,
@@ -1573,16 +1613,16 @@ class OptimizedSearch:
             use_latency=False,
             ramp_up_rate=None,
             ramp_up_duration=None,
-            state_timeout=300.0,
+            state_timeout=240.0,
     ):
         """Setup initialized TG, perform soak search, return avg and stdev.
 
         :param frame_size: Frame size identifier or value [B].
         :param traffic_profile: Module name as a traffic profile identifier.
             See GPL/traffic_profiles/trex for implemented modules.
-        :param minimum_transmit_rate: Minimal load in transactions per second.
-        :param maximum_transmit_rate: Maximal load in transactions per second.
-        :param plr_target: Fraction of packets lost to achieve [1].
+        :param min_load: Minimal load in transactions per second.
+        :param max_load: Maximal load in transactions per second.
+        :param plr_target: Ratio of packets lost to achieve [1].
         :param tdpt: Trial duration per trial.
             The algorithm linearly increases trial duration with trial number,
             this is the increment between successive trials, in seconds.
@@ -1615,8 +1655,8 @@ class OptimizedSearch:
         :param state_timeout: Time of life of DUT state [s].
         :type frame_size: str or int
         :type traffic_profile: str
-        :type minimum_transmit_rate: float
-        :type maximum_transmit_rate: float
+        :type min_load: float
+        :type max_load: float
         :type plr_target: float
         :type initial_count: int
         :type timeout: float
@@ -1631,18 +1671,14 @@ class OptimizedSearch:
         :type ramp_up_rate: float
         :type ramp_up_duration: float
         :type state_timeout: float
-        :returns: Average and stdev of estimated aggregate rate giving PLR.
+        :returns: Average and stdev of estimated aggregated rate giving PLR.
         :rtype: 2-tuple of float
         """
         tg_instance = BuiltIn().get_library_instance(
             u"resources.libraries.python.TrafficGenerator"
         )
         # Overrides for fixed transaction amount.
-        # TODO: Move to robot code? We have a single call site
-        #       but MLRsearch has two and we want the two to be used similarly.
         if transaction_scale:
-            # TODO: What is a good value for max scale?
-            # TODO: Scale the timeout with transaction scale.
             timeout = 7200.0
         tg_instance.set_rate_provider_defaults(
             frame_size=frame_size,
@@ -1669,7 +1705,7 @@ class OptimizedSearch:
             trace_enabled=trace_enabled,
         )
         result = algorithm.search(
-            min_rate=minimum_transmit_rate,
-            max_rate=maximum_transmit_rate,
+            min_rate=min_load,
+            max_rate=max_load,
         )
         return result