Revert "fix(jobspec): Delete ipsec nfv density tests"
[csit.git] / resources / libraries / python / TrafficGenerator.py
index 5d9a056..936cb3a 100644
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 import math
 import time
 
+from typing import Callable, List, Optional, Union
+
 from robot.api import logger
 from robot.libraries.BuiltIn import BuiltIn
 
 from .Constants import Constants
-from .CpuUtils import CpuUtils
 from .DropRateSearch import DropRateSearch
-from .MLRsearch.AbstractMeasurer import AbstractMeasurer
-from .MLRsearch.MultipleLossRatioSearch import MultipleLossRatioSearch
-from .MLRsearch.ReceiveRateMeasurement import ReceiveRateMeasurement
+from .MLRsearch import (
+    AbstractMeasurer, Config, GoalResult, MeasurementResult,
+    MultipleLossRatioSearch, SearchGoal,
+)
 from .PLRsearch.PLRsearch import PLRsearch
 from .OptionString import OptionString
 from .ssh import exec_cmd_no_error, exec_cmd
 from .topology import NodeType
 from .topology import NodeSubTypeTG
 from .topology import Topology
+from .TRexConfigGenerator import TrexConfig
+from .DUTSetup import DUTSetup as DS
 
 __all__ = [u"TGDropRateSearchImpl", u"TrafficGenerator", u"OptimizedSearch"]
 
@@ -128,22 +132,18 @@ class TrexMode:
     STL = u"STL"
 
 
-# TODO: Pylint says too-many-instance-attributes.
 class TrafficGenerator(AbstractMeasurer):
     """Traffic Generator."""
 
-    # TODO: Remove "trex" from lines which could work with other TGs.
-
     # Use one instance of TrafficGenerator for all tests in test suite
     ROBOT_LIBRARY_SCOPE = u"TEST SUITE"
 
     def __init__(self):
-        # TODO: Separate into few dataclasses/dicts.
-        #       Pylint dislikes large unstructured state, and it is right.
         self._node = None
         self._mode = None
         # TG interface order mapping
         self._ifaces_reordered = False
+        self._ifaces = []
         # Result holding fields, to be removed.
         self._result = None
         self._loss = None
@@ -178,8 +178,7 @@ class TrafficGenerator(AbstractMeasurer):
         self.ramp_up_duration = None
         self.state_timeout = None
         # Transient data needed for async measurements.
-        self._xstats = (None, None)
-        # TODO: Rename "xstats" to something opaque, so T-Rex is not privileged?
+        self._xstats = []
 
     @property
     def node(self):
@@ -280,122 +279,83 @@ class TrafficGenerator(AbstractMeasurer):
             message = u"Get T-Rex version failed!"
             stdout, _ = exec_cmd_no_error(tg_node, command, message=message)
             return stdout.strip()
-        else:
-            return "none"
+        return "none"
 
-    # TODO: pylint disable=too-many-locals.
-    def initialize_traffic_generator(
-            self, tg_node, tg_if1, tg_if2, tg_if1_adj_node, tg_if1_adj_if,
-            tg_if2_adj_node, tg_if2_adj_if, osi_layer, tg_if1_dst_mac=None,
-            tg_if2_dst_mac=None):
+    def initialize_traffic_generator(self, osi_layer, pfs=2):
         """TG initialization.
 
-        TODO: Document why do we need (and how do we use) _ifaces_reordered.
-
-        :param tg_node: Traffic generator node.
-        :param tg_if1: TG - name of first interface.
-        :param tg_if2: TG - name of second interface.
-        :param tg_if1_adj_node: TG if1 adjecent node.
-        :param tg_if1_adj_if: TG if1 adjecent interface.
-        :param tg_if2_adj_node: TG if2 adjecent node.
-        :param tg_if2_adj_if: TG if2 adjecent interface.
         :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
-        :param tg_if1_dst_mac: Interface 1 destination MAC address.
-        :param tg_if2_dst_mac: Interface 2 destination MAC address.
-        :type tg_node: dict
-        :type tg_if1: str
-        :type tg_if2: str
-        :type tg_if1_adj_node: dict
-        :type tg_if1_adj_if: str
-        :type tg_if2_adj_node: dict
-        :type tg_if2_adj_if: str
+        :param pfs: Number of physical interfaces to configure.
         :type osi_layer: str
-        :type tg_if1_dst_mac: str
-        :type tg_if2_dst_mac: str
-        :returns: nothing
-        :raises RuntimeError: In case of issue during initialization.
+        :type pfs: int
+        :raises ValueError: If OSI layer is unknown.
         """
-        subtype = check_subtype(tg_node)
-        if subtype == NodeSubTypeTG.TREX:
-            self._node = tg_node
-            self._mode = TrexMode.ASTF if osi_layer == u"L7" else TrexMode.STL
-            if1 = dict()
-            if2 = dict()
-            if1[u"pci"] = Topology().get_interface_pci_addr(self._node, tg_if1)
-            if2[u"pci"] = Topology().get_interface_pci_addr(self._node, tg_if2)
-            if1[u"addr"] = Topology().get_interface_mac(self._node, tg_if1)
-            if2[u"addr"] = Topology().get_interface_mac(self._node, tg_if2)
-
-            if osi_layer == u"L2":
-                if1[u"adj_addr"] = if2[u"addr"]
-                if2[u"adj_addr"] = if1[u"addr"]
-            elif osi_layer in (u"L3", u"L7"):
-                if1[u"adj_addr"] = Topology().get_interface_mac(
-                    tg_if1_adj_node, tg_if1_adj_if
-                )
-                if2[u"adj_addr"] = Topology().get_interface_mac(
-                    tg_if2_adj_node, tg_if2_adj_if
-                )
-            else:
-                raise ValueError(u"Unknown OSI layer!")
-
-            # in case of switched environment we can override MAC addresses
-            if tg_if1_dst_mac is not None and tg_if2_dst_mac is not None:
-                if1[u"adj_addr"] = tg_if1_dst_mac
-                if2[u"adj_addr"] = tg_if2_dst_mac
-
-            if min(if1[u"pci"], if2[u"pci"]) != if1[u"pci"]:
-                if1, if2 = if2, if1
-                self._ifaces_reordered = True
-
-            master_thread_id, latency_thread_id, socket, threads = \
-                CpuUtils.get_affinity_trex(
-                    self._node, tg_if1, tg_if2,
-                    tg_dtc=Constants.TREX_CORE_COUNT)
+        if osi_layer not in ("L2", "L3", "L7"):
+            raise ValueError("Unknown OSI layer!")
 
-            if osi_layer in (u"L2", u"L3", u"L7"):
-                exec_cmd_no_error(
-                    self._node,
-                    f"sh -c 'cat << EOF > /etc/trex_cfg.yaml\n"
-                    f"- version: 2\n"
-                    f"  c: {len(threads)}\n"
-                    f"  limit_memory: {Constants.TREX_LIMIT_MEMORY}\n"
-                    f"  interfaces: [\"{if1[u'pci']}\",\"{if2[u'pci']}\"]\n"
-                    f"  port_info:\n"
-                    f"      - dest_mac: \'{if1[u'adj_addr']}\'\n"
-                    f"        src_mac: \'{if1[u'addr']}\'\n"
-                    f"      - dest_mac: \'{if2[u'adj_addr']}\'\n"
-                    f"        src_mac: \'{if2[u'addr']}\'\n"
-                    f"  platform :\n"
-                    f"      master_thread_id: {master_thread_id}\n"
-                    f"      latency_thread_id: {latency_thread_id}\n"
-                    f"      dual_if:\n"
-                    f"          - socket: {socket}\n"
-                    f"            threads: {threads}\n"
-                    f"EOF'",
-                    sudo=True, message=u"T-Rex config generation!"
-                )
+        topology = BuiltIn().get_variable_value("&{topology_info}")
+        self._node = topology["TG"]
+        subtype = check_subtype(self._node)
 
-                if Constants.TREX_RX_DESCRIPTORS_COUNT != 0:
-                    exec_cmd_no_error(
-                        self._node,
-                        f"sh -c 'cat << EOF >> /etc/trex_cfg.yaml\n"
-                        f"  rx_desc: {Constants.TREX_RX_DESCRIPTORS_COUNT}\n"
-                        f"EOF'",
-                        sudo=True, message=u"T-Rex rx_desc modification!"
+        if subtype == NodeSubTypeTG.TREX:
+            trex_topology = list()
+            self._mode = TrexMode.ASTF if osi_layer == "L7" else TrexMode.STL
+
+            for link in range(1, pfs, 2):
+                tg_if1_adj_addr = topology[f"TG_pf{link+1}_mac"][0]
+                tg_if2_adj_addr = topology[f"TG_pf{link}_mac"][0]
+                if osi_layer in ("L3", "L7") and "DUT1" in topology.keys():
+                    ifl = BuiltIn().get_variable_value("${int}")
+                    last = topology["duts_count"]
+                    tg_if1_adj_addr = Topology().get_interface_mac(
+                        topology["DUT1"],
+                        BuiltIn().get_variable_value(
+                            f"${{DUT1_{ifl}{link}}}[0]"
+                        )
+                    )
+                    tg_if2_adj_addr = Topology().get_interface_mac(
+                        topology[f"DUT{last}"],
+                        BuiltIn().get_variable_value(
+                            f"${{DUT{last}_{ifl}{link+1}}}[0]"
+                        )
                     )
 
-                if Constants.TREX_TX_DESCRIPTORS_COUNT != 0:
-                    exec_cmd_no_error(
-                        self._node,
-                        f"sh -c 'cat << EOF >> /etc/trex_cfg.yaml\n"
-                        f"  tx_desc: {Constants.TREX_TX_DESCRIPTORS_COUNT}\n"
-                        f"EOF'",
-                        sudo=True, message=u"T-Rex tx_desc modification!"
+                if1_pci = topology[f"TG_pf{link}_pci"][0]
+                if2_pci = topology[f"TG_pf{link+1}_pci"][0]
+                if min(if1_pci, if2_pci) != if1_pci:
+                    self._ifaces.append(str(link))
+                    self._ifaces.append(str(link-1))
+                    trex_topology.append(
+                        dict(
+                            interface=topology[f"TG_pf{link+1}"][0],
+                            dst_mac=tg_if2_adj_addr
+                        )
+                    )
+                    trex_topology.append(
+                        dict(
+                            interface=topology[f"TG_pf{link}"][0],
+                            dst_mac=tg_if1_adj_addr
+                        )
+                    )
+                else:
+                    self._ifaces.append(str(link-1))
+                    self._ifaces.append(str(link))
+                    trex_topology.append(
+                        dict(
+                            interface=topology[f"TG_pf{link}"][0],
+                            dst_mac=tg_if1_adj_addr
+                        )
+                    )
+                    trex_topology.append(
+                        dict(
+                            interface=topology[f"TG_pf{link+1}"][0],
+                            dst_mac=tg_if2_adj_addr
+                        )
                     )
-            else:
-                raise ValueError(u"Unknown OSI layer!")
 
+            TrexConfig.add_startup_configuration(
+                self._node, trex_topology
+            )
             TrafficGenerator.startup_trex(
                 self._node, osi_layer, subtype=subtype
             )
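
The pairing loop above walks the TG physical functions two at a time and swaps a
pair whenever pf{link} does not own the lower PCI address, apparently so that
T-Rex port numbering follows PCI order. A standalone sketch of just that
ordering step, with made-up topology data (not part of this change):

    def order_tg_ports(topology, pfs=2):
        """Return 0-based T-Rex port ids, each pair in ascending PCI order."""
        ifaces = []
        for link in range(1, pfs, 2):
            pci_1 = topology[f"TG_pf{link}_pci"][0]
            pci_2 = topology[f"TG_pf{link+1}_pci"][0]
            if min(pci_1, pci_2) != pci_1:
                ifaces.extend([str(link), str(link - 1)])
            else:
                ifaces.extend([str(link - 1), str(link)])
        return ifaces

    # Second port of the pair has the lower PCI address, so it comes first.
    example = {
        "TG_pf1_pci": ["0000:3b:00.1"],
        "TG_pf2_pci": ["0000:3b:00.0"],
    }
    assert order_tg_ports(example) == ["1", "0"]
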
@@ -418,32 +378,41 @@ class TrafficGenerator(AbstractMeasurer):
         if subtype == NodeSubTypeTG.TREX:
             for _ in range(0, 3):
                 # Kill TRex only if it is already running.
-                cmd = u"sh -c \"pgrep t-rex && pkill t-rex && sleep 3 || true\""
+                cmd = "sh -c \"pgrep t-rex && pkill t-rex && sleep 3 || true\""
                 exec_cmd_no_error(
-                    tg_node, cmd, sudo=True, message=u"Kill TRex failed!"
+                    tg_node, cmd, sudo=True, message="Kill TRex failed!"
                 )
 
-                # Configure TRex.
-                ports = ''
-                for port in tg_node[u"interfaces"].values():
-                    if u'Mellanox' not in port.get(u'model'):
-                        ports += f" {port.get(u'pci_address')}"
-
-                cmd = f"sh -c \"cd {Constants.TREX_INSTALL_DIR}/scripts/ && " \
-                    f"./dpdk_nic_bind.py -u {ports} || true\""
-                exec_cmd_no_error(
-                    tg_node, cmd, sudo=True,
-                    message=u"Unbind PCI ports from driver failed!"
-                )
+                # Prepare interfaces for TRex.
+                tg_port_drv = Constants.TREX_PORT_DRIVER
+                mlx_driver = ""
+                for port in tg_node["interfaces"].values():
+                    if "Mellanox" in port.get("model"):
+                        mlx_driver = port.get("driver")
+                        pci_addr = port.get("pci_address")
+                        cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
+                        if cur_driver == mlx_driver:
+                            pass
+                        elif not cur_driver:
+                            DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
+                        else:
+                            DS.pci_driver_unbind(tg_node, pci_addr)
+                            DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
+                    else:
+                        pci_addr = port.get("pci_address")
+                        cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
+                        if cur_driver:
+                            DS.pci_driver_unbind(tg_node, pci_addr)
+                        DS.pci_driver_bind(tg_node, pci_addr, tg_port_drv)
 
                 # Start TRex.
                 cd_cmd = f"cd '{Constants.TREX_INSTALL_DIR}/scripts/'"
-                trex_cmd = OptionString([u"nohup", u"./t-rex-64"])
-                trex_cmd.add(u"-i")
-                trex_cmd.add(u"--prefix $(hostname)")
-                trex_cmd.add(u"--hdrh")
-                trex_cmd.add(u"--no-scapy-server")
-                trex_cmd.add_if(u"--astf", osi_layer == u"L7")
+                trex_cmd = OptionString(["nohup", "./t-rex-64"])
+                trex_cmd.add("-i")
+                trex_cmd.add("--prefix $(hostname)")
+                trex_cmd.add("--hdrh")
+                trex_cmd.add("--no-scapy-server")
+                trex_cmd.add_if("--astf", osi_layer == "L7")
                 # OptionString does not create double space if extra is empty.
                 trex_cmd.add(f"{Constants.TREX_EXTRA_CMDLINE}")
                 inner_command = f"{cd_cmd} && {trex_cmd} > /tmp/trex.log 2>&1 &"
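
The block above replaces the old dpdk_nic_bind.py unbind with explicit DUTSetup
calls: Mellanox ports end up on their kernel mlx driver, all other ports on
Constants.TREX_PORT_DRIVER. A condensed sketch of that decision (it skips the
unbind/rebind when a port already sits on the desired driver, which the loop
above does only for Mellanox ports); the helpers are the same DS/Constants names
imported in this file, the port dict is illustrative:

    def rebind_port(tg_node, port):
        is_mlx = "Mellanox" in port.get("model", "")
        wanted = port.get("driver") if is_mlx else Constants.TREX_PORT_DRIVER
        pci_addr = port.get("pci_address")
        current = DS.get_pci_dev_driver(tg_node, pci_addr)
        if current == wanted:
            return  # Already bound to the driver we want, nothing to do.
        if current:
            DS.pci_driver_unbind(tg_node, pci_addr)
        DS.pci_driver_bind(tg_node, pci_addr, wanted)
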
@@ -451,33 +420,33 @@ class TrafficGenerator(AbstractMeasurer):
                 try:
                     exec_cmd_no_error(tg_node, cmd, sudo=True)
                 except RuntimeError:
-                    cmd = u"sh -c \"cat /tmp/trex.log\""
+                    cmd = "sh -c \"cat /tmp/trex.log\""
                     exec_cmd_no_error(
                         tg_node, cmd, sudo=True,
-                        message=u"Get TRex logs failed!"
+                        message="Get TRex logs failed!"
                     )
-                    raise RuntimeError(u"Start TRex failed!")
+                    raise RuntimeError("Start TRex failed!")
 
                 # Test T-Rex API responsiveness.
                 cmd = f"python3 {Constants.REMOTE_FW_DIR}/GPL/tools/trex/"
-                if osi_layer in (u"L2", u"L3"):
-                    cmd += u"trex_stl_assert.py"
-                elif osi_layer == u"L7":
-                    cmd += u"trex_astf_assert.py"
+                if osi_layer in ("L2", "L3"):
+                    cmd += "trex_stl_assert.py"
+                elif osi_layer == "L7":
+                    cmd += "trex_astf_assert.py"
                 else:
-                    raise ValueError(u"Unknown OSI layer!")
+                    raise ValueError("Unknown OSI layer!")
                 try:
                     exec_cmd_no_error(
                         tg_node, cmd, sudo=True,
-                        message=u"T-Rex API is not responding!", retries=20
+                        message="T-Rex API is not responding!", retries=20
                     )
                 except RuntimeError:
                     continue
                 return
             # After max retries TRex is still not responding to API,
             # a critical error occurred.
-            exec_cmd(tg_node, u"cat /tmp/trex.log", sudo=True)
-            raise RuntimeError(u"Start T-Rex failed after multiple retries!")
+            exec_cmd(tg_node, "cat /tmp/trex.log", sudo=True)
+            raise RuntimeError("Start T-Rex failed after multiple retries!")
 
     @staticmethod
     def is_trex_running(node):
@@ -488,7 +457,7 @@ class TrafficGenerator(AbstractMeasurer):
         :returns: True if T-Rex is running otherwise False.
         :rtype: bool
         """
-        ret, _, _ = exec_cmd(node, u"pgrep t-rex", sudo=True)
+        ret, _, _ = exec_cmd(node, "pgrep t-rex", sudo=True)
         return bool(int(ret) == 0)
 
     @staticmethod
@@ -521,17 +490,17 @@ class TrafficGenerator(AbstractMeasurer):
         :type node: dict
         :raises RuntimeError: If stop traffic script fails.
         """
-        command_line = OptionString().add(u"python3")
+        command_line = OptionString().add("python3")
         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
         command_line.add(f"'{dirname}/trex_astf_stop.py'")
-        command_line.change_prefix(u"--")
-        for index, value in enumerate(self._xstats):
+        command_line.add("--xstat")
+        for value in self._xstats:
             if value is not None:
-                value = value.replace(u"'", u"\"")
-                command_line.add_equals(f"xstat{index}", f"'{value}'")
+                value = value.replace("'", "\"")
+                command_line.add(f"'{value}'")
         stdout, _ = exec_cmd_no_error(
             node, command_line,
-            message=u"T-Rex ASTF runtime error!"
+            message="T-Rex ASTF runtime error!"
         )
         self._parse_traffic_results(stdout)
 
@@ -545,17 +514,17 @@ class TrafficGenerator(AbstractMeasurer):
         :type node: dict
         :raises RuntimeError: If stop traffic script fails.
         """
-        command_line = OptionString().add(u"python3")
+        command_line = OptionString().add("python3")
         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
         command_line.add(f"'{dirname}/trex_stl_stop.py'")
-        command_line.change_prefix(u"--")
-        for index, value in enumerate(self._xstats):
+        command_line.add("--xstat")
+        for value in self._xstats:
             if value is not None:
-                value = value.replace(u"'", u"\"")
-                command_line.add_equals(f"xstat{index}", f"'{value}'")
+                value = value.replace("'", "\"")
+                command_line.add(f"'{value}'")
         stdout, _ = exec_cmd_no_error(
             node, command_line,
-            message=u"T-Rex STL runtime error!"
+            message="T-Rex STL runtime error!"
         )
         self._parse_traffic_results(stdout)
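
Both stop helpers above now pass the collected snapshots as values after a
single --xstat option instead of the numbered --xstat0=/--xstat1= pairs, so any
number of ports can report xstats. Roughly, the assembled option string changes
shape like this (values are illustrative placeholders, not real snapshots):

    xstats = ['{"port": 0}', '{"port": 1}', '{"port": 2}']
    old_style = " ".join(f"--xstat{i}='{v}'" for i, v in enumerate(xstats[:2]))
    new_style = "--xstat " + " ".join(f"'{v}'" for v in xstats)
    # old_style: --xstat0='{"port": 0}' --xstat1='{"port": 1}'
    # new_style: --xstat '{"port": 0}' '{"port": 1}' '{"port": 2}'
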
 
@@ -563,7 +532,7 @@ class TrafficGenerator(AbstractMeasurer):
         """Stop all traffic on TG.
 
         :returns: Structure containing the result of the measurement.
-        :rtype: ReceiveRateMeasurement
+        :rtype: MeasurementResult
         :raises ValueError: If TG traffic profile is not supported.
         """
         subtype = check_subtype(self._node)
@@ -583,7 +552,7 @@ class TrafficGenerator(AbstractMeasurer):
         """Compute duration for profile driver.
 
         The final result is influenced by transaction scale and duration limit.
-        It is assumed a higher level function has already set those to self.
+        It is assumed a higher level function has already set those on self.
         The duration argument is the target value from search point of view,
         before the overrides are applied here.
 
@@ -660,8 +629,6 @@ class TrafficGenerator(AbstractMeasurer):
         if not isinstance(duration, (float, int)):
             duration = float(duration)
 
-        # TODO: Refactor the code so duration is computed only once,
-        # and both the initial and the computed durations are logged.
         computed_duration, _ = self._compute_duration(duration, multiplier)
 
         command_line = OptionString().add(u"python3")
@@ -705,7 +672,7 @@ class TrafficGenerator(AbstractMeasurer):
             self._sent = None
             self._loss = None
             self._latency = None
-            xstats = [None, None]
+            xstats = []
             self._l7_data = dict()
             self._l7_data[u"client"] = dict()
             self._l7_data[u"client"][u"active_flows"] = None
@@ -738,10 +705,8 @@ class TrafficGenerator(AbstractMeasurer):
             index = 0
             for line in stdout.splitlines():
                 if f"Xstats snapshot {index}: " in line:
-                    xstats[index] = line[19:]
+                    xstats.append(line[19:])
                     index += 1
-                if index == 2:
-                    break
             self._xstats = tuple(xstats)
         else:
             self._target_duration = duration
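
With a variable port count, the async-start branch above (and its STL
counterpart later in this file) no longer caps the snapshot list at two entries;
it collects every "Xstats snapshot N: " line the profile driver printed. A
standalone restatement of that parsing step, with made-up stdout:

    def parse_xstats(stdout):
        """Collect consecutive per-port xstats snapshots from driver output."""
        xstats = []
        index = 0
        for line in stdout.splitlines():
            if f"Xstats snapshot {index}: " in line:
                # Slice off the 19-character "Xstats snapshot N: " prefix.
                xstats.append(line[19:])
                index += 1
        return tuple(xstats)

    sample = "Xstats snapshot 0: {...}\nXstats snapshot 1: {...}\n"
    assert parse_xstats(sample) == ("{...}", "{...}")
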
@@ -769,41 +734,36 @@ class TrafficGenerator(AbstractMeasurer):
         :raises RuntimeError: In case of T-Rex driver issue.
         """
         self.check_mode(TrexMode.STL)
-        p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
         if not isinstance(duration, (float, int)):
             duration = float(duration)
 
-        # TODO: Refactor the code so duration is computed only once,
-        # and both the initial and the computed durations are logged.
         duration, _ = self._compute_duration(duration=duration, multiplier=rate)
 
-        command_line = OptionString().add(u"python3")
+        command_line = OptionString().add("python3")
         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
         command_line.add(f"'{dirname}/trex_stl_profile.py'")
-        command_line.change_prefix(u"--")
+        command_line.change_prefix("--")
         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
         command_line.add_with_value(
-            u"profile", f"'{dirname}/{self.traffic_profile}.py'"
+            "profile", f"'{dirname}/{self.traffic_profile}.py'"
         )
-        command_line.add_with_value(u"duration", f"{duration!r}")
-        command_line.add_with_value(u"frame_size", self.frame_size)
-        command_line.add_with_value(u"rate", f"{rate!r}")
-        command_line.add_with_value(u"port_0", p_0)
-        command_line.add_with_value(u"port_1", p_1)
+        command_line.add_with_value("duration", f"{duration!r}")
+        command_line.add_with_value("frame_size", self.frame_size)
+        command_line.add_with_value("rate", f"{rate!r}")
+        command_line.add_with_value("ports", " ".join(self._ifaces))
         command_line.add_with_value(
-            u"traffic_directions", self.traffic_directions
+            "traffic_directions", self.traffic_directions
         )
-        command_line.add_if(u"async_start", async_call)
-        command_line.add_if(u"latency", self.use_latency)
-        command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
-        command_line.add_with_value(u"delay", Constants.PERF_TRIAL_STL_DELAY)
+        command_line.add_if("async_start", async_call)
+        command_line.add_if("latency", self.use_latency)
+        command_line.add_if("force", Constants.TREX_SEND_FORCE)
+        command_line.add_with_value("delay", Constants.PERF_TRIAL_STL_DELAY)
 
-        # TODO: This is ugly. Handle parsing better.
         self._start_time = time.monotonic()
-        self._rate = float(rate[:-3]) if u"pps" in rate else float(rate)
+        self._rate = float(rate[:-3]) if "pps" in rate else float(rate)
         stdout, _ = exec_cmd_no_error(
             self._node, command_line, timeout=int(duration) + 60,
-            message=u"T-Rex STL runtime error"
+            message="T-Rex STL runtime error"
         )
 
         if async_call:
@@ -815,14 +775,12 @@ class TrafficGenerator(AbstractMeasurer):
             self._loss = None
             self._latency = None
 
-            xstats = [None, None]
+            xstats = []
             index = 0
             for line in stdout.splitlines():
                 if f"Xstats snapshot {index}: " in line:
-                    xstats[index] = line[19:]
+                    xstats.append(line[19:])
                     index += 1
-                if index == 2:
-                    break
             self._xstats = tuple(xstats)
         else:
             self._target_duration = duration
@@ -912,7 +870,7 @@ class TrafficGenerator(AbstractMeasurer):
         :type state_timeout: float
         :type ramp_up_only: bool
         :returns: TG results.
-        :rtype: ReceiveRateMeasurement or None
+        :rtype: MeasurementResult or None
         :raises ValueError: If TG traffic profile is not supported.
         """
         self.set_rate_provider_defaults(
@@ -959,7 +917,7 @@ class TrafficGenerator(AbstractMeasurer):
         :type rate: float
         :type async_call: bool
         :returns: TG results.
-        :rtype: ReceiveRateMeasurement or None
+        :rtype: MeasurementResult or None
         :raises ValueError: If TG traffic profile is not supported.
         """
         subtype = check_subtype(self._node)
@@ -970,7 +928,6 @@ class TrafficGenerator(AbstractMeasurer):
                 )
             elif u"trex-stl" in self.traffic_profile:
                 unit_rate_str = str(rate) + u"pps"
-                # TODO: Suport transaction_scale et al?
                 self.trex_stl_start_remote_exec(
                     duration, unit_rate_str, async_call
                 )
@@ -1011,13 +968,12 @@ class TrafficGenerator(AbstractMeasurer):
         :type async_call: bool
         :type ramp_up_only: bool
         :returns: TG results.
-        :rtype: ReceiveRateMeasurement or None
+        :rtype: MeasurementResult or None
         :raises ValueError: If TG traffic profile is not supported.
         """
         complete = False
         if self.ramp_up_rate:
             # Figure out whether we need to insert a ramp-up trial.
-            # TODO: Give up on async_call=True?
             if ramp_up_only or self.ramp_up_start is None:
                 # We never ramped up yet (at least not in this test case).
                 ramp_up_needed = True
@@ -1067,7 +1023,7 @@ class TrafficGenerator(AbstractMeasurer):
         trial_end = time.monotonic()
         if self.ramp_up_rate:
             # Optimization: No loss acts as a good ramp-up, if it was complete.
-            if complete and result is not None and result.loss_count == 0:
+            if complete and result is not None and result.loss_ratio == 0.0:
                 logger.debug(u"Good trial acts as a ramp-up")
                 self.ramp_up_start = trial_start
                 self.ramp_up_stop = trial_end
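
The ramp-up shortcut now keys off result.loss_ratio instead of a raw loss count.
The counters set in _get_measurement_result() below suggest the ratio is simply
losses over offered transactions; the actual property lives in the MLRsearch
package, so this is only the assumed relation:

    def assumed_loss_ratio(loss_count, offered_count):
        # Assumption: zero when nothing was offered, to avoid dividing by zero.
        return loss_count / offered_count if offered_count else 0.0
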
@@ -1089,8 +1045,6 @@ class TrafficGenerator(AbstractMeasurer):
     def fail_if_no_traffic_forwarded(self):
         """Fail if no traffic forwarded.
 
-        TODO: Check number of passed transactions instead.
-
         :returns: nothing
         :raises Exception: If no traffic forwarded.
         """
@@ -1241,21 +1195,20 @@ class TrafficGenerator(AbstractMeasurer):
                         int(self._result.get(u"server_tcp_rx_bytes", 0))
 
     def _get_measurement_result(self):
-        """Return the result of last measurement as ReceiveRateMeasurement.
+        """Return the result of last measurement as MeasurementResult.
 
         Separate function, as measurements can end either by time
         or by explicit call, this is the common block at the end.
 
-        The target_tr field of ReceiveRateMeasurement is in
+        The intended_load field of MeasurementResult is in
         transactions per second. Transmit count and loss count units
         depend on the transaction type. Usually they are in transactions
         per second, or aggregated packets per second.
 
-        TODO: Fail on running or already reported measurement.
-
         :returns: Structure containing the result of the measurement.
-        :rtype: ReceiveRateMeasurement
+        :rtype: MeasurementResult
         """
+        duration_with_overheads = time.monotonic() - self._start_time
         try:
             # Client duration seems to include a setup period
             # where TRex does not send any packets yet.
@@ -1295,7 +1248,7 @@ class TrafficGenerator(AbstractMeasurer):
             expected_attempt_count = max(expected_attempt_count, self._sent)
             unsent = expected_attempt_count - self._sent
             pass_count = self._received
-            fail_count = expected_attempt_count - pass_count
+            loss_count = self._loss
         elif self.transaction_type == u"udp_cps":
             if not self.transaction_scale:
                 raise RuntimeError(u"Add support for no-limit udp_cps.")
@@ -1304,7 +1257,7 @@ class TrafficGenerator(AbstractMeasurer):
             expected_attempt_count = self.transaction_scale
             unsent = expected_attempt_count - partial_attempt_count
             pass_count = self._l7_data[u"client"][u"received"]
-            fail_count = expected_attempt_count - pass_count
+            loss_count = partial_attempt_count - pass_count
         elif self.transaction_type == u"tcp_cps":
             if not self.transaction_scale:
                 raise RuntimeError(u"Add support for no-limit tcp_cps.")
@@ -1317,14 +1270,14 @@ class TrafficGenerator(AbstractMeasurer):
             # but we are testing NAT session so client/connects counts that
             # (half connections from TCP point of view).
             pass_count = self._l7_data[u"client"][u"tcp"][u"connects"]
-            fail_count = expected_attempt_count - pass_count
+            loss_count = partial_attempt_count - pass_count
         elif self.transaction_type == u"udp_pps":
             if not self.transaction_scale:
                 raise RuntimeError(u"Add support for no-limit udp_pps.")
             partial_attempt_count = self._sent
             expected_attempt_count = self.transaction_scale * self.ppta
             unsent = expected_attempt_count - self._sent
-            fail_count = self._loss + unsent
+            loss_count = self._loss
         elif self.transaction_type == u"tcp_pps":
             if not self.transaction_scale:
                 raise RuntimeError(u"Add support for no-limit tcp_pps.")
@@ -1338,29 +1291,30 @@ class TrafficGenerator(AbstractMeasurer):
             # Probability of retransmissions exactly cancelling
             # packets unsent due to duration stretching is quite low.
             unsent = abs(expected_attempt_count - self._sent)
-            fail_count = self._loss + unsent
+            loss_count = self._loss
         else:
             raise RuntimeError(f"Unknown parsing {self.transaction_type!r}")
         if unsent and isinstance(self._approximated_duration, float):
             # Do not report unsent for "manual".
             logger.debug(f"Unsent packets/transactions: {unsent}")
-        if fail_count < 0 and not self.negative_loss:
-            fail_count = 0
-        measurement = ReceiveRateMeasurement(
-            duration=target_duration,
-            target_tr=transmit_rate,
-            transmit_count=expected_attempt_count,
-            loss_count=fail_count,
-            approximated_duration=approximated_duration,
-            partial_transmit_count=partial_attempt_count,
+        if loss_count < 0 and not self.negative_loss:
+            loss_count = 0
+        measurement = MeasurementResult(
+            intended_duration=target_duration,
+            intended_load=transmit_rate,
+            offered_count=partial_attempt_count,
+            loss_count=loss_count,
+            offered_duration=approximated_duration,
+            duration_with_overheads=duration_with_overheads,
+            intended_count=expected_attempt_count,
         )
         measurement.latency = self.get_latency_int()
         return measurement
 
-    def measure(self, duration, transmit_rate):
+    def measure(self, intended_duration, intended_load):
         """Run trial measurement, parse and return results.
 
-        The input rate is for transactions. Stateles bidirectional traffic
+        The intended load is for transactions. Stateless bidirectional traffic
         is understood as a sequence of (asynchronous) transactions,
         two packets each.
 
@@ -1368,35 +1322,32 @@ class TrafficGenerator(AbstractMeasurer):
         the count either transactions or packets (aggregated over directions).
 
         Optionally, this method sleeps if measurement finished before
-        the time specified as duration.
+        the time specified as intended_duration (PLRsearch needs time for math).
 
-        :param duration: Trial duration [s].
-        :param transmit_rate: Target rate in transactions per second.
-        :type duration: float
-        :type transmit_rate: float
+        :param intended_duration: Trial duration [s].
+        :param intended_load: Target rate in transactions per second.
+        :type intended_duration: float
+        :type intended_load: float
         :returns: Structure containing the result of the measurement.
-        :rtype: ReceiveRateMeasurement
+        :rtype: MeasurementResult
         :raises RuntimeError: If TG is not set or if node is not TG
             or if subtype is not specified.
         :raises NotImplementedError: If TG is not supported.
         """
-        duration = float(duration)
+        intended_duration = float(intended_duration)
         time_start = time.monotonic()
-        time_stop = time_start + duration
+        time_stop = time_start + intended_duration
         if self.resetter:
             self.resetter()
         result = self._send_traffic_on_tg_with_ramp_up(
-            duration=duration,
-            rate=transmit_rate,
+            duration=intended_duration,
+            rate=intended_load,
             async_call=False,
         )
         logger.debug(f"trial measurement result: {result!r}")
         # In PLRsearch, computation needs the specified time to complete.
         if self.sleep_till_duration:
-            sleeptime = time_stop - time.monotonic()
-            if sleeptime > 0.0:
-                # TODO: Sometimes we have time to do additional trials here,
-                # adapt PLRsearch to accept all the results.
+            while (sleeptime := time_stop - time.monotonic()) > 0.0:
                 time.sleep(sleeptime)
         return result
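
measure() is the entry point both MLRsearch and PLRsearch drive: they hand in an
intended duration and load and expect a MeasurementResult back. A toy measurer
showing that contract in isolation; the constructor arguments mirror
_get_measurement_result() above, and it is assumed the remaining
MeasurementResult fields have usable defaults when omitted:

    from resources.libraries.python.MLRsearch import (
        AbstractMeasurer, MeasurementResult,
    )

    class LosslessMeasurer(AbstractMeasurer):
        """Pretend every intended transaction was offered and none was lost."""

        def measure(self, intended_duration, intended_load):
            count = int(intended_duration * intended_load)
            return MeasurementResult(
                intended_duration=intended_duration,
                intended_load=intended_load,
                offered_count=count,
                loss_count=0,
            )
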
 
@@ -1437,7 +1388,6 @@ class TrafficGenerator(AbstractMeasurer):
         :param transaction_type: An identifier specifying which counters
             and formulas to use when computing attempted and failed
             transactions. Default: "packet".
-            TODO: Does this also specify parsing for the measured duration?
         :param duration_limit: Zero or maximum limit for computed (or given)
             duration.
         :param negative_loss: If false, negative loss is reported as zero loss.
@@ -1467,7 +1417,7 @@ class TrafficGenerator(AbstractMeasurer):
         self.frame_size = frame_size
         self.traffic_profile = str(traffic_profile)
         self.resetter = resetter
-        self.ppta = ppta
+        self.ppta = int(ppta)
         self.traffic_directions = int(traffic_directions)
         self.transaction_duration = float(transaction_duration)
         self.transaction_scale = int(transaction_scale)
@@ -1489,29 +1439,30 @@ class OptimizedSearch:
     """
 
     @staticmethod
-    def perform_optimized_ndrpdr_search(
-            frame_size,
-            traffic_profile,
-            minimum_transmit_rate,
-            maximum_transmit_rate,
-            packet_loss_ratio=0.005,
-            final_relative_width=0.005,
-            final_trial_duration=30.0,
-            initial_trial_duration=1.0,
-            number_of_intermediate_phases=2,
-            timeout=1200.0,
-            ppta=1,
-            resetter=None,
-            traffic_directions=2,
-            transaction_duration=0.0,
-            transaction_scale=0,
-            transaction_type=u"packet",
-            use_latency=False,
-            ramp_up_rate=None,
-            ramp_up_duration=None,
-            state_timeout=240.0,
-            expansion_coefficient=4.0,
-    ):
+    def perform_mlr_search(
+        frame_size: Union[int, str],
+        traffic_profile: str,
+        min_load: float,
+        max_load: float,
+        loss_ratio: float = 0.005,
+        relative_width: float = 0.005,
+        initial_trial_duration: float = 1.0,
+        final_trial_duration: float = 1.0,
+        duration_sum: float = 21.0,
+        expansion_coefficient: int = 2,
+        preceding_targets: int = 2,
+        search_duration_max: float = 1200.0,
+        ppta: int = 1,
+        resetter: Optional[Callable[[], None]] = None,
+        traffic_directions: int = 2,
+        transaction_duration: float = 0.0,
+        transaction_scale: int = 0,
+        transaction_type: str = "packet",
+        use_latency: bool = False,
+        ramp_up_rate: float = 0.0,
+        ramp_up_duration: float = 0.0,
+        state_timeout: float = 240.0,
+    ) -> List[GoalResult]:
         """Setup initialized TG, perform optimized search, return intervals.
 
         If transaction_scale is nonzero, all init and non-init trial durations
@@ -1523,18 +1474,20 @@ class OptimizedSearch:
         :param frame_size: Frame size identifier or value [B].
         :param traffic_profile: Module name as a traffic profile identifier.
             See GPL/traffic_profiles/trex for implemented modules.
-        :param minimum_transmit_rate: Minimal load in transactions per second.
-        :param maximum_transmit_rate: Maximal load in transactions per second.
-        :param packet_loss_ratio: Ratio of packets lost, for PDR [1].
-        :param final_relative_width: Final lower bound transmit rate
+        :param min_load: Minimal load in transactions per second.
+        :param max_load: Maximal load in transactions per second.
+        :param loss_ratio: Ratio of packets lost, for PDR [1].
+        :param relative_width: Final lower bound intended load
             cannot be more distant than this multiple of upper bound [1].
-        :param final_trial_duration: Trial duration for the final phase [s].
         :param initial_trial_duration: Trial duration for the initial phase
             and also for the first intermediate phase [s].
-        :param number_of_intermediate_phases: Number of intermediate phases
+        :param final_trial_duration: Trial duration for the final phase [s].
+        :param duration_sum: Max sum of trial durations for deciding [s].
+        :param expansion_coefficient: In external search, multiply width by this.
+        :param preceding_targets: Number of intermediate phases
             to perform before the final phase [1].
-        :param timeout: The search will fail itself when not finished
-            before this overall time [s].
+        :param search_duration_max: The search will fail itself
+            when not finished before this overall time [s].
         :param ppta: Packets per transaction, aggregated over directions.
             Needed for udp_pps which does not have a good transaction counter,
             so we need to compute expected number of packets.
@@ -1553,17 +1506,18 @@ class OptimizedSearch:
         :param ramp_up_rate: Rate to use in ramp-up trials [pps].
         :param ramp_up_duration: Duration of ramp-up trials [s].
         :param state_timeout: Time of life of DUT state [s].
-        :param expansion_coefficient: In external search multiply width by this.
         :type frame_size: str or int
         :type traffic_profile: str
-        :type minimum_transmit_rate: float
-        :type maximum_transmit_rate: float
-        :type packet_loss_ratio: float
-        :type final_relative_width: float
-        :type final_trial_duration: float
+        :type min_load: float
+        :type max_load: float
+        :type loss_ratio: float
+        :type relative_width: float
         :type initial_trial_duration: float
-        :type number_of_intermediate_phases: int
-        :type timeout: float
+        :type final_trial_duration: float
+        :type duration_sum: float
+        :type expansion_coefficient: int
+        :type preceding_targets: int
+        :type search_duration_max: float
         :type ppta: int
         :type resetter: Optional[Callable[[], None]]
         :type traffic_directions: int
@@ -1574,11 +1528,12 @@ class OptimizedSearch:
         :type ramp_up_rate: float
         :type ramp_up_duration: float
         :type state_timeout: float
-        :type expansion_coefficient: float
-        :returns: Structure containing narrowed down NDR and PDR intervals
-            and their measurements.
-        :rtype: List[Receiverateinterval]
-        :raises RuntimeError: If total duration is larger than timeout.
+        :returns: Goal result (based on unidirectional tps) for each goal.
+            The result contains both the offered load for the stat trial,
+            and the conditional throughput for display.
+        :rtype: List[GoalResult]
+        :raises RuntimeError: If search duration exceeds search_duration_max
+            or if min load becomes an upper bound for any search goal.
         """
         # we need instance of TrafficGenerator instantiated by Robot Framework
         # to be able to use trex_stl-*()
@@ -1586,13 +1541,12 @@ class OptimizedSearch:
             u"resources.libraries.python.TrafficGenerator"
         )
         # Overrides for fixed transaction amount.
-        # TODO: Move to robot code? We have two call sites, so this saves space,
-        #       even though this is surprising for log readers.
         if transaction_scale:
             initial_trial_duration = 1.0
             final_trial_duration = 1.0
-            number_of_intermediate_phases = 0
-            timeout += transaction_scale * 3e-4
+            preceding_targets = 1
+            # TODO: Move the value to Constants.py?
+            search_duration_max += transaction_scale * 3e-4
         tg_instance.set_rate_provider_defaults(
             frame_size=frame_size,
             traffic_profile=traffic_profile,
@@ -1608,34 +1562,43 @@ class OptimizedSearch:
             ramp_up_duration=ramp_up_duration,
             state_timeout=state_timeout,
         )
-        algorithm = MultipleLossRatioSearch(
-            measurer=tg_instance,
-            final_trial_duration=final_trial_duration,
-            final_relative_width=final_relative_width,
-            number_of_intermediate_phases=number_of_intermediate_phases,
-            initial_trial_duration=initial_trial_duration,
-            timeout=timeout,
-            debug=logger.debug,
-            expansion_coefficient=expansion_coefficient,
-        )
-        if packet_loss_ratio:
-            packet_loss_ratios = [0.0, packet_loss_ratio]
+        if loss_ratio:
+            loss_ratios = [0.0, loss_ratio]
+            exceed_ratio = 0.5
         else:
             # Happens in reconf tests.
-            packet_loss_ratios = [packet_loss_ratio]
-        results = algorithm.narrow_down_intervals(
-            min_rate=minimum_transmit_rate,
-            max_rate=maximum_transmit_rate,
-            packet_loss_ratios=packet_loss_ratios,
-        )
-        return results
+            loss_ratios = [0.0]
+            exceed_ratio = 0.0
+        goals = [
+            SearchGoal(
+                loss_ratio=loss_ratio,
+                exceed_ratio=exceed_ratio,
+                relative_width=relative_width,
+                initial_trial_duration=initial_trial_duration,
+                final_trial_duration=final_trial_duration,
+                duration_sum=duration_sum,
+                preceding_targets=preceding_targets,
+                expansion_coefficient=expansion_coefficient,
+                fail_fast=True,
+            )
+            for loss_ratio in loss_ratios
+        ]
+        config = Config()
+        config.goals = goals
+        config.min_load = min_load
+        config.max_load = max_load
+        config.search_duration_max = search_duration_max
+        config.warmup_duration = 1.0
+        algorithm = MultipleLossRatioSearch(config)
+        results = algorithm.search(measurer=tg_instance, debug=logger.debug)
+        return [results[goal] for goal in goals]
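
Callers get one GoalResult per goal, in the same order as the loss ratios, so a
standard NDR/PDR test receives [ndr_result, pdr_result]. A hedged usage sketch,
as driven from a Robot test (so the Robot-instantiated TrafficGenerator already
exists); argument values are illustrative and the GoalResult attribute name is
assumed from the docstring, not defined in this file:

    ndr_result, pdr_result = OptimizedSearch.perform_mlr_search(
        frame_size=64,
        traffic_profile="trex-stl-ethip4-ip4src254",
        min_load=10000.0,
        max_load=14880952.0,
        loss_ratio=0.005,
    )
    # Conditional throughput is the value meant for display.
    print(pdr_result.conditional_throughput)
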
 
     @staticmethod
     def perform_soak_search(
             frame_size,
             traffic_profile,
-            minimum_transmit_rate,
-            maximum_transmit_rate,
+            min_load,
+            max_load,
             plr_target=1e-7,
             tdpt=0.1,
             initial_count=50,
@@ -1657,8 +1620,8 @@ class OptimizedSearch:
         :param frame_size: Frame size identifier or value [B].
         :param traffic_profile: Module name as a traffic profile identifier.
             See GPL/traffic_profiles/trex for implemented modules.
-        :param minimum_transmit_rate: Minimal load in transactions per second.
-        :param maximum_transmit_rate: Maximal load in transactions per second.
+        :param min_load: Minimal load in transactions per second.
+        :param max_load: Maximal load in transactions per second.
         :param plr_target: Ratio of packets lost to achieve [1].
         :param tdpt: Trial duration per trial.
             The algorithm linearly increases trial duration with trial number,
@@ -1692,8 +1655,8 @@ class OptimizedSearch:
         :param state_timeout: Time of life of DUT state [s].
         :type frame_size: str or int
         :type traffic_profile: str
-        :type minimum_transmit_rate: float
-        :type maximum_transmit_rate: float
+        :type min_load: float
+        :type max_load: float
         :type plr_target: float
         :type initial_count: int
         :type timeout: float
@@ -1715,11 +1678,7 @@ class OptimizedSearch:
             u"resources.libraries.python.TrafficGenerator"
         )
         # Overrides for fixed transaction amount.
-        # TODO: Move to robot code? We have a single call site
-        #       but MLRsearch has two and we want the two to be used similarly.
         if transaction_scale:
-            # TODO: What is a good value for max scale?
-            # TODO: Scale the timeout with transaction scale.
             timeout = 7200.0
         tg_instance.set_rate_provider_defaults(
             frame_size=frame_size,
@@ -1746,7 +1705,7 @@ class OptimizedSearch:
             trace_enabled=trace_enabled,
         )
         result = algorithm.search(
-            min_rate=minimum_transmit_rate,
-            max_rate=maximum_transmit_rate,
+            min_rate=min_load,
+            max_rate=max_load,
         )
         return result