fix(TrafficGenerator): correct tg_topology reverse
diff --git a/resources/libraries/python/TrafficGenerator.py b/resources/libraries/python/TrafficGenerator.py
index ce01aa6..afb9388 100644
--- a/resources/libraries/python/TrafficGenerator.py
+++ b/resources/libraries/python/TrafficGenerator.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 
 """Performance testing traffic generator library."""
 
+import math
 import time
 
 from robot.api import logger
 from robot.libraries.BuiltIn import BuiltIn
 
 from .Constants import Constants
-from .CpuUtils import CpuUtils
 from .DropRateSearch import DropRateSearch
 from .MLRsearch.AbstractMeasurer import AbstractMeasurer
 from .MLRsearch.MultipleLossRatioSearch import MultipleLossRatioSearch
@@ -30,6 +30,8 @@ from .ssh import exec_cmd_no_error, exec_cmd
 from .topology import NodeType
 from .topology import NodeSubTypeTG
 from .topology import Topology
+from .TRexConfigGenerator import TrexInitConfig
+from .DUTSetup import DUTSetup as DS
 
 __all__ = [u"TGDropRateSearchImpl", u"TrafficGenerator", u"OptimizedSearch"]
 
@@ -127,18 +129,13 @@ class TrexMode:
     STL = u"STL"
 
 
-# TODO: Pylint says too-many-instance-attributes.
 class TrafficGenerator(AbstractMeasurer):
     """Traffic Generator."""
 
-    # TODO: Remove "trex" from lines which could work with other TGs.
-
     # Use one instance of TrafficGenerator for all tests in test suite
     ROBOT_LIBRARY_SCOPE = u"TEST SUITE"
 
     def __init__(self):
-        # TODO: Separate into few dataclasses/dicts.
-        #       Pylint dislikes large unstructured state, and it is right.
         self._node = None
         self._mode = None
         # TG interface order mapping
@@ -178,7 +175,6 @@ class TrafficGenerator(AbstractMeasurer):
         self.state_timeout = None
         # Transient data needed for async measurements.
         self._xstats = (None, None)
-        # TODO: Rename "xstats" to something opaque, so T-Rex is not privileged?
 
     @property
     def node(self):
@@ -251,15 +247,43 @@ class TrafficGenerator(AbstractMeasurer):
             f"{self._node[u'subtype']} not running in {expected_mode} mode!"
         )
 
-    # TODO: pylint says disable=too-many-locals.
+    @staticmethod
+    def get_tg_type(tg_node):
+        """Log and return the installed traffic generator type.
+
+        :param tg_node: Node from topology file.
+        :type tg_node: dict
+        :returns: Traffic generator type string.
+        :rtype: str
+        :raises RuntimeError: If command returns nonzero return code.
+        """
+        return str(check_subtype(tg_node))
+
+    @staticmethod
+    def get_tg_version(tg_node):
+        """Log and return the installed traffic generator version.
+
+        :param tg_node: Node from topology file.
+        :type tg_node: dict
+        :returns: Traffic generator version string.
+        :rtype: str
+        :raises RuntimeError: If command returns nonzero return code.
+        """
+        subtype = check_subtype(tg_node)
+        if subtype == NodeSubTypeTG.TREX:
+            command = f"cat {Constants.TREX_INSTALL_DIR}/VERSION"
+            message = u"Get T-Rex version failed!"
+            stdout, _ = exec_cmd_no_error(tg_node, command, message=message)
+            return stdout.strip()
+        else:
+            return "none"
+
     def initialize_traffic_generator(
             self, tg_node, tg_if1, tg_if2, tg_if1_adj_node, tg_if1_adj_if,
             tg_if2_adj_node, tg_if2_adj_if, osi_layer, tg_if1_dst_mac=None,
             tg_if2_dst_mac=None):
         """TG initialization.
 
-        TODO: Document why do we need (and how do we use) _ifaces_reordered.
-
         :param tg_node: Traffic generator node.
         :param tg_if1: TG - name of first interface.
         :param tg_if2: TG - name of second interface.
@@ -286,69 +310,32 @@ class TrafficGenerator(AbstractMeasurer):
         subtype = check_subtype(tg_node)
         if subtype == NodeSubTypeTG.TREX:
             self._node = tg_node
-            self._mode = TrexMode.ASTF if osi_layer == u"L7" else TrexMode.STL
-            if1 = dict()
-            if2 = dict()
-            if1[u"pci"] = Topology().get_interface_pci_addr(self._node, tg_if1)
-            if2[u"pci"] = Topology().get_interface_pci_addr(self._node, tg_if2)
-            if1[u"addr"] = Topology().get_interface_mac(self._node, tg_if1)
-            if2[u"addr"] = Topology().get_interface_mac(self._node, tg_if2)
-
-            if osi_layer == u"L2":
-                if1[u"adj_addr"] = if2[u"addr"]
-                if2[u"adj_addr"] = if1[u"addr"]
-            elif osi_layer in (u"L3", u"L7"):
-                if1[u"adj_addr"] = Topology().get_interface_mac(
+            self._mode = TrexMode.ASTF if osi_layer == "L7" else TrexMode.STL
+
+            if osi_layer == "L2":
+                tg_if1_adj_addr = Topology().get_interface_mac(tg_node, tg_if2)
+                tg_if2_adj_addr = Topology().get_interface_mac(tg_node, tg_if1)
+            elif osi_layer in ("L3", "L7"):
+                tg_if1_adj_addr = Topology().get_interface_mac(
                     tg_if1_adj_node, tg_if1_adj_if
                 )
-                if2[u"adj_addr"] = Topology().get_interface_mac(
+                tg_if2_adj_addr = Topology().get_interface_mac(
                     tg_if2_adj_node, tg_if2_adj_if
                 )
             else:
-                raise ValueError(u"Unknown OSI layer!")
-
-            # in case of switched environment we can override MAC addresses
-            if tg_if1_dst_mac is not None and tg_if2_dst_mac is not None:
-                if1[u"adj_addr"] = tg_if1_dst_mac
-                if2[u"adj_addr"] = tg_if2_dst_mac
-
-            if min(if1[u"pci"], if2[u"pci"]) != if1[u"pci"]:
-                if1, if2 = if2, if1
+                raise ValueError("Unknown OSI layer!")
+
+            tg_topology = list()
+            tg_topology.append(dict(interface=tg_if1, dst_mac=tg_if1_adj_addr))
+            tg_topology.append(dict(interface=tg_if2, dst_mac=tg_if2_adj_addr))
+            if1_pci = Topology().get_interface_pci_addr(self._node, tg_if1)
+            if2_pci = Topology().get_interface_pci_addr(self._node, tg_if2)
+            if min(if1_pci, if2_pci) != if1_pci:
                 self._ifaces_reordered = True
+                tg_topology.reverse()
 
-            master_thread_id, latency_thread_id, socket, threads = \
-                CpuUtils.get_affinity_trex(
-                    self._node, tg_if1, tg_if2,
-                    tg_dtc=Constants.TREX_CORE_COUNT)
-
-            if osi_layer in (u"L2", u"L3", u"L7"):
-                exec_cmd_no_error(
-                    self._node,
-                    f"sh -c 'cat << EOF > /etc/trex_cfg.yaml\n"
-                    f"- version: 2\n"
-                    f"  c: {len(threads)}\n"
-                    f"  limit_memory: {Constants.TREX_LIMIT_MEMORY}\n"
-                    f"  interfaces: [\"{if1[u'pci']}\",\"{if2[u'pci']}\"]\n"
-                    f"  port_info:\n"
-                    f"      - dest_mac: \'{if1[u'adj_addr']}\'\n"
-                    f"        src_mac: \'{if1[u'addr']}\'\n"
-                    f"      - dest_mac: \'{if2[u'adj_addr']}\'\n"
-                    f"        src_mac: \'{if2[u'addr']}\'\n"
-                    f"  platform :\n"
-                    f"      master_thread_id: {master_thread_id}\n"
-                    f"      latency_thread_id: {latency_thread_id}\n"
-                    f"      dual_if:\n"
-                    f"          - socket: {socket}\n"
-                    f"            threads: {threads}\n"
-                    f"EOF'",
-                    sudo=True, message=u"T-Rex config generation!"
-                )
-            else:
-                raise ValueError(u"Unknown OSI layer!")
-
-            TrafficGenerator.startup_trex(
-                self._node, osi_layer, subtype=subtype
-            )
+            TrexInitConfig.init_trex_startup_configuration(tg_node, tg_topology)
+            TrafficGenerator.startup_trex(tg_node, osi_layer, subtype=subtype)
 
     @staticmethod
     def startup_trex(tg_node, osi_layer, subtype=None):
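
The list reversal above is the fix named in the commit subject: the config handed to TrexInitConfig is expected in ascending PCI order, which is what the removed inline trex_cfg.yaml generation achieved by swapping if1 and if2. With tg_topology, the interface and its dst_mac must be flipped together as one entry. An illustrative sketch with made-up PCI and MAC values:

    # Illustrative only: mirrors the reverse logic above with dummy values.
    if1_pci = "0000:3b:00.1"    # first interface as named in the topology file
    if2_pci = "0000:3b:00.0"    # second interface, but lower PCI address
    tg_topology = [
        dict(interface="port1", dst_mac="02:00:00:00:00:01"),
        dict(interface="port2", dst_mac="02:00:00:00:00:02"),
    ]
    if min(if1_pci, if2_pci) != if1_pci:
        # Flip whole entries so each dst_mac stays attached to its interface;
        # reversing only one field would mismatch MACs and ports.
        tg_topology.reverse()
    assert tg_topology[0]["dst_mac"] == "02:00:00:00:00:02"
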
@@ -373,18 +360,27 @@ class TrafficGenerator(AbstractMeasurer):
                     tg_node, cmd, sudo=True, message=u"Kill TRex failed!"
                 )
 
-                # Configure TRex.
-                ports = ''
+                # Prepare interfaces for TRex.
+                tg_port_drv = Constants.TREX_PORT_DRIVER
+                mlx_driver = u""
                 for port in tg_node[u"interfaces"].values():
-                    if u'Mellanox' not in port.get(u'model'):
-                        ports += f" {port.get(u'pci_address')}"
-
-                cmd = f"sh -c \"cd {Constants.TREX_INSTALL_DIR}/scripts/ && " \
-                    f"./dpdk_nic_bind.py -u {ports} || true\""
-                exec_cmd_no_error(
-                    tg_node, cmd, sudo=True,
-                    message=u"Unbind PCI ports from driver failed!"
-                )
+                    if u"Mellanox" in port.get(u"model"):
+                        mlx_driver = port.get(u"driver")
+                        pci_addr = port.get(u'pci_address')
+                        cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
+                        if cur_driver == mlx_driver:
+                            pass
+                        elif not cur_driver:
+                            DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
+                        else:
+                            DS.pci_driver_unbind(tg_node, pci_addr)
+                            DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
+                    else:
+                        pci_addr = port.get(u'pci_address')
+                        cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
+                        if cur_driver:
+                            DS.pci_driver_unbind(tg_node, pci_addr)
+                        DS.pci_driver_bind(tg_node, pci_addr, tg_port_drv)
 
                 # Start TRex.
                 cd_cmd = f"cd '{Constants.TREX_INSTALL_DIR}/scripts/'"
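
The per-port loop above replaces the old blanket dpdk_nic_bind unbind: Mellanox NICs are kept on (or rebound to) the kernel driver recorded in the topology file, while every other NIC is rebound to Constants.TREX_PORT_DRIVER. A condensed sketch of the same branching, using the DUTSetup alias DS from the new imports (the function name is hypothetical):

    def prepare_tg_port(tg_node, port):
        """Bind one TG port to the driver T-Rex needs (sketch of the logic above)."""
        pci_addr = port.get("pci_address")
        cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
        if "Mellanox" in port.get("model"):
            # Mellanox ports stay on their kernel mlx driver.
            mlx_driver = port.get("driver")
            if cur_driver != mlx_driver:
                if cur_driver:
                    DS.pci_driver_unbind(tg_node, pci_addr)
                DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
        else:
            # Any other port is (re)bound to the configured T-Rex port driver.
            if cur_driver:
                DS.pci_driver_unbind(tg_node, pci_addr)
            DS.pci_driver_bind(tg_node, pci_addr, Constants.TREX_PORT_DRIVER)
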
@@ -559,7 +555,6 @@ class TrafficGenerator(AbstractMeasurer):
             # so we can compare with what telemetry suggests
             # the real duration was.
             logger.debug(f"Expected duration {computed_duration}")
-            computed_duration += 0.1115
         if not self.duration_limit:
             return computed_duration, True
         limited_duration = min(computed_duration, self.duration_limit)
@@ -611,8 +606,6 @@ class TrafficGenerator(AbstractMeasurer):
         if not isinstance(duration, (float, int)):
             duration = float(duration)
 
-        # TODO: Refactor the code so duration is computed only once,
-        # and both the initial and the computed durations are logged.
         computed_duration, _ = self._compute_duration(duration, multiplier)
 
         command_line = OptionString().add(u"python3")
@@ -625,6 +618,9 @@ class TrafficGenerator(AbstractMeasurer):
         )
         command_line.add_with_value(u"duration", f"{computed_duration!r}")
         command_line.add_with_value(u"frame_size", self.frame_size)
+        command_line.add_with_value(
+            u"n_data_frames", Constants.ASTF_N_DATA_FRAMES
+        )
         command_line.add_with_value(u"multiplier", multiplier)
         command_line.add_with_value(u"port_0", p_0)
         command_line.add_with_value(u"port_1", p_1)
@@ -634,6 +630,9 @@ class TrafficGenerator(AbstractMeasurer):
         command_line.add_if(u"async_start", async_call)
         command_line.add_if(u"latency", self.use_latency)
         command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
+        command_line.add_with_value(
+            u"delay", Constants.PERF_TRIAL_ASTF_DELAY
+        )
 
         self._start_time = time.monotonic()
         self._rate = multiplier
@@ -718,8 +717,6 @@ class TrafficGenerator(AbstractMeasurer):
         if not isinstance(duration, (float, int)):
             duration = float(duration)
 
-        # TODO: Refactor the code so duration is computed only once,
-        # and both the initial and the computed durations are logged.
         duration, _ = self._compute_duration(duration=duration, multiplier=rate)
 
         command_line = OptionString().add(u"python3")
@@ -741,8 +738,8 @@ class TrafficGenerator(AbstractMeasurer):
         command_line.add_if(u"async_start", async_call)
         command_line.add_if(u"latency", self.use_latency)
         command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
+        command_line.add_with_value(u"delay", Constants.PERF_TRIAL_STL_DELAY)
 
-        # TODO: This is ugly. Handle parsing better.
         self._start_time = time.monotonic()
         self._rate = float(rate[:-3]) if u"pps" in rate else float(rate)
         stdout, _ = exec_cmd_no_error(
@@ -789,7 +786,7 @@ class TrafficGenerator(AbstractMeasurer):
             use_latency=False,
             ramp_up_rate=None,
             ramp_up_duration=None,
-            state_timeout=300.0,
+            state_timeout=240.0,
             ramp_up_only=False,
         ):
         """Send traffic from all configured interfaces on TG.
@@ -914,7 +911,6 @@ class TrafficGenerator(AbstractMeasurer):
                 )
             elif u"trex-stl" in self.traffic_profile:
                 unit_rate_str = str(rate) + u"pps"
-                # TODO: Suport transaction_scale et al?
                 self.trex_stl_start_remote_exec(
                     duration, unit_rate_str, async_call
                 )
@@ -961,7 +957,6 @@ class TrafficGenerator(AbstractMeasurer):
         complete = False
         if self.ramp_up_rate:
             # Figure out whether we need to insert a ramp-up trial.
-            # TODO: Give up on async_call=True?
             if ramp_up_only or self.ramp_up_start is None:
                 # We never ramped up yet (at least not in this test case).
                 ramp_up_needed = True
@@ -1033,14 +1028,12 @@ class TrafficGenerator(AbstractMeasurer):
     def fail_if_no_traffic_forwarded(self):
         """Fail if no traffic forwarded.
 
-        TODO: Check number of passed transactions instead.
-
         :returns: nothing
         :raises Exception: If no traffic forwarded.
         """
         if self._received is None:
             raise RuntimeError(u"The traffic generation has not been issued")
-        if self._received == u"0":
+        if self._received == 0:
             raise RuntimeError(u"No traffic forwarded")
 
     def partial_traffic_loss_accepted(
@@ -1193,9 +1186,7 @@ class TrafficGenerator(AbstractMeasurer):
         The target_tr field of ReceiveRateMeasurement is in
         transactions per second. Transmit count and loss count units
         depend on the transaction type. Usually they are in transactions
-        per second, or aggregate packets per second.
-
-        TODO: Fail on running or already reported measurement.
+        per second, or aggregated packets per second.
 
         :returns: Structure containing the result of the measurement.
         :rtype: ReceiveRateMeasurement
@@ -1226,16 +1217,27 @@ class TrafficGenerator(AbstractMeasurer):
         if not target_duration:
             target_duration = approximated_duration
         transmit_rate = self._rate
+        unsent = 0
         if self.transaction_type == u"packet":
             partial_attempt_count = self._sent
-            expected_attempt_count = self._sent
-            fail_count = self._loss
+            packet_rate = transmit_rate * self.ppta
+            # We have a float. TRex way of rounding it is not obvious.
+            # The biggest source of mismatch is Inter Stream Gap.
+            # So the code tolerates 10 usec of missing packets.
+            expected_attempt_count = (target_duration - 1e-5) * packet_rate
+            expected_attempt_count = math.ceil(expected_attempt_count)
+            # TRex can send more.
+            expected_attempt_count = max(expected_attempt_count, self._sent)
+            unsent = expected_attempt_count - self._sent
+            pass_count = self._received
+            fail_count = expected_attempt_count - pass_count
         elif self.transaction_type == u"udp_cps":
             if not self.transaction_scale:
                 raise RuntimeError(u"Add support for no-limit udp_cps.")
             partial_attempt_count = self._l7_data[u"client"][u"sent"]
             # We do not care whether TG is slow, it should have attempted all.
             expected_attempt_count = self.transaction_scale
+            unsent = expected_attempt_count - partial_attempt_count
             pass_count = self._l7_data[u"client"][u"received"]
             fail_count = expected_attempt_count - pass_count
         elif self.transaction_type == u"tcp_cps":
@@ -1245,6 +1247,7 @@ class TrafficGenerator(AbstractMeasurer):
             partial_attempt_count = ctca
             # We do not care whether TG is slow, it should have attempted all.
             expected_attempt_count = self.transaction_scale
+            unsent = expected_attempt_count - partial_attempt_count
             # From TCP point of view, server/connects counts full connections,
             # but we are testing NAT session so client/connects counts that
             # (half connections from TCP point of view).
@@ -1255,7 +1258,8 @@ class TrafficGenerator(AbstractMeasurer):
                 raise RuntimeError(u"Add support for no-limit udp_pps.")
             partial_attempt_count = self._sent
             expected_attempt_count = self.transaction_scale * self.ppta
-            fail_count = self._loss + (expected_attempt_count - self._sent)
+            unsent = expected_attempt_count - self._sent
+            fail_count = self._loss + unsent
         elif self.transaction_type == u"tcp_pps":
             if not self.transaction_scale:
                 raise RuntimeError(u"Add support for no-limit tcp_pps.")
@@ -1268,9 +1272,13 @@ class TrafficGenerator(AbstractMeasurer):
             # A simple workaround is to add absolute difference.
             # Probability of retransmissions exactly cancelling
             # packets unsent due to duration stretching is quite low.
-            fail_count = self._loss + abs(expected_attempt_count - self._sent)
+            unsent = abs(expected_attempt_count - self._sent)
+            fail_count = self._loss + unsent
         else:
             raise RuntimeError(f"Unknown parsing {self.transaction_type!r}")
+        if unsent and isinstance(self._approximated_duration, float):
+            # Do not report unsent for "manual".
+            logger.debug(f"Unsent packets/transactions: {unsent}")
         if fail_count < 0 and not self.negative_loss:
             fail_count = 0
         measurement = ReceiveRateMeasurement(
@@ -1322,8 +1330,6 @@ class TrafficGenerator(AbstractMeasurer):
         if self.sleep_till_duration:
             sleeptime = time_stop - time.monotonic()
             if sleeptime > 0.0:
-                # TODO: Sometimes we have time to do additional trials here,
-                # adapt PLRsearch to accept all the results.
                 time.sleep(sleeptime)
         return result
 
@@ -1343,7 +1349,7 @@ class TrafficGenerator(AbstractMeasurer):
             use_latency=False,
             ramp_up_rate=None,
             ramp_up_duration=None,
-            state_timeout=300.0,
+            state_timeout=240.0,
         ):
         """Store values accessed by measure().
 
@@ -1364,7 +1370,6 @@ class TrafficGenerator(AbstractMeasurer):
         :param transaction_type: An identifier specifying which counters
             and formulas to use when computing attempted and failed
             transactions. Default: "packet".
-            TODO: Does this also specify parsing for the measured duration?
         :param duration_limit: Zero or maximum limit for computed (or given)
             duration.
         :param negative_loss: If false, negative loss is reported as zero loss.
@@ -1412,7 +1417,7 @@ class OptimizedSearch:
     """Class to be imported as Robot Library, containing search keywords.
 
     Aside of setting up measurer and forwarding arguments,
-    the main business is to translate min/max rate from unidir to aggregate.
+    the main business is to translate min/max rate from unidir to aggregated.
     """
 
     @staticmethod
@@ -1426,8 +1431,7 @@ class OptimizedSearch:
             final_trial_duration=30.0,
             initial_trial_duration=1.0,
             number_of_intermediate_phases=2,
-            timeout=720.0,
-            doublings=1,
+            timeout=1200.0,
             ppta=1,
             resetter=None,
             traffic_directions=2,
@@ -1437,22 +1441,23 @@ class OptimizedSearch:
             use_latency=False,
             ramp_up_rate=None,
             ramp_up_duration=None,
-            state_timeout=300.0,
+            state_timeout=240.0,
+            expansion_coefficient=4.0,
     ):
         """Setup initialized TG, perform optimized search, return intervals.
 
-        If transaction_scale is nonzero, all non-init trial durations
-        are set to 2.0 (as they do not affect the real trial duration)
+        If transaction_scale is nonzero, all init and non-init trial durations
+        are set to 1.0 (as they do not affect the real trial duration)
         and zero intermediate phases are used.
-        The initial phase still uses 1.0 seconds, to force remeasurement.
-        That makes initial phase act as a warmup.
+        This way no re-measurement happens.
+        Warmup has to be handled via resetter or ramp-up mechanisms.
 
         :param frame_size: Frame size identifier or value [B].
         :param traffic_profile: Module name as a traffic profile identifier.
             See GPL/traffic_profiles/trex for implemented modules.
         :param minimum_transmit_rate: Minimal load in transactions per second.
         :param maximum_transmit_rate: Maximal load in transactions per second.
-        :param packet_loss_ratio: Fraction of packets lost, for PDR [1].
+        :param packet_loss_ratio: Ratio of packets lost, for PDR [1].
         :param final_relative_width: Final lower bound transmit rate
             cannot be more distant that this multiple of upper bound [1].
         :param final_trial_duration: Trial duration for the final phase [s].
@@ -1462,9 +1467,6 @@ class OptimizedSearch:
             to perform before the final phase [1].
         :param timeout: The search will fail itself when not finished
             before this overall time [s].
-        :param doublings: How many doublings to do in external search step.
-            Default 1 is suitable for fairly stable tests,
-            less stable tests might get better overal duration with 2 or more.
         :param ppta: Packets per transaction, aggregated over directions.
             Needed for udp_pps which does not have a good transaction counter,
             so we need to compute expected number of packets.
@@ -1483,6 +1485,7 @@ class OptimizedSearch:
         :param ramp_up_rate: Rate to use in ramp-up trials [pps].
         :param ramp_up_duration: Duration of ramp-up trials [s].
         :param state_timeout: Time of life of DUT state [s].
+        :param expansion_coefficient: In external search multiply width by this.
         :type frame_size: str or int
         :type traffic_profile: str
         :type minimum_transmit_rate: float
@@ -1493,7 +1496,6 @@ class OptimizedSearch:
         :type initial_trial_duration: float
         :type number_of_intermediate_phases: int
         :type timeout: float
-        :type doublings: int
         :type ppta: int
         :type resetter: Optional[Callable[[], None]]
         :type traffic_directions: int
@@ -1504,9 +1506,10 @@ class OptimizedSearch:
         :type ramp_up_rate: float
         :type ramp_up_duration: float
         :type state_timeout: float
+        :type expansion_coefficient: float
         :returns: Structure containing narrowed down NDR and PDR intervals
             and their measurements.
-        :rtype: NdrPdrResult
+        :rtype: List[ReceiveRateInterval]
         :raises RuntimeError: If total duration is larger than timeout.
         """
         # we need instance of TrafficGenerator instantiated by Robot Framework
@@ -1515,11 +1518,9 @@ class OptimizedSearch:
             u"resources.libraries.python.TrafficGenerator"
         )
         # Overrides for fixed transaction amount.
-        # TODO: Move to robot code? We have two call sites, so this saves space,
-        #       even though this is surprising for log readers.
         if transaction_scale:
             initial_trial_duration = 1.0
-            final_trial_duration = 2.0
+            final_trial_duration = 1.0
             number_of_intermediate_phases = 0
             timeout += transaction_scale * 3e-4
         tg_instance.set_rate_provider_defaults(
@@ -1544,14 +1545,20 @@ class OptimizedSearch:
             number_of_intermediate_phases=number_of_intermediate_phases,
             initial_trial_duration=initial_trial_duration,
             timeout=timeout,
-            doublings=doublings,
+            debug=logger.debug,
+            expansion_coefficient=expansion_coefficient,
         )
-        result = algorithm.narrow_down_ndr_and_pdr(
+        if packet_loss_ratio:
+            packet_loss_ratios = [0.0, packet_loss_ratio]
+        else:
+            # Happens in reconf tests.
+            packet_loss_ratios = [packet_loss_ratio]
+        results = algorithm.narrow_down_intervals(
             min_rate=minimum_transmit_rate,
             max_rate=maximum_transmit_rate,
-            packet_loss_ratio=packet_loss_ratio,
+            packet_loss_ratios=packet_loss_ratios,
         )
-        return result
+        return results
 
     @staticmethod
     def perform_soak_search(
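
Because the search now receives a list of loss ratios, it returns a list of intervals in the same order: with a nonzero packet_loss_ratio the caller gets the zero-loss (NDR) interval first and the PDR interval second; reconf tests pass a single ratio and get a single interval. A hedged consumer-side sketch, where the keyword name, profile string and rates are illustrative assumptions, not taken from this diff:

    # Keyword name assumed; check the OptimizedSearch class for the exact one.
    results = OptimizedSearch.perform_optimized_ndrpdr_search(
        frame_size=64,
        traffic_profile="trex-stl-ethip4-ip4src254",   # example profile module
        minimum_transmit_rate=9001.0,
        maximum_transmit_rate=14_880_952.0,
        packet_loss_ratio=0.005,
    )
    ndr_interval, pdr_interval = results   # two ratios searched -> two intervals
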
@@ -1573,7 +1580,7 @@ class OptimizedSearch:
             use_latency=False,
             ramp_up_rate=None,
             ramp_up_duration=None,
-            state_timeout=300.0,
+            state_timeout=240.0,
     ):
         """Setup initialized TG, perform soak search, return avg and stdev.
 
@@ -1582,7 +1589,7 @@ class OptimizedSearch:
             See GPL/traffic_profiles/trex for implemented modules.
         :param minimum_transmit_rate: Minimal load in transactions per second.
         :param maximum_transmit_rate: Maximal load in transactions per second.
-        :param plr_target: Fraction of packets lost to achieve [1].
+        :param plr_target: Ratio of packets lost to achieve [1].
         :param tdpt: Trial duration per trial.
             The algorithm linearly increases trial duration with trial number,
             this is the increment between succesive trials, in seconds.
@@ -1631,18 +1638,14 @@ class OptimizedSearch:
         :type ramp_up_rate: float
         :type ramp_up_duration: float
         :type state_timeout: float
-        :returns: Average and stdev of estimated aggregate rate giving PLR.
+        :returns: Average and stdev of estimated aggregated rate giving PLR.
         :rtype: 2-tuple of float
         """
         tg_instance = BuiltIn().get_library_instance(
             u"resources.libraries.python.TrafficGenerator"
         )
         # Overrides for fixed transaction amount.
-        # TODO: Move to robot code? We have a single call site
-        #       but MLRsearch has two and we want the two to be used similarly.
         if transaction_scale:
-            # TODO: What is a good value for max scale?
-            # TODO: Scale the timeout with transaction scale.
             timeout = 7200.0
         tg_instance.set_rate_provider_defaults(
             frame_size=frame_size,