-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
import math
import time
+from typing import Callable, List, Optional, Union
+
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from .Constants import Constants
from .DropRateSearch import DropRateSearch
-from .MLRsearch.AbstractMeasurer import AbstractMeasurer
-from .MLRsearch.MultipleLossRatioSearch import MultipleLossRatioSearch
-from .MLRsearch.ReceiveRateMeasurement import ReceiveRateMeasurement
+from .MLRsearch import (
+ AbstractMeasurer, Config, GoalResult, MeasurementResult,
+ MultipleLossRatioSearch, SearchGoal,
+)
from .PLRsearch.PLRsearch import PLRsearch
from .OptionString import OptionString
from .ssh import exec_cmd_no_error, exec_cmd
self._mode = None
# TG interface order mapping
self._ifaces_reordered = False
+ self._ifaces = []
# Result holding fields, to be removed.
self._result = None
self._loss = None
self.ramp_up_duration = None
self.state_timeout = None
# Transient data needed for async measurements.
- self._xstats = ()
+ self._xstats = []
@property
def node(self):
message = u"Get T-Rex version failed!"
stdout, _ = exec_cmd_no_error(tg_node, command, message=message)
return stdout.strip()
- else:
- return "none"
+ return "none"
- def initialize_traffic_generator(self, osi_layer, parallel_links=1):
+ def initialize_traffic_generator(self, osi_layer, pfs=2):
"""TG initialization.
- :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
- :param parallel_links: Number of parallel links to configure.
+ :param osi_layer: OSI Layer testing type.
+ :param pfs: Number of physical interfaces to configure.
:type osi_layer: str
- :type parallel_links: int
+ :type pfs: int
:raises ValueError: If OSI layer is unknown.
"""
- if osi_layer not in ("L2", "L3", "L7"):
+ if osi_layer not in ("L2", "L3", "L3_1", "L7"):
raise ValueError("Unknown OSI layer!")
topology = BuiltIn().get_variable_value("&{topology_info}")
trex_topology = list()
self._mode = TrexMode.ASTF if osi_layer == "L7" else TrexMode.STL
- for l in range(1, parallel_links*2, 2):
- tg_if1_adj_addr = topology[f"TG_pf{l+1}_mac"][0]
- tg_if2_adj_addr = topology[f"TG_pf{l}_mac"][0]
- if osi_layer in ("L3", "L7") and "DUT1" in topology.keys():
+ for link in range(1, pfs, 2):
+ tg_if1_adj_addr = topology[f"TG_pf{link+1}_mac"][0]
+ tg_if2_adj_addr = topology[f"TG_pf{link}_mac"][0]
+ skip = 0 if osi_layer in ("L3_1",) else 1
+ if osi_layer in ("L3", "L3_1", "L7") and "DUT1" \
+ in topology.keys():
ifl = BuiltIn().get_variable_value("${int}")
last = topology["duts_count"]
tg_if1_adj_addr = Topology().get_interface_mac(
- topology["DUT1"],
+ topology["DUT1"],
BuiltIn().get_variable_value(
- f"${{DUT1_{ifl}{l}}}[0]"
+ f"${{DUT1_{ifl}{link}}}[0]"
)
)
tg_if2_adj_addr = Topology().get_interface_mac(
- topology[f"DUT{last}"],
+ topology[f"DUT{last}"],
BuiltIn().get_variable_value(
- f"${{DUT{last}_{ifl}{l+1}}}[0]"
+ f"${{DUT{last}_{ifl}{link+skip}}}[0]"
)
)
- trex_topology.append(
- dict(
- interface=topology[f"TG_pf{l}"][0],
- dst_mac=tg_if1_adj_addr
+ if1_pci = topology[f"TG_pf{link}_pci"][0]
+ if2_pci = topology[f"TG_pf{link+1}_pci"][0]
+ if min(if1_pci, if2_pci) != if1_pci:
+ self._ifaces.append(str(link))
+ self._ifaces.append(str(link-1))
+ trex_topology.append(
+ dict(
+ interface=topology[f"TG_pf{link+1}"][0],
+ dst_mac=tg_if2_adj_addr
+ )
)
- )
- trex_topology.append(
- dict(
- interface=topology[f"TG_pf{l+1}"][0],
- dst_mac=tg_if2_adj_addr
+ trex_topology.append(
+ dict(
+ interface=topology[f"TG_pf{link}"][0],
+ dst_mac=tg_if1_adj_addr
+ )
+ )
+ else:
+ self._ifaces.append(str(link-1))
+ self._ifaces.append(str(link))
+ trex_topology.append(
+ dict(
+ interface=topology[f"TG_pf{link}"][0],
+ dst_mac=tg_if1_adj_addr
+ )
+ )
+ trex_topology.append(
+ dict(
+ interface=topology[f"TG_pf{link+1}"][0],
+ dst_mac=tg_if2_adj_addr
+ )
)
- )
- if1_pci = topology[f"TG_pf{l}_pci"][0]
- if2_pci = topology[f"TG_pf{l+1}_pci"][0]
- if min(if1_pci, if2_pci) != if1_pci:
- self._ifaces_reordered = True
- trex_topology.reverse()
TrexConfig.add_startup_configuration(
self._node, trex_topology
"""Startup sequence for the TRex traffic generator.
:param tg_node: Traffic generator node.
- :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
+ :param osi_layer: OSI Layer testing type.
:param subtype: Traffic generator sub-type.
:type tg_node: dict
:type osi_layer: str
if subtype == NodeSubTypeTG.TREX:
for _ in range(0, 3):
# Kill TRex only if it is already running.
- cmd = u"sh -c \"pgrep t-rex && pkill t-rex && sleep 3 || true\""
+ cmd = "sh -c \"pgrep t-rex && pkill t-rex && sleep 3 || true\""
exec_cmd_no_error(
- tg_node, cmd, sudo=True, message=u"Kill TRex failed!"
+ tg_node, cmd, sudo=True, message="Kill TRex failed!"
)
# Prepare interfaces for TRex.
tg_port_drv = Constants.TREX_PORT_DRIVER
- mlx_driver = u""
- for port in tg_node[u"interfaces"].values():
- if u"Mellanox" in port.get(u"model"):
- mlx_driver = port.get(u"driver")
- pci_addr = port.get(u'pci_address')
+ mlx_driver = ""
+ for port in tg_node["interfaces"].values():
+ if "Mellanox" in port.get("model"):
+ mlx_driver = port.get("driver")
+ pci_addr = port.get("pci_address")
cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
if cur_driver == mlx_driver:
pass
DS.pci_driver_unbind(tg_node, pci_addr)
DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
else:
- pci_addr = port.get(u'pci_address')
+ pci_addr = port.get("pci_address")
cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
if cur_driver:
DS.pci_driver_unbind(tg_node, pci_addr)
# Start TRex.
cd_cmd = f"cd '{Constants.TREX_INSTALL_DIR}/scripts/'"
- trex_cmd = OptionString([u"nohup", u"./t-rex-64"])
- trex_cmd.add(u"-i")
- trex_cmd.add(u"--prefix $(hostname)")
- trex_cmd.add(u"--hdrh")
- trex_cmd.add(u"--no-scapy-server")
- trex_cmd.add_if(u"--astf", osi_layer == u"L7")
+ trex_cmd = OptionString(["nohup", "./t-rex-64"])
+ trex_cmd.add("-i")
+ trex_cmd.add("--prefix $(hostname)")
+ trex_cmd.add("--hdrh")
+ trex_cmd.add("--no-scapy-server")
+ trex_cmd.add_if("--astf", osi_layer == "L7")
# OptionString does not create double space if extra is empty.
trex_cmd.add(f"{Constants.TREX_EXTRA_CMDLINE}")
inner_command = f"{cd_cmd} && {trex_cmd} > /tmp/trex.log 2>&1 &"
try:
exec_cmd_no_error(tg_node, cmd, sudo=True)
except RuntimeError:
- cmd = u"sh -c \"cat /tmp/trex.log\""
+ cmd = "sh -c \"cat /tmp/trex.log\""
exec_cmd_no_error(
tg_node, cmd, sudo=True,
- message=u"Get TRex logs failed!"
+ message="Get TRex logs failed!"
)
- raise RuntimeError(u"Start TRex failed!")
+ raise RuntimeError("Start TRex failed!")
# Test T-Rex API responsiveness.
cmd = f"python3 {Constants.REMOTE_FW_DIR}/GPL/tools/trex/"
- if osi_layer in (u"L2", u"L3"):
- cmd += u"trex_stl_assert.py"
- elif osi_layer == u"L7":
- cmd += u"trex_astf_assert.py"
+ if osi_layer in ("L2", "L3", "L3_1"):
+ cmd += "trex_stl_assert.py"
+ elif osi_layer == "L7":
+ cmd += "trex_astf_assert.py"
else:
- raise ValueError(u"Unknown OSI layer!")
+ raise ValueError("Unknown OSI layer!")
try:
exec_cmd_no_error(
tg_node, cmd, sudo=True,
- message=u"T-Rex API is not responding!", retries=20
+ message="T-Rex API is not responding!", retries=20
)
except RuntimeError:
continue
return
# After max retries TRex is still not responding to API critical
# error occurred.
- exec_cmd(tg_node, u"cat /tmp/trex.log", sudo=True)
- raise RuntimeError(u"Start T-Rex failed after multiple retries!")
+ exec_cmd(tg_node, "cat /tmp/trex.log", sudo=True)
+ raise RuntimeError("Start T-Rex failed after multiple retries!")
@staticmethod
def is_trex_running(node):
:returns: True if T-Rex is running otherwise False.
:rtype: bool
"""
- ret, _, _ = exec_cmd(node, u"pgrep t-rex", sudo=True)
+ ret, _, _ = exec_cmd(node, "pgrep t-rex", sudo=True)
return bool(int(ret) == 0)
@staticmethod
:type node: dict
:raises RuntimeError: If stop traffic script fails.
"""
- command_line = OptionString().add(u"python3")
+ command_line = OptionString().add("python3")
dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
command_line.add(f"'{dirname}/trex_astf_stop.py'")
- command_line.change_prefix(u"--")
- for index, value in enumerate(self._xstats):
+ command_line.add("--xstat")
+ for value in self._xstats:
if value is not None:
- value = value.replace(u"'", u"\"")
- command_line.add_equals(f"xstat{index}", f"'{value}'")
+ value = value.replace("'", "\"")
+ command_line.add(f"'{value}'")
stdout, _ = exec_cmd_no_error(
node, command_line,
- message=u"T-Rex ASTF runtime error!"
+ message="T-Rex ASTF runtime error!"
)
self._parse_traffic_results(stdout)
:type node: dict
:raises RuntimeError: If stop traffic script fails.
"""
- command_line = OptionString().add(u"python3")
+ command_line = OptionString().add("python3")
dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
command_line.add(f"'{dirname}/trex_stl_stop.py'")
command_line.add("--xstat")
- for index, value in enumerate(self._xstats):
+ for value in self._xstats:
if value is not None:
value = value.replace("'", "\"")
command_line.add(f"'{value}'")
stdout, _ = exec_cmd_no_error(
node, command_line,
- message=u"T-Rex STL runtime error!"
+ message="T-Rex STL runtime error!", include_reason=True
)
self._parse_traffic_results(stdout)
"""Stop all traffic on TG.
:returns: Structure containing the result of the measurement.
- :rtype: ReceiveRateMeasurement
+ :rtype: MeasurementResult
:raises ValueError: If TG traffic profile is not supported.
"""
subtype = check_subtype(self._node)
"""Compute duration for profile driver.
The final result is influenced by transaction scale and duration limit.
- It is assumed a higher level function has already set those to self.
+ It is assumed a higher-level function has already set those on self.
The duration argument is the target value from search point of view,
before the overrides are applied here.
index = 0
for line in stdout.splitlines():
if f"Xstats snapshot {index}: " in line:
- xstats[index] = line[19:]
+ xstats.append(line[19:])
index += 1
self._xstats = tuple(xstats)
else:
:raises RuntimeError: In case of T-Rex driver issue.
"""
self.check_mode(TrexMode.STL)
- p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
if not isinstance(duration, (float, int)):
duration = float(duration)
duration, _ = self._compute_duration(duration=duration, multiplier=rate)
- command_line = OptionString().add(u"python3")
+ command_line = OptionString().add("python3")
dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
command_line.add(f"'{dirname}/trex_stl_profile.py'")
- command_line.change_prefix(u"--")
+ command_line.change_prefix("--")
dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
command_line.add_with_value(
- u"profile", f"'{dirname}/{self.traffic_profile}.py'"
+ "profile", f"'{dirname}/{self.traffic_profile}.py'"
)
- command_line.add_with_value(u"duration", f"{duration!r}")
- command_line.add_with_value(u"frame_size", self.frame_size)
- command_line.add_with_value(u"rate", f"{rate!r}")
- command_line.add_with_value(u"port_0", p_0)
- command_line.add_with_value(u"port_1", p_1)
+ command_line.add_with_value("duration", f"{duration!r}")
+ command_line.add_with_value("frame_size", self.frame_size)
+ command_line.add_with_value("rate", f"{rate!r}")
+ command_line.add_with_value("ports", " ".join(self._ifaces))
command_line.add_with_value(
- u"traffic_directions", self.traffic_directions
+ "traffic_directions", self.traffic_directions
)
- command_line.add_if(u"async_start", async_call)
- command_line.add_if(u"latency", self.use_latency)
- command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
- command_line.add_with_value(u"delay", Constants.PERF_TRIAL_STL_DELAY)
+ command_line.add_if("async_start", async_call)
+ command_line.add_if("latency", self.use_latency)
+ command_line.add_if("force", Constants.TREX_SEND_FORCE)
+ command_line.add_with_value("delay", Constants.PERF_TRIAL_STL_DELAY)
self._start_time = time.monotonic()
- self._rate = float(rate[:-3]) if u"pps" in rate else float(rate)
+ self._rate = float(rate[:-3]) if "pps" in rate else float(rate)
stdout, _ = exec_cmd_no_error(
self._node, command_line, timeout=int(duration) + 60,
- message=u"T-Rex STL runtime error"
+ message="T-Rex STL runtime error", include_reason=True
)
if async_call:
:type state_timeout: float
:type ramp_up_only: bool
:returns: TG results.
- :rtype: ReceiveRateMeasurement or None
+ :rtype: MeasurementResult or None
:raises ValueError: If TG traffic profile is not supported.
"""
self.set_rate_provider_defaults(
:type rate: float
:type async_call: bool
:returns: TG results.
- :rtype: ReceiveRateMeasurement or None
+ :rtype: MeasurementResult or None
:raises ValueError: If TG traffic profile is not supported.
"""
subtype = check_subtype(self._node)
:type async_call: bool
:type ramp_up_only: bool
:returns: TG results.
- :rtype: ReceiveRateMeasurement or None
+ :rtype: MeasurementResult or None
:raises ValueError: If TG traffic profile is not supported.
"""
complete = False
trial_end = time.monotonic()
if self.ramp_up_rate:
# Optimization: No loss acts as a good ramp-up, if it was complete.
- if complete and result is not None and result.loss_count == 0:
+ if complete and result is not None and result.loss_ratio == 0.0:
logger.debug(u"Good trial acts as a ramp-up")
self.ramp_up_start = trial_start
self.ramp_up_stop = trial_end
int(self._result.get(u"server_tcp_rx_bytes", 0))
def _get_measurement_result(self):
- """Return the result of last measurement as ReceiveRateMeasurement.
+ """Return the result of last measurement as MeasurementResult.
Separate function, as measurements can end either by time
or by explicit call, this is the common block at the end.
- The target_tr field of ReceiveRateMeasurement is in
+ The intended_load field of MeasurementResult is in
transactions per second. Transmit count and loss count units
depend on the transaction type. Usually they are in transactions
per second, or aggregated packets per second.
:returns: Structure containing the result of the measurement.
- :rtype: ReceiveRateMeasurement
+ :rtype: MeasurementResult
"""
+ duration_with_overheads = time.monotonic() - self._start_time
try:
# Client duration seems to include a setup period
# where TRex does not send any packets yet.
expected_attempt_count = max(expected_attempt_count, self._sent)
unsent = expected_attempt_count - self._sent
pass_count = self._received
- fail_count = expected_attempt_count - pass_count
+ loss_count = self._loss
elif self.transaction_type == u"udp_cps":
if not self.transaction_scale:
raise RuntimeError(u"Add support for no-limit udp_cps.")
expected_attempt_count = self.transaction_scale
unsent = expected_attempt_count - partial_attempt_count
pass_count = self._l7_data[u"client"][u"received"]
- fail_count = expected_attempt_count - pass_count
+ loss_count = partial_attempt_count - pass_count
elif self.transaction_type == u"tcp_cps":
if not self.transaction_scale:
raise RuntimeError(u"Add support for no-limit tcp_cps.")
# but we are testing NAT session so client/connects counts that
# (half connections from TCP point of view).
pass_count = self._l7_data[u"client"][u"tcp"][u"connects"]
- fail_count = expected_attempt_count - pass_count
+ loss_count = partial_attempt_count - pass_count
elif self.transaction_type == u"udp_pps":
if not self.transaction_scale:
raise RuntimeError(u"Add support for no-limit udp_pps.")
partial_attempt_count = self._sent
expected_attempt_count = self.transaction_scale * self.ppta
unsent = expected_attempt_count - self._sent
- fail_count = self._loss + unsent
+ loss_count = self._loss
elif self.transaction_type == u"tcp_pps":
if not self.transaction_scale:
raise RuntimeError(u"Add support for no-limit tcp_pps.")
# Probability of retransmissions exactly cancelling
# packets unsent due to duration stretching is quite low.
unsent = abs(expected_attempt_count - self._sent)
- fail_count = self._loss + unsent
+ loss_count = self._loss
else:
raise RuntimeError(f"Unknown parsing {self.transaction_type!r}")
if unsent and isinstance(self._approximated_duration, float):
# Do not report unsent for "manual".
logger.debug(f"Unsent packets/transactions: {unsent}")
- if fail_count < 0 and not self.negative_loss:
- fail_count = 0
- measurement = ReceiveRateMeasurement(
- duration=target_duration,
- target_tr=transmit_rate,
- transmit_count=expected_attempt_count,
- loss_count=fail_count,
- approximated_duration=approximated_duration,
- partial_transmit_count=partial_attempt_count,
+ if loss_count < 0 and not self.negative_loss:
+ loss_count = 0
+ measurement = MeasurementResult(
+ intended_duration=target_duration,
+ intended_load=transmit_rate,
+ offered_count=partial_attempt_count,
+ loss_count=loss_count,
+ offered_duration=approximated_duration,
+ duration_with_overheads=duration_with_overheads,
+ intended_count=expected_attempt_count,
)
measurement.latency = self.get_latency_int()
return measurement
- def measure(self, duration, transmit_rate):
+ def measure(self, intended_duration, intended_load):
"""Run trial measurement, parse and return results.
- The input rate is for transactions. Stateles bidirectional traffic
+ The intended load is for transactions. Stateless bidirectional traffic
is understood as sequence of (asynchronous) transactions,
two packets each.
the count either transactions or packets (aggregated over directions).
Optionally, this method sleeps if measurement finished before
- the time specified as duration.
+ the time specified as intended_duration (PLRsearch needs time for math).
- :param duration: Trial duration [s].
- :param transmit_rate: Target rate in transactions per second.
- :type duration: float
- :type transmit_rate: float
+ :param intended_duration: Trial duration [s].
+ :param intended_load: Target rate in transactions per second.
+ :type intended_duration: float
+ :type intended_load: float
:returns: Structure containing the result of the measurement.
- :rtype: ReceiveRateMeasurement
+ :rtype: MeasurementResult
:raises RuntimeError: If TG is not set or if node is not TG
or if subtype is not specified.
:raises NotImplementedError: If TG is not supported.
"""
- duration = float(duration)
+ intended_duration = float(intended_duration)
time_start = time.monotonic()
- time_stop = time_start + duration
+ time_stop = time_start + intended_duration
if self.resetter:
self.resetter()
result = self._send_traffic_on_tg_with_ramp_up(
- duration=duration,
- rate=transmit_rate,
+ duration=intended_duration,
+ rate=intended_load,
async_call=False,
)
logger.debug(f"trial measurement result: {result!r}")
# In PLRsearch, computation needs the specified time to complete.
if self.sleep_till_duration:
- sleeptime = time_stop - time.monotonic()
- if sleeptime > 0.0:
+ while (sleeptime := time_stop - time.monotonic()) > 0.0:
time.sleep(sleeptime)
return result
self.frame_size = frame_size
self.traffic_profile = str(traffic_profile)
self.resetter = resetter
- self.ppta = ppta
+ self.ppta = int(ppta)
self.traffic_directions = int(traffic_directions)
self.transaction_duration = float(transaction_duration)
self.transaction_scale = int(transaction_scale)
"""
@staticmethod
- def perform_optimized_ndrpdr_search(
- frame_size,
- traffic_profile,
- minimum_transmit_rate,
- maximum_transmit_rate,
- packet_loss_ratio=0.005,
- final_relative_width=0.005,
- final_trial_duration=30.0,
- initial_trial_duration=1.0,
- number_of_intermediate_phases=2,
- timeout=1200.0,
- ppta=1,
- resetter=None,
- traffic_directions=2,
- transaction_duration=0.0,
- transaction_scale=0,
- transaction_type=u"packet",
- use_latency=False,
- ramp_up_rate=None,
- ramp_up_duration=None,
- state_timeout=240.0,
- expansion_coefficient=4.0,
- ):
+ def perform_mlr_search(
+ frame_size: Union[int, str],
+ traffic_profile: str,
+ min_load: float,
+ max_load: float,
+ loss_ratio: float = 0.005,
+ relative_width: float = 0.005,
+ initial_trial_duration: float = 1.0,
+ final_trial_duration: float = 1.0,
+ duration_sum: float = 21.0,
+ expansion_coefficient: int = 2,
+ preceding_targets: int = 2,
+ search_duration_max: float = 1200.0,
+ ppta: int = 1,
+ resetter: Optional[Callable[[], None]] = None,
+ traffic_directions: int = 2,
+ transaction_duration: float = 0.0,
+ transaction_scale: int = 0,
+ transaction_type: str = "packet",
+ use_latency: bool = False,
+ ramp_up_rate: float = 0.0,
+ ramp_up_duration: float = 0.0,
+ state_timeout: float = 240.0,
+ ) -> List[GoalResult]:
"""Setup initialized TG, perform optimized search, return intervals.
If transaction_scale is nonzero, all init and non-init trial durations
:param frame_size: Frame size identifier or value [B].
:param traffic_profile: Module name as a traffic profile identifier.
See GPL/traffic_profiles/trex for implemented modules.
- :param minimum_transmit_rate: Minimal load in transactions per second.
- :param maximum_transmit_rate: Maximal load in transactions per second.
- :param packet_loss_ratio: Ratio of packets lost, for PDR [1].
- :param final_relative_width: Final lower bound transmit rate
+ :param min_load: Minimal load in transactions per second.
+ :param max_load: Maximal load in transactions per second.
+ :param loss_ratio: Ratio of packets lost, for PDR [1].
+ :param relative_width: Final lower bound intended load
cannot be more distant that this multiple of upper bound [1].
- :param final_trial_duration: Trial duration for the final phase [s].
:param initial_trial_duration: Trial duration for the initial phase
and also for the first intermediate phase [s].
- :param number_of_intermediate_phases: Number of intermediate phases
+ :param final_trial_duration: Trial duration for the final phase [s].
+ :param duration_sum: Max sum of duration for deciding [s].
+ :param expansion_coefficient: In external search multiply width by this.
+ :param preceding_targets: Number of intermediate phases
to perform before the final phase [1].
- :param timeout: The search will fail itself when not finished
- before this overall time [s].
+ :param search_duration_max: The search will fail itself
+ when not finished before this overall time [s].
:param ppta: Packets per transaction, aggregated over directions.
Needed for udp_pps which does not have a good transaction counter,
so we need to compute expected number of packets.
:param ramp_up_rate: Rate to use in ramp-up trials [pps].
:param ramp_up_duration: Duration of ramp-up trials [s].
:param state_timeout: Time of life of DUT state [s].
- :param expansion_coefficient: In external search multiply width by this.
:type frame_size: str or int
:type traffic_profile: str
- :type minimum_transmit_rate: float
- :type maximum_transmit_rate: float
- :type packet_loss_ratio: float
- :type final_relative_width: float
- :type final_trial_duration: float
+ :type min_load: float
+ :type max_load: float
+ :type loss_ratio: float
+ :type relative_width: float
:type initial_trial_duration: float
- :type number_of_intermediate_phases: int
- :type timeout: float
+ :type final_trial_duration: float
+ :type duration_sum: float
+ :type expansion_coefficient: int
+ :type preceding_targets: int
+ :type search_duration_max: float
:type ppta: int
:type resetter: Optional[Callable[[], None]]
:type traffic_directions: int
:type ramp_up_rate: float
:type ramp_up_duration: float
:type state_timeout: float
- :type expansion_coefficient: float
- :returns: Structure containing narrowed down NDR and PDR intervals
- and their measurements.
- :rtype: List[Receiverateinterval]
- :raises RuntimeError: If total duration is larger than timeout.
+ :returns: Goal result (based on unidirectional tps) for each goal.
+ The result contains both the offered load for stat trial,
+ and the conditional throughput for display.
+ :rtype: List[GoalResult]
+ :raises RuntimeError: If search duration exceeds search_duration_max
+ or if min load becomes an upper bound for any search goal.
"""
# we need instance of TrafficGenerator instantiated by Robot Framework
# to be able to use trex_stl-*()
if transaction_scale:
initial_trial_duration = 1.0
final_trial_duration = 1.0
- number_of_intermediate_phases = 0
- timeout += transaction_scale * 3e-4
+ preceding_targets = 1
+ # TODO: Move the value to Constants.py?
+ search_duration_max += transaction_scale * 3e-4
tg_instance.set_rate_provider_defaults(
frame_size=frame_size,
traffic_profile=traffic_profile,
ramp_up_duration=ramp_up_duration,
state_timeout=state_timeout,
)
- algorithm = MultipleLossRatioSearch(
- measurer=tg_instance,
- final_trial_duration=final_trial_duration,
- final_relative_width=final_relative_width,
- number_of_intermediate_phases=number_of_intermediate_phases,
- initial_trial_duration=initial_trial_duration,
- timeout=timeout,
- debug=logger.debug,
- expansion_coefficient=expansion_coefficient,
- )
- if packet_loss_ratio:
- packet_loss_ratios = [0.0, packet_loss_ratio]
+ if loss_ratio:
+ loss_ratios = [0.0, loss_ratio]
+ exceed_ratio = 0.5
else:
# Happens in reconf tests.
- packet_loss_ratios = [packet_loss_ratio]
- results = algorithm.narrow_down_intervals(
- min_rate=minimum_transmit_rate,
- max_rate=maximum_transmit_rate,
- packet_loss_ratios=packet_loss_ratios,
- )
- return results
+ loss_ratios = [0.0]
+ exceed_ratio = 0.0
+ goals = [
+ SearchGoal(
+ loss_ratio=loss_ratio,
+ exceed_ratio=exceed_ratio,
+ relative_width=relative_width,
+ initial_trial_duration=initial_trial_duration,
+ final_trial_duration=final_trial_duration,
+ duration_sum=duration_sum,
+ preceding_targets=preceding_targets,
+ expansion_coefficient=expansion_coefficient,
+ fail_fast=True,
+ )
+ for loss_ratio in loss_ratios
+ ]
+ config = Config()
+ config.goals = goals
+ config.min_load = min_load
+ config.max_load = max_load
+ config.search_duration_max = search_duration_max
+ config.warmup_duration = 1.0
+ algorithm = MultipleLossRatioSearch(config)
+ results = algorithm.search(measurer=tg_instance, debug=logger.debug)
+ return [results[goal] for goal in goals]
@staticmethod
def perform_soak_search(
frame_size,
traffic_profile,
- minimum_transmit_rate,
- maximum_transmit_rate,
+ min_load,
+ max_load,
plr_target=1e-7,
tdpt=0.1,
initial_count=50,
:param frame_size: Frame size identifier or value [B].
:param traffic_profile: Module name as a traffic profile identifier.
See GPL/traffic_profiles/trex for implemented modules.
- :param minimum_transmit_rate: Minimal load in transactions per second.
- :param maximum_transmit_rate: Maximal load in transactions per second.
+ :param min_load: Minimal load in transactions per second.
+ :param max_load: Maximal load in transactions per second.
:param plr_target: Ratio of packets lost to achieve [1].
:param tdpt: Trial duration per trial.
The algorithm linearly increases trial duration with trial number,
:param state_timeout: Time of life of DUT state [s].
:type frame_size: str or int
:type traffic_profile: str
- :type minimum_transmit_rate: float
- :type maximum_transmit_rate: float
+ :type min_load: float
+ :type max_load: float
:type plr_target: float
:type initial_count: int
:type timeout: float
trace_enabled=trace_enabled,
)
result = algorithm.search(
- min_rate=minimum_transmit_rate,
- max_rate=maximum_transmit_rate,
+ min_rate=min_load,
+ max_rate=max_load,
)
return result