1 # Copyright (c) 2023 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Performance testing traffic generator library."""
import time

from typing import Callable, List, Optional, Union

from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn

from .Constants import Constants
from .DropRateSearch import DropRateSearch
from .DUTSetup import DUTSetup as DS
from .MLRsearch import (
    AbstractMeasurer, Config, GoalResult, MeasurementResult,
    MultipleLossRatioSearch, SearchGoal,
)
from .OptionString import OptionString
from .PLRsearch.PLRsearch import PLRsearch
from .ssh import exec_cmd_no_error, exec_cmd
from .topology import NodeType
from .topology import NodeSubTypeTG
from .topology import Topology
from .TRexConfigGenerator import TrexConfig
# Public API of this module.
__all__ = ["TGDropRateSearchImpl", "TrafficGenerator", "OptimizedSearch"]
def check_subtype(node):
    """Return supported subtype of given node, or raise an exception.

    Currently only one subtype is supported,
    but we want our code to be ready for other ones.

    :param node: Topology node to check. Can be None.
    :type node: dict or NoneType
    :returns: Subtype detected.
    :rtype: NodeSubTypeTG
    :raises RuntimeError: If node is not supported, message explains how.
    """
    if node.get(u"type") is None:
        msg = u"Node type is not defined"
    elif node[u"type"] != NodeType.TG:
        msg = f"Node type is {node[u'type']!r}, not a TG"
    elif node.get(u"subtype") is None:
        msg = u"TG subtype is not defined"
    elif node[u"subtype"] != NodeSubTypeTG.TREX:
        msg = f"TG subtype {node[u'subtype']!r} is not supported"
    else:
        # All checks passed, the only supported subtype.
        return NodeSubTypeTG.TREX
    raise RuntimeError(msg)
class TGDropRateSearchImpl(DropRateSearch):
    """Drop Rate Search implementation."""

    def __init__(self):
        super().__init__()

    def measure_loss(
            self, rate, frame_size, loss_acceptance, loss_acceptance_type,
            traffic_profile):
        """Runs the traffic and evaluate the measured results.

        :param rate: Offered traffic load.
        :param frame_size: Size of frame.
        :param loss_acceptance: Permitted drop ratio or frames count.
        :param loss_acceptance_type: Type of permitted loss.
        :param traffic_profile: Module name as a traffic profile identifier.
            See GPL/traffic_profiles/trex for implemented modules.
        :type rate: float
        :type frame_size: str
        :type loss_acceptance: float
        :type loss_acceptance_type: LossAcceptanceType
        :type traffic_profile: str
        :returns: Drop threshold exceeded? (True/False)
        :rtype: bool
        :raises NotImplementedError: If TG is not supported.
        :raises RuntimeError: If TG is not specified.
        """
        # We need an instance of TrafficGenerator instantiated by Robot
        # Framework to be able to use trex_stl-*().
        tg_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.TrafficGenerator"
        )
        subtype = check_subtype(tg_instance.node)
        if subtype == NodeSubTypeTG.TREX:
            unit_rate = str(rate) + self.get_rate_type_str()
            tg_instance.trex_stl_start_remote_exec(
                self.get_duration(), unit_rate, frame_size, traffic_profile
            )
            loss = tg_instance.get_loss()
            sent = tg_instance.get_sent()
            if self.loss_acceptance_type_is_percentage():
                loss = (float(loss) / float(sent)) * 100
            logger.trace(
                f"comparing: {loss} < {loss_acceptance} {loss_acceptance_type}"
            )
            return float(loss) <= float(loss_acceptance)
        return False

    def get_latency(self):
        """Returns min/avg/max latency.

        :returns: Latency stats.
        :rtype: list
        """
        tg_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.TrafficGenerator"
        )
        return tg_instance.get_latency_int()
class TrexMode:
    """Defines mode of T-Rex traffic generator."""
    # Advanced stateful mode
    ASTF = u"ASTF"
    # Stateless mode
    STL = u"STL"
class TrafficGenerator(AbstractMeasurer):
    """Traffic Generator."""

    # Use one instance of TrafficGenerator for all tests in test suite
    ROBOT_LIBRARY_SCOPE = u"TEST SUITE"

    def __init__(self):
        """Initialize fields; real values are set by later keyword calls."""
        self._node = None
        self._mode = None
        # TG interface order mapping
        self._ifaces_reordered = False
        # Result holding fields, to be removed.
        self._result = None
        self._loss = None
        self._sent = None
        self._latency = None
        self._received = None
        self._approximated_rate = None
        self._approximated_duration = None
        self._l7_data = dict()
        # Measurement input fields, needed for async stop result.
        self._start_time = None
        self._stop_time = None
        self._rate = None
        self._target_duration = None
        self._duration = None
        # Other input parameters, not knowable from measure() signature.
        self.frame_size = None
        self.traffic_profile = None
        self.traffic_directions = None
        self.negative_loss = None
        self.use_latency = None
        self.ppta = None
        self.resetter = None
        self.transaction_scale = None
        self.transaction_duration = None
        self.sleep_till_duration = None
        self.transaction_type = None
        self.duration_limit = None
        self.ramp_up_start = None
        self.ramp_up_stop = None
        self.ramp_up_rate = None
        self.ramp_up_duration = None
        self.state_timeout = None
        # Transient data needed for async measurements.
        self._xstats = tuple()
186 :returns: Traffic generator node.
192 """Return number of lost packets.
194 :returns: Number of lost packets.
200 """Return number of sent packets.
202 :returns: Number of sent packets.
207 def get_received(self):
208 """Return number of received packets.
210 :returns: Number of received packets.
213 return self._received
215 def get_latency_int(self):
216 """Return rounded min/avg/max latency.
218 :returns: Latency stats.
223 def get_approximated_rate(self):
224 """Return approximated rate computed as ratio of transmitted packets
225 over duration of trial.
227 :returns: Approximated rate.
230 return self._approximated_rate
232 def get_l7_data(self):
235 :returns: Number of received packets.
240 def check_mode(self, expected_mode):
243 :param expected_mode: Expected traffic generator mode.
244 :type expected_mode: object
245 :raises RuntimeError: In case of unexpected TG mode.
247 if self._mode == expected_mode:
250 f"{self._node[u'subtype']} not running in {expected_mode} mode!"
254 def get_tg_type(tg_node):
255 """Log and return the installed traffic generator type.
257 :param tg_node: Node from topology file.
259 :returns: Traffic generator type string.
261 :raises RuntimeError: If command returns nonzero return code.
263 return str(check_subtype(tg_node))
266 def get_tg_version(tg_node):
267 """Log and return the installed traffic generator version.
269 :param tg_node: Node from topology file.
271 :returns: Traffic generator version string.
273 :raises RuntimeError: If command returns nonzero return code.
275 subtype = check_subtype(tg_node)
276 if subtype == NodeSubTypeTG.TREX:
277 command = f"cat {Constants.TREX_INSTALL_DIR}/VERSION"
278 message = u"Get T-Rex version failed!"
279 stdout, _ = exec_cmd_no_error(tg_node, command, message=message)
280 return stdout.strip()
283 def initialize_traffic_generator(self, osi_layer, parallel_links=1):
284 """TG initialization.
286 :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
287 :param parallel_links: Number of parallel links to configure.
289 :type parallel_links: int
290 :raises ValueError: If OSI layer is unknown.
292 if osi_layer not in ("L2", "L3", "L7"):
293 raise ValueError("Unknown OSI layer!")
295 topology = BuiltIn().get_variable_value("&{topology_info}")
296 self._node = topology["TG"]
297 subtype = check_subtype(self._node)
299 if subtype == NodeSubTypeTG.TREX:
300 trex_topology = list()
301 self._mode = TrexMode.ASTF if osi_layer == "L7" else TrexMode.STL
303 for link in range(1, parallel_links*2, 2):
304 tg_if1_adj_addr = topology[f"TG_pf{link+1}_mac"][0]
305 tg_if2_adj_addr = topology[f"TG_pf{link}_mac"][0]
306 if osi_layer in ("L3", "L7") and "DUT1" in topology.keys():
307 ifl = BuiltIn().get_variable_value("${int}")
308 last = topology["duts_count"]
309 tg_if1_adj_addr = Topology().get_interface_mac(
311 BuiltIn().get_variable_value(
312 f"${{DUT1_{ifl}{link}}}[0]"
315 tg_if2_adj_addr = Topology().get_interface_mac(
316 topology[f"DUT{last}"],
317 BuiltIn().get_variable_value(
318 f"${{DUT{last}_{ifl}{link+1}}}[0]"
322 trex_topology.append(
324 interface=topology[f"TG_pf{link}"][0],
325 dst_mac=tg_if1_adj_addr
328 trex_topology.append(
330 interface=topology[f"TG_pf{link+1}"][0],
331 dst_mac=tg_if2_adj_addr
334 if1_pci = topology[f"TG_pf{link}_pci"][0]
335 if2_pci = topology[f"TG_pf{link+1}_pci"][0]
336 if min(if1_pci, if2_pci) != if1_pci:
337 self._ifaces_reordered = True
338 trex_topology.reverse()
340 TrexConfig.add_startup_configuration(
341 self._node, trex_topology
343 TrafficGenerator.startup_trex(
344 self._node, osi_layer, subtype=subtype
348 def startup_trex(tg_node, osi_layer, subtype=None):
349 """Startup sequence for the TRex traffic generator.
351 :param tg_node: Traffic generator node.
352 :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
353 :param subtype: Traffic generator sub-type.
356 :type subtype: NodeSubTypeTG
357 :raises RuntimeError: If T-Rex startup failed.
358 :raises ValueError: If OSI layer is not supported.
361 subtype = check_subtype(tg_node)
362 if subtype == NodeSubTypeTG.TREX:
363 for _ in range(0, 3):
364 # Kill TRex only if it is already running.
365 cmd = u"sh -c \"pgrep t-rex && pkill t-rex && sleep 3 || true\""
367 tg_node, cmd, sudo=True, message=u"Kill TRex failed!"
370 # Prepare interfaces for TRex.
371 tg_port_drv = Constants.TREX_PORT_DRIVER
373 for port in tg_node[u"interfaces"].values():
374 if u"Mellanox" in port.get(u"model"):
375 mlx_driver = port.get(u"driver")
376 pci_addr = port.get(u'pci_address')
377 cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
378 if cur_driver == mlx_driver:
381 DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
383 DS.pci_driver_unbind(tg_node, pci_addr)
384 DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
386 pci_addr = port.get(u'pci_address')
387 cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
389 DS.pci_driver_unbind(tg_node, pci_addr)
390 DS.pci_driver_bind(tg_node, pci_addr, tg_port_drv)
393 cd_cmd = f"cd '{Constants.TREX_INSTALL_DIR}/scripts/'"
394 trex_cmd = OptionString([u"nohup", u"./t-rex-64"])
396 trex_cmd.add(u"--prefix $(hostname)")
397 trex_cmd.add(u"--hdrh")
398 trex_cmd.add(u"--no-scapy-server")
399 trex_cmd.add_if(u"--astf", osi_layer == u"L7")
400 # OptionString does not create double space if extra is empty.
401 trex_cmd.add(f"{Constants.TREX_EXTRA_CMDLINE}")
402 inner_command = f"{cd_cmd} && {trex_cmd} > /tmp/trex.log 2>&1 &"
403 cmd = f"sh -c \"{inner_command}\" > /dev/null"
405 exec_cmd_no_error(tg_node, cmd, sudo=True)
407 cmd = u"sh -c \"cat /tmp/trex.log\""
409 tg_node, cmd, sudo=True,
410 message=u"Get TRex logs failed!"
412 raise RuntimeError(u"Start TRex failed!")
414 # Test T-Rex API responsiveness.
415 cmd = f"python3 {Constants.REMOTE_FW_DIR}/GPL/tools/trex/"
416 if osi_layer in (u"L2", u"L3"):
417 cmd += u"trex_stl_assert.py"
418 elif osi_layer == u"L7":
419 cmd += u"trex_astf_assert.py"
421 raise ValueError(u"Unknown OSI layer!")
424 tg_node, cmd, sudo=True,
425 message=u"T-Rex API is not responding!", retries=20
430 # After max retries TRex is still not responding to API critical
432 exec_cmd(tg_node, u"cat /tmp/trex.log", sudo=True)
433 raise RuntimeError(u"Start T-Rex failed after multiple retries!")
436 def is_trex_running(node):
437 """Check if T-Rex is running using pidof.
439 :param node: Traffic generator node.
441 :returns: True if T-Rex is running otherwise False.
444 ret, _, _ = exec_cmd(node, u"pgrep t-rex", sudo=True)
445 return bool(int(ret) == 0)
448 def teardown_traffic_generator(node):
451 :param node: Traffic generator node.
454 :raises RuntimeError: If node type is not a TG,
455 or if T-Rex teardown fails.
457 subtype = check_subtype(node)
458 if subtype == NodeSubTypeTG.TREX:
462 u"\"if pgrep t-rex; then sudo pkill t-rex && sleep 3; fi\"",
464 message=u"T-Rex kill failed!"
467 def trex_astf_stop_remote_exec(self, node):
468 """Execute T-Rex ASTF script on remote node over ssh to stop running
471 Internal state is updated with measurement results.
473 :param node: T-Rex generator node.
475 :raises RuntimeError: If stop traffic script fails.
477 command_line = OptionString().add("python3")
478 dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
479 command_line.add(f"'{dirname}/trex_astf_stop.py'")
480 command_line.add("--xstat")
481 for value in self._xstats:
482 if value is not None:
483 value = value.replace("'", "\"")
484 command_line.add(f"'{value}'")
485 stdout, _ = exec_cmd_no_error(
487 message="T-Rex ASTF runtime error!"
489 self._parse_traffic_results(stdout)
491 def trex_stl_stop_remote_exec(self, node):
492 """Execute T-Rex STL script on remote node over ssh to stop running
495 Internal state is updated with measurement results.
497 :param node: T-Rex generator node.
499 :raises RuntimeError: If stop traffic script fails.
501 command_line = OptionString().add("python3")
502 dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
503 command_line.add(f"'{dirname}/trex_stl_stop.py'")
504 command_line.add("--xstat")
505 for value in self._xstats:
506 if value is not None:
507 value = value.replace("'", "\"")
508 command_line.add(f"'{value}'")
509 stdout, _ = exec_cmd_no_error(
511 message="T-Rex STL runtime error!"
513 self._parse_traffic_results(stdout)
515 def stop_traffic_on_tg(self):
516 """Stop all traffic on TG.
518 :returns: Structure containing the result of the measurement.
519 :rtype: MeasurementResult
520 :raises ValueError: If TG traffic profile is not supported.
522 subtype = check_subtype(self._node)
523 if subtype != NodeSubTypeTG.TREX:
524 raise ValueError(f"Unsupported TG subtype: {subtype!r}")
525 if u"trex-astf" in self.traffic_profile:
526 self.trex_astf_stop_remote_exec(self._node)
527 elif u"trex-stl" in self.traffic_profile:
528 self.trex_stl_stop_remote_exec(self._node)
530 raise ValueError(u"Unsupported T-Rex traffic profile!")
531 self._stop_time = time.monotonic()
533 return self._get_measurement_result()
535 def _compute_duration(self, duration, multiplier):
536 """Compute duration for profile driver.
538 The final result is influenced by transaction scale and duration limit.
539 It is assumed a higher level function has already set those on self.
540 The duration argument is the target value from search point of view,
541 before the overrides are applied here.
543 Minus one (signalling async traffic start) is kept.
545 Completeness flag is also included. Duration limited or async trials
546 are not considered complete for ramp-up purposes.
548 :param duration: Time expressed in seconds for how long to send traffic.
549 :param multiplier: Traffic rate in transactions per second.
550 :type duration: float
551 :type multiplier: float
552 :returns: New duration and whether it was a complete ramp-up candidate.
557 return duration, False
558 computed_duration = duration
559 if self.transaction_scale:
560 computed_duration = self.transaction_scale / multiplier
561 # Log the computed duration,
562 # so we can compare with what telemetry suggests
563 # the real duration was.
564 logger.debug(f"Expected duration {computed_duration}")
565 if not self.duration_limit:
566 return computed_duration, True
567 limited_duration = min(computed_duration, self.duration_limit)
568 return limited_duration, (limited_duration == computed_duration)
570 def trex_astf_start_remote_exec(
571 self, duration, multiplier, async_call=False):
572 """Execute T-Rex ASTF script on remote node over ssh to start running
575 In sync mode, measurement results are stored internally.
576 In async mode, initial data including xstats are stored internally.
578 This method contains the logic to compute duration as maximum time
579 if transaction_scale is nonzero.
580 The transaction_scale argument defines (limits) how many transactions
581 will be started in total. As that amount of transaction can take
582 considerable time (sometimes due to explicit delays in the profile),
583 the real time a trial needs to finish is computed here. For now,
584 in that case the duration argument is ignored, assuming it comes
585 from ASTF-unaware search algorithm. The overall time a single
586 transaction needs is given in parameter transaction_duration,
587 it includes both explicit delays and implicit time it takes
588 to transfer data (or whatever the transaction does).
590 Currently it is observed TRex does not start the ASTF traffic
591 immediately, an ad-hoc constant is added to the computed duration
592 to compensate for that.
594 If transaction_scale is zero, duration is not recomputed.
595 It is assumed the subsequent result parsing gets the real duration
596 if the traffic stops sooner for any reason.
598 Currently, it is assumed traffic profile defines a single transaction.
599 To avoid heavy logic here, the input rate is expected to be in
600 transactions per second, as that directly translates to TRex multiplier,
601 (assuming the profile does not override the default cps value of one).
603 :param duration: Time expressed in seconds for how long to send traffic.
604 :param multiplier: Traffic rate in transactions per second.
605 :param async_call: If enabled then don't wait for all incoming traffic.
606 :type duration: float
607 :type multiplier: int
608 :type async_call: bool
609 :raises RuntimeError: In case of T-Rex driver issue.
611 self.check_mode(TrexMode.ASTF)
612 p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
613 if not isinstance(duration, (float, int)):
614 duration = float(duration)
616 computed_duration, _ = self._compute_duration(duration, multiplier)
618 command_line = OptionString().add(u"python3")
619 dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
620 command_line.add(f"'{dirname}/trex_astf_profile.py'")
621 command_line.change_prefix(u"--")
622 dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
623 command_line.add_with_value(
624 u"profile", f"'{dirname}/{self.traffic_profile}.py'"
626 command_line.add_with_value(u"duration", f"{computed_duration!r}")
627 command_line.add_with_value(u"frame_size", self.frame_size)
628 command_line.add_with_value(
629 u"n_data_frames", Constants.ASTF_N_DATA_FRAMES
631 command_line.add_with_value(u"multiplier", multiplier)
632 command_line.add_with_value(u"port_0", p_0)
633 command_line.add_with_value(u"port_1", p_1)
634 command_line.add_with_value(
635 u"traffic_directions", self.traffic_directions
637 command_line.add_if(u"async_start", async_call)
638 command_line.add_if(u"latency", self.use_latency)
639 command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
640 command_line.add_with_value(
641 u"delay", Constants.PERF_TRIAL_ASTF_DELAY
644 self._start_time = time.monotonic()
645 self._rate = multiplier
646 stdout, _ = exec_cmd_no_error(
647 self._node, command_line, timeout=computed_duration + 10.0,
648 message=u"T-Rex ASTF runtime error!"
653 self._target_duration = None
654 self._duration = None
655 self._received = None
660 self._l7_data = dict()
661 self._l7_data[u"client"] = dict()
662 self._l7_data[u"client"][u"active_flows"] = None
663 self._l7_data[u"client"][u"established_flows"] = None
664 self._l7_data[u"client"][u"traffic_duration"] = None
665 self._l7_data[u"server"] = dict()
666 self._l7_data[u"server"][u"active_flows"] = None
667 self._l7_data[u"server"][u"established_flows"] = None
668 self._l7_data[u"server"][u"traffic_duration"] = None
669 if u"udp" in self.traffic_profile:
670 self._l7_data[u"client"][u"udp"] = dict()
671 self._l7_data[u"client"][u"udp"][u"connects"] = None
672 self._l7_data[u"client"][u"udp"][u"closed_flows"] = None
673 self._l7_data[u"client"][u"udp"][u"err_cwf"] = None
674 self._l7_data[u"server"][u"udp"] = dict()
675 self._l7_data[u"server"][u"udp"][u"accepted_flows"] = None
676 self._l7_data[u"server"][u"udp"][u"closed_flows"] = None
677 elif u"tcp" in self.traffic_profile:
678 self._l7_data[u"client"][u"tcp"] = dict()
679 self._l7_data[u"client"][u"tcp"][u"initiated_flows"] = None
680 self._l7_data[u"client"][u"tcp"][u"connects"] = None
681 self._l7_data[u"client"][u"tcp"][u"closed_flows"] = None
682 self._l7_data[u"client"][u"tcp"][u"connattempt"] = None
683 self._l7_data[u"server"][u"tcp"] = dict()
684 self._l7_data[u"server"][u"tcp"][u"accepted_flows"] = None
685 self._l7_data[u"server"][u"tcp"][u"connects"] = None
686 self._l7_data[u"server"][u"tcp"][u"closed_flows"] = None
688 logger.warn(u"Unsupported T-Rex ASTF traffic profile!")
690 for line in stdout.splitlines():
691 if f"Xstats snapshot {index}: " in line:
692 xstats.append(line[19:])
694 self._xstats = tuple(xstats)
696 self._target_duration = duration
697 self._duration = computed_duration
698 self._parse_traffic_results(stdout)
700 def trex_stl_start_remote_exec(self, duration, rate, async_call=False):
701 """Execute T-Rex STL script on remote node over ssh to start running
704 In sync mode, measurement results are stored internally.
705 In async mode, initial data including xstats are stored internally.
707 Mode-unaware code (e.g. in search algorithms) works with transactions.
708 To keep the logic simple, multiplier is set to that value.
709 As bidirectional traffic profiles send packets in both directions,
710 they are treated as transactions with two packets (one per direction).
712 :param duration: Time expressed in seconds for how long to send traffic.
713 :param rate: Traffic rate in transactions per second.
714 :param async_call: If enabled then don't wait for all incoming traffic.
715 :type duration: float
717 :type async_call: bool
718 :raises RuntimeError: In case of T-Rex driver issue.
720 self.check_mode(TrexMode.STL)
721 p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
722 if not isinstance(duration, (float, int)):
723 duration = float(duration)
725 duration, _ = self._compute_duration(duration=duration, multiplier=rate)
727 command_line = OptionString().add(u"python3")
728 dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
729 command_line.add(f"'{dirname}/trex_stl_profile.py'")
730 command_line.change_prefix(u"--")
731 dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
732 command_line.add_with_value(
733 u"profile", f"'{dirname}/{self.traffic_profile}.py'"
735 command_line.add_with_value(u"duration", f"{duration!r}")
736 command_line.add_with_value(u"frame_size", self.frame_size)
737 command_line.add_with_value(u"rate", f"{rate!r}")
738 command_line.add_with_value(u"port_0", p_0)
739 command_line.add_with_value(u"port_1", p_1)
740 command_line.add_with_value(
741 u"traffic_directions", self.traffic_directions
743 command_line.add_if(u"async_start", async_call)
744 command_line.add_if(u"latency", self.use_latency)
745 command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
746 command_line.add_with_value(u"delay", Constants.PERF_TRIAL_STL_DELAY)
748 self._start_time = time.monotonic()
749 self._rate = float(rate[:-3]) if u"pps" in rate else float(rate)
750 stdout, _ = exec_cmd_no_error(
751 self._node, command_line, timeout=int(duration) + 60,
752 message=u"T-Rex STL runtime error"
757 self._target_duration = None
758 self._duration = None
759 self._received = None
766 for line in stdout.splitlines():
767 if f"Xstats snapshot {index}: " in line:
768 xstats.append(line[19:])
770 self._xstats = tuple(xstats)
772 self._target_duration = duration
773 self._duration = duration
774 self._parse_traffic_results(stdout)
776 def send_traffic_on_tg(
784 traffic_directions=2,
785 transaction_duration=0.0,
787 transaction_type=u"packet",
791 ramp_up_duration=None,
795 """Send traffic from all configured interfaces on TG.
797 In async mode, xstats is stored internally,
798 to enable getting correct result when stopping the traffic.
799 In both modes, stdout is returned,
800 but _parse_traffic_results only works in sync output.
802 Note that traffic generator uses DPDK driver which might
803 reorder port numbers based on wiring and PCI numbering.
804 This method handles that, so argument values are invariant,
805 but you can see swapped valued in debug logs.
807 When transaction_scale is specified, the duration value is ignored
808 and the needed time is computed. For cases where this results in
809 to too long measurement (e.g. teardown trial with small rate),
810 duration_limit is applied (of non-zero), so the trial is stopped sooner.
812 Bidirectional STL profiles are treated as transactions with two packets.
814 The return value is None for async.
816 :param duration: Duration of test traffic generation in seconds.
817 :param rate: Traffic rate in transactions per second.
818 :param frame_size: Frame size (L2) in Bytes.
819 :param traffic_profile: Module name as a traffic profile identifier.
820 See GPL/traffic_profiles/trex for implemented modules.
821 :param async_call: Async mode.
822 :param ppta: Packets per transaction, aggregated over directions.
823 Needed for udp_pps which does not have a good transaction counter,
824 so we need to compute expected number of packets.
826 :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
828 :param transaction_duration: Total expected time to close transaction.
829 :param transaction_scale: Number of transactions to perform.
830 0 (default) means unlimited.
831 :param transaction_type: An identifier specifying which counters
832 and formulas to use when computing attempted and failed
833 transactions. Default: "packet".
834 :param duration_limit: Zero or maximum limit for computed (or given)
836 :param use_latency: Whether to measure latency during the trial.
838 :param ramp_up_rate: Rate to use in ramp-up trials [pps].
839 :param ramp_up_duration: Duration of ramp-up trials [s].
840 :param state_timeout: Time of life of DUT state [s].
841 :param ramp_up_only: If true, do not perform main trial measurement.
842 :type duration: float
844 :type frame_size: str
845 :type traffic_profile: str
846 :type async_call: bool
848 :type traffic_directions: int
849 :type transaction_duration: float
850 :type transaction_scale: int
851 :type transaction_type: str
852 :type duration_limit: float
853 :type use_latency: bool
854 :type ramp_up_rate: float
855 :type ramp_up_duration: float
856 :type state_timeout: float
857 :type ramp_up_only: bool
858 :returns: TG results.
859 :rtype: MeasurementResult or None
860 :raises ValueError: If TG traffic profile is not supported.
862 self.set_rate_provider_defaults(
863 frame_size=frame_size,
864 traffic_profile=traffic_profile,
866 traffic_directions=traffic_directions,
867 transaction_duration=transaction_duration,
868 transaction_scale=transaction_scale,
869 transaction_type=transaction_type,
870 duration_limit=duration_limit,
871 use_latency=use_latency,
872 ramp_up_rate=ramp_up_rate,
873 ramp_up_duration=ramp_up_duration,
874 state_timeout=state_timeout,
876 return self._send_traffic_on_tg_with_ramp_up(
879 async_call=async_call,
880 ramp_up_only=ramp_up_only,
883 def _send_traffic_on_tg_internal(
884 self, duration, rate, async_call=False):
885 """Send traffic from all configured interfaces on TG.
887 This is an internal function, it assumes set_rate_provider_defaults
888 has been called to remember most values.
889 The reason why need to remember various values is that
890 the traffic can be asynchronous, and parsing needs those values.
891 The reason why this is is a separate function from the one
892 which calls set_rate_provider_defaults is that some search algorithms
893 need to specify their own values, and we do not want the measure call
894 to overwrite them with defaults.
896 This function is used both for automated ramp-up trials
897 and for explicitly called trials.
899 :param duration: Duration of test traffic generation in seconds.
900 :param rate: Traffic rate in transactions per second.
901 :param async_call: Async mode.
902 :type duration: float
904 :type async_call: bool
905 :returns: TG results.
906 :rtype: MeasurementResult or None
907 :raises ValueError: If TG traffic profile is not supported.
909 subtype = check_subtype(self._node)
910 if subtype == NodeSubTypeTG.TREX:
911 if u"trex-astf" in self.traffic_profile:
912 self.trex_astf_start_remote_exec(
913 duration, float(rate), async_call
915 elif u"trex-stl" in self.traffic_profile:
916 unit_rate_str = str(rate) + u"pps"
917 self.trex_stl_start_remote_exec(
918 duration, unit_rate_str, async_call
921 raise ValueError(u"Unsupported T-Rex traffic profile!")
923 return None if async_call else self._get_measurement_result()
925 def _send_traffic_on_tg_with_ramp_up(
926 self, duration, rate, async_call=False, ramp_up_only=False):
927 """Send traffic from all interfaces on TG, maybe after ramp-up.
929 This is an internal function, it assumes set_rate_provider_defaults
930 has been called to remember most values.
931 The reason why need to remember various values is that
932 the traffic can be asynchronous, and parsing needs those values.
933 The reason why this is a separate function from the one
934 which calls set_rate_provider_defaults is that some search algorithms
935 need to specify their own values, and we do not want the measure call
936 to overwrite them with defaults.
938 If ramp-up tracking is detected, a computation is performed,
939 and if state timeout is near, trial at ramp-up rate and duration
940 is inserted before the main trial measurement.
942 The ramp_up_only parameter forces a ramp-up without immediate
943 trial measurement, which is useful in case self remembers
944 a previous ramp-up trial that belongs to a different test (phase).
946 Return None if trial is async or ramp-up only.
948 :param duration: Duration of test traffic generation in seconds.
949 :param rate: Traffic rate in transactions per second.
950 :param async_call: Async mode.
951 :param ramp_up_only: If true, do not perform main trial measurement.
952 :type duration: float
954 :type async_call: bool
955 :type ramp_up_only: bool
956 :returns: TG results.
957 :rtype: MeasurementResult or None
958 :raises ValueError: If TG traffic profile is not supported.
961 if self.ramp_up_rate:
962 # Figure out whether we need to insert a ramp-up trial.
963 if ramp_up_only or self.ramp_up_start is None:
964 # We never ramped up yet (at least not in this test case).
965 ramp_up_needed = True
967 # We ramped up before, but maybe it was too long ago.
968 # Adding a constant overhead to be safe.
969 time_now = time.monotonic() + 1.0
970 computed_duration, complete = self._compute_duration(
974 # There are two conditions for inserting ramp-up.
975 # If early sessions are expiring already,
976 # or if late sessions are to expire before measurement is over.
977 ramp_up_start_delay = time_now - self.ramp_up_start
978 ramp_up_stop_delay = time_now - self.ramp_up_stop
979 ramp_up_stop_delay += computed_duration
980 bigger_delay = max(ramp_up_start_delay, ramp_up_stop_delay)
981 # Final boolean decision.
982 ramp_up_needed = (bigger_delay >= self.state_timeout)
985 u"State may time out during next real trial, "
986 u"inserting a ramp-up trial."
988 self.ramp_up_start = time.monotonic()
989 self._send_traffic_on_tg_internal(
990 duration=self.ramp_up_duration,
991 rate=self.ramp_up_rate,
992 async_call=async_call,
994 self.ramp_up_stop = time.monotonic()
995 logger.debug(u"Ramp-up done.")
998 u"State will probably not time out during next real trial, "
999 u"no ramp-up trial needed just yet."
1003 trial_start = time.monotonic()
1004 result = self._send_traffic_on_tg_internal(
1007 async_call=async_call,
1009 trial_end = time.monotonic()
1010 if self.ramp_up_rate:
1011 # Optimization: No loss acts as a good ramp-up, if it was complete.
1012 if complete and result is not None and result.loss_ratio == 0.0:
1013 logger.debug(u"Good trial acts as a ramp-up")
1014 self.ramp_up_start = trial_start
1015 self.ramp_up_stop = trial_end
1017 logger.debug(u"Loss or incomplete, does not act as a ramp-up.")
def no_traffic_loss_occurred(self):
    """Fail if loss occurred in traffic run.

    :raises RuntimeError: If traffic was never generated,
        or if any loss occurred.
    """
    if self._loss is None:
        raise RuntimeError(u"The traffic generation has not been issued")
    # Bugfix: _parse_traffic_results stores loss as int, so the previous
    # comparison (self._loss != u"0") was always True, reporting loss
    # even for a clean run. Normalize through float so that 0, "0"
    # and "0.0" all count as no loss.
    if float(self._loss) != 0.0:
        raise RuntimeError(f"Traffic loss occurred: {self._loss}")
def fail_if_no_traffic_forwarded(self):
    """Fail if no traffic forwarded.

    Requires a preceding traffic run whose results were parsed
    into self._received.

    :raises RuntimeError: If traffic generation was not started,
        or if the TG received zero packets.
    """
    received = self._received
    if received is None:
        raise RuntimeError(u"The traffic generation has not been issued")
    if received == 0:
        raise RuntimeError(u"No traffic forwarded")
def partial_traffic_loss_accepted(
        self, loss_acceptance, loss_acceptance_type):
    """Fail if loss is higher then accepted in traffic run.

    :param loss_acceptance: Permitted drop ratio or frames count.
    :param loss_acceptance_type: Type of permitted loss,
        u"percentage" or u"frames".
    :type loss_acceptance: float
    :type loss_acceptance_type: LossAcceptanceType
    :raises RuntimeError: If loss is above acceptance criteria,
        if traffic was never generated, or on unsupported loss type.
    """
    if self._loss is None:
        raise RuntimeError(u"The traffic generation has not been issued")
    if loss_acceptance_type == u"percentage":
        loss = (float(self._loss) / float(self._sent)) * 100
    elif loss_acceptance_type == u"frames":
        loss = float(self._loss)
    else:
        # Consistency fix: siblings raise RuntimeError; RuntimeError is a
        # subclass of Exception, so existing callers still catch it.
        raise RuntimeError(u"Loss acceptance type not supported")
    if loss > float(loss_acceptance):
        raise RuntimeError(
            f"Traffic loss {loss} above loss acceptance: {loss_acceptance}"
        )
def _parse_traffic_results(self, stdout):
    """Parse stdout of scripts into fields of self.

    Block of code to reuse, by sync start, or stop after async.

    :param stdout: Text containing the standard output.
    :type stdout: str
    """
    subtype = check_subtype(self._node)
    if subtype == NodeSubTypeTG.TREX:
        # Last line from console output carries the semicolon separated
        # key=value pairs produced by the TRex driver script.
        line = stdout.splitlines()[-1]
        results = line.split(u";")
        if results[-1] in (u" ", u""):
            results.pop(-1)
        self._result = dict()
        for result in results:
            key, value = result.split(u"=", maxsplit=1)
            self._result[key.strip()] = value
        logger.info(f"TrafficGen results:\n{self._result}")

        def geti(key):
            # Integer counter field, defaulting to 0 when missing.
            return int(self._result.get(key, 0))

        def getf(key):
            # Float field, defaulting to 0.0 when missing.
            return float(self._result.get(key, 0.0))

        # Bugfix: original line was int(self._result.get(u"total_received"), 0)
        # with a misplaced comma: base-0 int() conversion, and a TypeError
        # when the key is absent. The default belongs inside .get(),
        # consistent with total_sent and frame_loss below.
        self._received = geti(u"total_received")
        self._sent = geti(u"total_sent")
        self._loss = geti(u"frame_loss")
        self._approximated_duration = \
            self._result.get(u"approximated_duration", 0.0)
        # The driver may report the literal "manual" instead of a number.
        if u"manual" not in str(self._approximated_duration):
            self._approximated_duration = float(self._approximated_duration)
        self._latency = list()
        self._latency.append(self._result.get(u"latency_stream_0(usec)"))
        self._latency.append(self._result.get(u"latency_stream_1(usec)"))
        if self._mode == TrexMode.ASTF:
            self._l7_data = dict()
            client = dict()
            client[u"sent"] = geti(u"client_sent")
            client[u"received"] = geti(u"client_received")
            client[u"active_flows"] = geti(u"client_active_flows")
            client[u"established_flows"] = geti(u"client_established_flows")
            client[u"traffic_duration"] = getf(u"client_traffic_duration")
            client[u"err_rx_throttled"] = geti(u"client_err_rx_throttled")
            # NOTE(review): key name and counter name differ
            # ("err_c_nf_throttled" vs "client_err_nf_throttled"),
            # preserved from the original - confirm intended.
            client[u"err_c_nf_throttled"] = geti(u"client_err_nf_throttled")
            client[u"err_flow_overflow"] = geti(u"client_err_flow_overflow")
            server = dict()
            server[u"active_flows"] = geti(u"server_active_flows")
            server[u"established_flows"] = geti(u"server_established_flows")
            server[u"traffic_duration"] = getf(u"server_traffic_duration")
            # NOTE(review): reads the client counter, exactly as the
            # original did; looks like a copy-paste from the client block.
            # Confirm whether u"server_err_rx_throttled" was intended.
            server[u"err_rx_throttled"] = geti(u"client_err_rx_throttled")
            self._l7_data[u"client"] = client
            self._l7_data[u"server"] = server
            if u"udp" in self.traffic_profile:
                client[u"udp"] = {
                    u"connects": geti(u"client_udp_connects"),
                    u"closed_flows": geti(u"client_udp_closed"),
                    u"tx_bytes": geti(u"client_udp_tx_bytes"),
                    u"rx_bytes": geti(u"client_udp_rx_bytes"),
                    u"tx_packets": geti(u"client_udp_tx_packets"),
                    u"rx_packets": geti(u"client_udp_rx_packets"),
                    u"keep_drops": geti(u"client_udp_keep_drops"),
                    u"err_cwf": geti(u"client_err_cwf"),
                }
                server[u"udp"] = {
                    u"accepted_flows": geti(u"server_udp_accepts"),
                    u"closed_flows": geti(u"server_udp_closed"),
                    u"tx_bytes": geti(u"server_udp_tx_bytes"),
                    u"rx_bytes": geti(u"server_udp_rx_bytes"),
                    u"tx_packets": geti(u"server_udp_tx_packets"),
                    u"rx_packets": geti(u"server_udp_rx_packets"),
                }
            elif u"tcp" in self.traffic_profile:
                client[u"tcp"] = {
                    u"initiated_flows": geti(u"client_tcp_connect_inits"),
                    u"connects": geti(u"client_tcp_connects"),
                    u"closed_flows": geti(u"client_tcp_closed"),
                    u"connattempt": geti(u"client_tcp_connattempt"),
                    u"tx_bytes": geti(u"client_tcp_tx_bytes"),
                    u"rx_bytes": geti(u"client_tcp_rx_bytes"),
                }
                server[u"tcp"] = {
                    u"accepted_flows": geti(u"server_tcp_accepts"),
                    u"connects": geti(u"server_tcp_connects"),
                    u"closed_flows": geti(u"server_tcp_closed"),
                    u"tx_bytes": geti(u"server_tcp_tx_bytes"),
                    u"rx_bytes": geti(u"server_tcp_rx_bytes"),
                }
def _get_measurement_result(self):
    """Return the result of last measurement as MeasurementResult.

    Separate function, as measurements can end either by time
    or by explicit call, this is the common block at the end.

    The intended_load field of MeasurementResult is in
    transactions per second. Transmit count and loss count units
    depend on the transaction type. Usually they are in transactions
    per second, or aggregated packets per second.

    :returns: Structure containing the result of the measurement.
    :rtype: MeasurementResult
    :raises RuntimeError: If duration cannot be determined,
        or on unsupported transaction type.
    """
    overhead_duration = time.monotonic() - self._start_time
    # Client duration seems to include a setup period
    # where TRex does not send any packets yet.
    # Server duration does not include it.
    try:
        srv = self._l7_data[u"server"]
        offered_dur = float(srv[u"traffic_duration"])
    except (KeyError, AttributeError, ValueError, TypeError):
        offered_dur = None
    try:
        if not offered_dur:
            offered_dur = float(self._approximated_duration)
    except ValueError:  # "manual"
        offered_dur = None
    if not offered_dur:
        if self._duration and self._duration > 0:
            # Known recomputed or target duration.
            offered_dur = self._duration
        elif self._stop_time:
            # It was an explicit stop.
            offered_dur = self._stop_time - self._start_time
        else:
            raise RuntimeError(u"Unable to determine duration.")
    intended_dur = self._target_duration or offered_dur
    tps_load = self._rate
    ttype = self.transaction_type
    if ttype == u"packet":
        offered_count = self._sent
        packet_rate = tps_load * self.ppta
        # We have a float. TRex way of rounding it is not obvious.
        # The biggest source of mismatch is Inter Stream Gap.
        # So the code tolerates 10 usec of missing packets.
        intended_count = math.ceil((intended_dur - 1e-5) * packet_rate)
        # TRex can send more.
        intended_count = max(intended_count, self._sent)
        shortfall = intended_count - self._sent
        lost = self._loss
    elif ttype == u"udp_cps":
        if not self.transaction_scale:
            raise RuntimeError(u"Add support for no-limit udp_cps.")
        offered_count = self._l7_data[u"client"][u"sent"]
        # We do not care whether TG is slow, it should have attempted all.
        intended_count = self.transaction_scale
        shortfall = intended_count - offered_count
        lost = offered_count - self._l7_data[u"client"][u"received"]
    elif ttype == u"tcp_cps":
        if not self.transaction_scale:
            raise RuntimeError(u"Add support for no-limit tcp_cps.")
        offered_count = self._l7_data[u"client"][u"tcp"][u"connattempt"]
        # We do not care whether TG is slow, it should have attempted all.
        intended_count = self.transaction_scale
        shortfall = intended_count - offered_count
        # From TCP point of view, server/connects counts full connections,
        # but we are testing NAT session so client/connects counts that
        # (half connections from TCP point of view).
        lost = offered_count - self._l7_data[u"client"][u"tcp"][u"connects"]
    elif ttype == u"udp_pps":
        if not self.transaction_scale:
            raise RuntimeError(u"Add support for no-limit udp_pps.")
        offered_count = self._sent
        intended_count = self.transaction_scale * self.ppta
        shortfall = intended_count - self._sent
        lost = self._loss
    elif ttype == u"tcp_pps":
        if not self.transaction_scale:
            raise RuntimeError(u"Add support for no-limit tcp_pps.")
        offered_count = self._sent
        intended_count = self.transaction_scale * self.ppta
        # One loss-like scenario happens when TRex receives all packets
        # on L2 level, but is not fast enough to process them all
        # at L7 level, which leads to retransmissions.
        # Those manifest as opackets larger than expected.
        # A simple workaround is to add absolute difference.
        # Probability of retransmissions exactly cancelling
        # packets unsent due to duration stretching is quite low.
        shortfall = abs(intended_count - self._sent)
        lost = self._loss
    else:
        raise RuntimeError(f"Unknown parsing {self.transaction_type!r}")
    if shortfall and isinstance(self._approximated_duration, float):
        # Do not report unsent for "manual".
        logger.debug(f"Unsent packets/transactions: {shortfall}")
    if lost < 0 and not self.negative_loss:
        lost = 0
    measurement = MeasurementResult(
        intended_duration=intended_dur,
        intended_load=tps_load,
        offered_count=offered_count,
        loss_count=lost,
        offered_duration=offered_dur,
        duration_with_overheads=overhead_duration,
        intended_count=intended_count,
    )
    measurement.latency = self.get_latency_int()
    return measurement
def measure(self, intended_duration, intended_load):
    """Run trial measurement, parse and return results.

    The intended load is for transactions. Stateless bidirectional
    traffic is understood as a sequence of (asynchronous) transactions.

    The result units depend on test type, generally
    the count either transactions or packets (aggregated over directions).

    Optionally, this method sleeps if measurement finished before
    the time specified as intended_duration (PLRsearch needs time for math).

    :param intended_duration: Trial duration [s].
    :param intended_load: Target rate in transactions per second.
    :type intended_duration: float
    :type intended_load: float
    :returns: Structure containing the result of the measurement.
    :rtype: MeasurementResult
    :raises RuntimeError: If TG is not set or if node is not TG
        or if subtype is not specified.
    :raises NotImplementedError: If TG is not supported.
    """
    intended_duration = float(intended_duration)
    started_at = time.monotonic()
    deadline = started_at + intended_duration
    if self.resetter:
        self.resetter()
    result = self._send_traffic_on_tg_with_ramp_up(
        duration=intended_duration,
        rate=intended_load,
        async_call=False,
    )
    logger.debug(f"trial measurement result: {result!r}")
    # In PLRsearch, computation needs the specified time to complete.
    if self.sleep_till_duration:
        while (remaining := deadline - time.monotonic()) > 0.0:
            time.sleep(remaining)
    return result
def set_rate_provider_defaults(
        self,
        frame_size,
        traffic_profile,
        ppta=1,
        resetter=None,
        traffic_directions=2,
        transaction_duration=0.0,
        transaction_scale=0,
        transaction_type=u"packet",
        duration_limit=0.0,
        negative_loss=True,
        sleep_till_duration=False,
        use_latency=False,
        ramp_up_rate=None,
        ramp_up_duration=None,
        state_timeout=240.0,
):
    """Store values accessed by measure().

    :param frame_size: Frame size identifier or value [B].
    :param traffic_profile: Module name as a traffic profile identifier.
        See GPL/traffic_profiles/trex for implemented modules.
    :param ppta: Packets per transaction, aggregated over directions.
        Needed for udp_pps which does not have a good transaction counter,
        so we need to compute expected number of packets.
    :param resetter: Callable to reset DUT state for repeated trials.
    :param traffic_directions: Traffic from packet counting point of view
        is bi- (2) or uni- (1) directional.
    :param transaction_duration: Total expected time to close transaction.
    :param transaction_scale: Number of transactions to perform.
        0 (default) means unlimited.
    :param transaction_type: An identifier specifying which counters
        and formulas to use when computing attempted and failed
        transactions. Default: "packet".
    :param duration_limit: Zero or maximum limit for computed (or given)
        duration.
    :param negative_loss: If false, negative loss is reported as zero loss.
    :param sleep_till_duration: If true and measurement returned faster,
        sleep until it matches duration. Needed for PLRsearch.
    :param use_latency: Whether to measure latency during the trial.
    :param ramp_up_rate: Rate to use in ramp-up trials [pps].
    :param ramp_up_duration: Duration of ramp-up trials [s].
    :param state_timeout: Time of life of DUT state [s].
    :type frame_size: str or int
    :type traffic_profile: str
    :type ppta: int
    :type resetter: Optional[Callable[[], None]]
    :type traffic_directions: int
    :type transaction_duration: float
    :type transaction_scale: int
    :type transaction_type: str
    :type duration_limit: float
    :type negative_loss: bool
    :type sleep_till_duration: bool
    :type use_latency: bool
    :type ramp_up_rate: float
    :type ramp_up_duration: float
    :type state_timeout: float
    """
    self.frame_size = frame_size
    self.traffic_profile = str(traffic_profile)
    self.resetter = resetter
    self.ppta = int(ppta)
    self.traffic_directions = int(traffic_directions)
    self.transaction_duration = float(transaction_duration)
    self.transaction_scale = int(transaction_scale)
    self.transaction_type = str(transaction_type)
    self.duration_limit = float(duration_limit)
    self.negative_loss = bool(negative_loss)
    self.sleep_till_duration = bool(sleep_till_duration)
    self.use_latency = bool(use_latency)
    # Bugfix: float(None) raised TypeError when the ramp-up arguments
    # kept their default None. Treat None (or 0) as "no ramp-up":
    # 0.0 is falsy, so the ramp-up insertion logic stays disabled.
    self.ramp_up_rate = float(ramp_up_rate) if ramp_up_rate else 0.0
    self.ramp_up_duration = \
        float(ramp_up_duration) if ramp_up_duration else 0.0
    self.state_timeout = float(state_timeout)
1420 class OptimizedSearch:
1421 """Class to be imported as Robot Library, containing search keywords.
1423 Aside of setting up measurer and forwarding arguments,
1424 the main business is to translate min/max rate from unidir to aggregated.
def perform_mlr_search(
    frame_size: Union[int, str],
    traffic_profile: str,
    min_load: float,
    max_load: float,
    loss_ratio: float = 0.005,
    relative_width: float = 0.005,
    initial_trial_duration: float = 1.0,
    final_trial_duration: float = 1.0,
    duration_sum: float = 21.0,
    expansion_coefficient: int = 2,
    preceding_targets: int = 2,
    search_duration_max: float = 1200.0,
    ppta: int = 1,
    resetter: Optional[Callable[[], None]] = None,
    traffic_directions: int = 2,
    transaction_duration: float = 0.0,
    transaction_scale: int = 0,
    transaction_type: str = "packet",
    use_latency: bool = False,
    ramp_up_rate: float = 0.0,
    ramp_up_duration: float = 0.0,
    state_timeout: float = 240.0,
) -> List[GoalResult]:
    """Setup initialized TG, perform optimized search, return intervals.

    If transaction_scale is nonzero, all init and non-init trial durations
    are set to 1.0 (as they do not affect the real trial duration)
    and zero intermediate phases are used.
    This way no re-measurement happens.
    Warmup has to be handled via resetter or ramp-up mechanisms.

    :param frame_size: Frame size identifier or value [B].
    :param traffic_profile: Module name as a traffic profile identifier.
        See GPL/traffic_profiles/trex for implemented modules.
    :param min_load: Minimal load in transactions per second.
    :param max_load: Maximal load in transactions per second.
    :param loss_ratio: Ratio of packets lost, for PDR [1].
    :param relative_width: Final lower bound intended load
        cannot be more distant that this multiple of upper bound [1].
    :param initial_trial_duration: Trial duration for the initial phase
        and also for the first intermediate phase [s].
    :param final_trial_duration: Trial duration for the final phase [s].
    :param duration_sum: Max sum of duration for deciding [s].
    :param expansion_coefficient: In external search multiply width by this.
    :param preceding_targets: Number of intermediate phases
        to perform before the final phase [1].
    :param search_duration_max: The search will fail itself
        when not finished before this overall time [s].
    :param ppta: Packets per transaction, aggregated over directions.
        Needed for udp_pps which does not have a good transaction counter,
        so we need to compute expected number of packets.
    :param resetter: Callable to reset DUT state for repeated trials.
    :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
    :param transaction_duration: Total expected time to close transaction.
    :param transaction_scale: Number of transactions to perform.
        0 (default) means unlimited.
    :param transaction_type: An identifier specifying which counters
        and formulas to use when computing attempted and failed
        transactions. Default: "packet".
    :param use_latency: Whether to measure latency during the trial.
    :param ramp_up_rate: Rate to use in ramp-up trials [pps].
    :param ramp_up_duration: Duration of ramp-up trials [s].
    :param state_timeout: Time of life of DUT state [s].
    :type frame_size: str or int
    :type traffic_profile: str
    :type min_load: float
    :type max_load: float
    :type loss_ratio: float
    :type relative_width: float
    :type initial_trial_duration: float
    :type final_trial_duration: float
    :type duration_sum: float
    :type expansion_coefficient: int
    :type preceding_targets: int
    :type search_duration_max: float
    :type ppta: int
    :type resetter: Optional[Callable[[], None]]
    :type traffic_directions: int
    :type transaction_duration: float
    :type transaction_scale: int
    :type transaction_type: str
    :type use_latency: bool
    :type ramp_up_rate: float
    :type ramp_up_duration: float
    :type state_timeout: float
    :returns: Goal result (based on unidirectional tps) for each goal.
        The result contains both the offered load for stat trial,
        and the conditional throughput for display.
    :rtype: List[GoalResult]
    :raises RuntimeError: If search duration exceeds search_duration_max
        or if min load becomes an upper bound for any search goal.
    """
    # We need the TrafficGenerator instance Robot Framework instantiated
    # so the trex_stl-* machinery it holds can be reused as the measurer.
    tg = BuiltIn().get_library_instance(
        u"resources.libraries.python.TrafficGenerator"
    )
    # Overrides for fixed transaction amount.
    if transaction_scale:
        initial_trial_duration = 1.0
        final_trial_duration = 1.0
        preceding_targets = 1
        # TODO: Move the value to Constants.py?
        search_duration_max += transaction_scale * 3e-4
    tg.set_rate_provider_defaults(
        frame_size=frame_size,
        traffic_profile=traffic_profile,
        sleep_till_duration=False,
        ppta=ppta,
        resetter=resetter,
        traffic_directions=traffic_directions,
        transaction_duration=transaction_duration,
        transaction_scale=transaction_scale,
        transaction_type=transaction_type,
        use_latency=use_latency,
        ramp_up_rate=ramp_up_rate,
        ramp_up_duration=ramp_up_duration,
        state_timeout=state_timeout,
    )
    if loss_ratio <= 0.0:
        # Happens in reconf tests.
        loss_ratios = [0.0]
    else:
        loss_ratios = [0.0, loss_ratio]
    exceed_ratio = 0.5
    goals = [
        SearchGoal(
            loss_ratio=loss_ratio,
            exceed_ratio=exceed_ratio,
            relative_width=relative_width,
            initial_trial_duration=initial_trial_duration,
            final_trial_duration=final_trial_duration,
            duration_sum=duration_sum,
            preceding_targets=preceding_targets,
            expansion_coefficient=expansion_coefficient,
            fail_fast=True,
        )
        for loss_ratio in loss_ratios
    ]
    # NOTE(review): a few SearchGoal/exceed_ratio lines were not visible
    # in the reviewed diff; field values above reconstructed from the
    # surrounding usage - confirm against MLRsearch SearchGoal defaults.
    config = Config()
    config.goals = goals
    config.min_load = min_load
    config.max_load = max_load
    config.search_duration_max = search_duration_max
    config.warmup_duration = 1.0
    searcher = MultipleLossRatioSearch(config)
    goal_results = searcher.search(measurer=tg, debug=logger.debug)
    return [goal_results[goal] for goal in goals]
1583 def perform_soak_search(
1594 trace_enabled=False,
1595 traffic_directions=2,
1596 transaction_duration=0.0,
1597 transaction_scale=0,
1598 transaction_type=u"packet",
1601 ramp_up_duration=None,
1602 state_timeout=240.0,
1604 """Setup initialized TG, perform soak search, return avg and stdev.
1606 :param frame_size: Frame size identifier or value [B].
1607 :param traffic_profile: Module name as a traffic profile identifier.
1608 See GPL/traffic_profiles/trex for implemented modules.
1609 :param min_load: Minimal load in transactions per second.
1610 :param max_load: Maximal load in transactions per second.
1611 :param plr_target: Ratio of packets lost to achieve [1].
1612 :param tdpt: Trial duration per trial.
1613 The algorithm linearly increases trial duration with trial number,
1614 this is the increment between succesive trials, in seconds.
1615 :param initial_count: Offset to apply before the first trial.
1616 For example initial_count=50 makes first trial to be 51*tdpt long.
1617 This is needed because initial "search" phase of integrator
1618 takes significant time even without any trial results.
1619 :param timeout: The search will stop after this overall time [s].
1620 :param ppta: Packets per transaction, aggregated over directions.
1621 Needed for udp_pps which does not have a good transaction counter,
1622 so we need to compute expected number of packets.
1624 :param resetter: Callable to reset DUT state for repeated trials.
1625 :param trace_enabled: True if trace enabled else False.
1626 This is very verbose tracing on numeric computations,
1627 do not use in production.
1629 :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
1631 :param transaction_duration: Total expected time to close transaction.
1632 :param transaction_scale: Number of transactions to perform.
1633 0 (default) means unlimited.
1634 :param transaction_type: An identifier specifying which counters
1635 and formulas to use when computing attempted and failed
1636 transactions. Default: "packet".
1637 :param use_latency: Whether to measure latency during the trial.
1639 :param ramp_up_rate: Rate to use in ramp-up trials [pps].
1640 :param ramp_up_duration: Duration of ramp-up trials [s].
1641 :param state_timeout: Time of life of DUT state [s].
1642 :type frame_size: str or int
1643 :type traffic_profile: str
1644 :type min_load: float
1645 :type max_load: float
1646 :type plr_target: float
1647 :type initial_count: int
1648 :type timeout: float
1650 :type resetter: Optional[Callable[[], None]]
1651 :type trace_enabled: bool
1652 :type traffic_directions: int
1653 :type transaction_duration: float
1654 :type transaction_scale: int
1655 :type transaction_type: str
1656 :type use_latency: bool
1657 :type ramp_up_rate: float
1658 :type ramp_up_duration: float
1659 :type state_timeout: float
1660 :returns: Average and stdev of estimated aggregated rate giving PLR.
1661 :rtype: 2-tuple of float
1663 tg_instance = BuiltIn().get_library_instance(
1664 u"resources.libraries.python.TrafficGenerator"
1666 # Overrides for fixed transaction amount.
1667 if transaction_scale:
1669 tg_instance.set_rate_provider_defaults(
1670 frame_size=frame_size,
1671 traffic_profile=traffic_profile,
1672 negative_loss=False,
1673 sleep_till_duration=True,
1676 traffic_directions=traffic_directions,
1677 transaction_duration=transaction_duration,
1678 transaction_scale=transaction_scale,
1679 transaction_type=transaction_type,
1680 use_latency=use_latency,
1681 ramp_up_rate=ramp_up_rate,
1682 ramp_up_duration=ramp_up_duration,
1683 state_timeout=state_timeout,
1685 algorithm = PLRsearch(
1686 measurer=tg_instance,
1687 trial_duration_per_trial=tdpt,
1688 packet_loss_ratio_target=plr_target,
1689 trial_number_offset=initial_count,
1691 trace_enabled=trace_enabled,
1693 result = algorithm.search(