1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Performance testing traffic generator library."""
18 from robot.api import logger
19 from robot.libraries.BuiltIn import BuiltIn
21 from .Constants import Constants
22 from .CpuUtils import CpuUtils
23 from .DropRateSearch import DropRateSearch
24 from .MLRsearch.AbstractMeasurer import AbstractMeasurer
25 from .MLRsearch.MultipleLossRatioSearch import MultipleLossRatioSearch
26 from .MLRsearch.ReceiveRateMeasurement import ReceiveRateMeasurement
27 from .PLRsearch.PLRsearch import PLRsearch
28 from .OptionString import OptionString
29 from .ssh import exec_cmd_no_error, exec_cmd
30 from .topology import NodeType
31 from .topology import NodeSubTypeTG
32 from .topology import Topology
34 __all__ = [u"TGDropRateSearchImpl", u"TrafficGenerator", u"OptimizedSearch"]
def check_subtype(node):
    """Return supported subtype of given node, or raise an exception.

    Currently only one subtype is supported,
    but we want our code to be ready for other ones.

    :param node: Topology node to check. Can be None.
    :type node: dict or NoneType
    :returns: Subtype detected.
    :rtype: NodeSubTypeTG
    :raises RuntimeError: If node is not supported, message explains how.
    """
    # Each failed check leaves an explanation in msg;
    # only a fully supported node reaches the early return.
    if node.get(u"type") is None:
        msg = u"Node type is not defined"
    elif node[u"type"] != NodeType.TG:
        msg = f"Node type is {node[u'type']!r}, not a TG"
    elif node.get(u"subtype") is None:
        msg = u"TG subtype is not defined"
    elif node[u"subtype"] != NodeSubTypeTG.TREX:
        msg = f"TG subtype {node[u'subtype']!r} is not supported"
    else:
        return NodeSubTypeTG.TREX
    raise RuntimeError(msg)
class TGDropRateSearchImpl(DropRateSearch):
    """Drop Rate Search implementation."""

    # def __init__(self):
    #     super(TGDropRateSearchImpl, self).__init__()

    def measure_loss(
            self, rate, frame_size, loss_acceptance, loss_acceptance_type,
            traffic_profile):
        """Runs the traffic and evaluate the measured results.

        :param rate: Offered traffic load.
        :param frame_size: Size of frame.
        :param loss_acceptance: Permitted drop ratio or frames count.
        :param loss_acceptance_type: Type of permitted loss.
        :param traffic_profile: Module name as a traffic profile identifier.
            See GPL/traffic_profiles/trex for implemented modules.
        :type rate: float
        :type frame_size: str
        :type loss_acceptance: float
        :type loss_acceptance_type: LossAcceptanceType
        :type traffic_profile: str
        :returns: Drop threshold exceeded? (True/False)
        :rtype: bool
        :raises NotImplementedError: If TG is not supported.
        :raises RuntimeError: If TG is not specified.
        """
        # we need instance of TrafficGenerator instantiated by Robot Framework
        # to be able to use trex_stl-*()
        tg_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.TrafficGenerator"
        )
        subtype = check_subtype(tg_instance.node)
        if subtype == NodeSubTypeTG.TREX:
            unit_rate = str(rate) + self.get_rate_type_str()
            tg_instance.trex_stl_start_remote_exec(
                self.get_duration(), unit_rate, frame_size, traffic_profile
            )
            loss = tg_instance.get_loss()
            sent = tg_instance.get_sent()
            if self.loss_acceptance_type_is_percentage():
                loss = (float(loss) / float(sent)) * 100
            logger.trace(
                f"comparing: {loss} < {loss_acceptance} {loss_acceptance_type}"
            )
            return float(loss) <= float(loss_acceptance)
        return False

    def get_latency(self):
        """Returns min/avg/max latency.

        :returns: Latency stats.
        :rtype: list
        """
        tg_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.TrafficGenerator"
        )
        return tg_instance.get_latency_int()
class TrexMode:
    """Defines mode of T-Rex traffic generator."""
    # Advanced stateful mode
    ASTF = u"ASTF"
    # Stateless mode
    STL = u"STL"
# TODO: Pylint says too-many-instance-attributes.
class TrafficGenerator(AbstractMeasurer):
    """Traffic Generator."""

    # TODO: Remove "trex" from lines which could work with other TGs.

    # Use one instance of TrafficGenerator for all tests in test suite
    ROBOT_LIBRARY_SCOPE = u"TEST SUITE"
140 # TODO: Separate into few dataclasses/dicts.
141 # Pylint dislikes large unstructured state, and it is right.
144 # TG interface order mapping
145 self._ifaces_reordered = False
146 # Result holding fields, to be removed.
151 self._received = None
152 self._approximated_rate = None
153 self._approximated_duration = None
155 # Measurement input fields, needed for async stop result.
156 self._start_time = None
157 self._stop_time = None
159 self._target_duration = None
160 self._duration = None
161 # Other input parameters, not knowable from measure() signature.
162 self.frame_size = None
163 self.traffic_profile = None
164 self.traffic_directions = None
165 self.negative_loss = None
166 self.use_latency = None
169 self.transaction_scale = None
170 self.transaction_duration = None
171 self.sleep_till_duration = None
172 self.transaction_type = None
173 self.duration_limit = None
174 # Transient data needed for async measurements.
175 self._xstats = (None, None)
176 # TODO: Rename "xstats" to something opaque, so T-Rex is not privileged?
182 :returns: Traffic generator node.
188 """Return number of lost packets.
190 :returns: Number of lost packets.
196 """Return number of sent packets.
198 :returns: Number of sent packets.
    def get_received(self):
        """Return number of received packets.

        :returns: Number of received packets.
        :rtype: str
        """
        return self._received
211 def get_latency_int(self):
212 """Return rounded min/avg/max latency.
214 :returns: Latency stats.
    def get_approximated_rate(self):
        """Return approximated rate computed as ratio of transmitted packets
        over duration of trial.

        :returns: Approximated rate.
        :rtype: str
        """
        return self._approximated_rate
228 def get_l7_data(self):
231 :returns: Number of received packets.
236 def check_mode(self, expected_mode):
239 :param expected_mode: Expected traffic generator mode.
240 :type expected_mode: object
241 :raises RuntimeError: In case of unexpected TG mode.
243 if self._mode == expected_mode:
246 f"{self._node[u'subtype']} not running in {expected_mode} mode!"
249 # TODO: pylint says disable=too-many-locals.
250 def initialize_traffic_generator(
251 self, tg_node, tg_if1, tg_if2, tg_if1_adj_node, tg_if1_adj_if,
252 tg_if2_adj_node, tg_if2_adj_if, osi_layer, tg_if1_dst_mac=None,
253 tg_if2_dst_mac=None):
254 """TG initialization.
256 TODO: Document why do we need (and how do we use) _ifaces_reordered.
258 :param tg_node: Traffic generator node.
259 :param tg_if1: TG - name of first interface.
260 :param tg_if2: TG - name of second interface.
261 :param tg_if1_adj_node: TG if1 adjecent node.
262 :param tg_if1_adj_if: TG if1 adjecent interface.
263 :param tg_if2_adj_node: TG if2 adjecent node.
264 :param tg_if2_adj_if: TG if2 adjecent interface.
265 :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
266 :param tg_if1_dst_mac: Interface 1 destination MAC address.
267 :param tg_if2_dst_mac: Interface 2 destination MAC address.
271 :type tg_if1_adj_node: dict
272 :type tg_if1_adj_if: str
273 :type tg_if2_adj_node: dict
274 :type tg_if2_adj_if: str
276 :type tg_if1_dst_mac: str
277 :type tg_if2_dst_mac: str
279 :raises RuntimeError: In case of issue during initialization.
281 subtype = check_subtype(tg_node)
282 if subtype == NodeSubTypeTG.TREX:
284 self._mode = TrexMode.ASTF if osi_layer == u"L7" else TrexMode.STL
287 if1[u"pci"] = Topology().get_interface_pci_addr(self._node, tg_if1)
288 if2[u"pci"] = Topology().get_interface_pci_addr(self._node, tg_if2)
289 if1[u"addr"] = Topology().get_interface_mac(self._node, tg_if1)
290 if2[u"addr"] = Topology().get_interface_mac(self._node, tg_if2)
292 if osi_layer == u"L2":
293 if1[u"adj_addr"] = if2[u"addr"]
294 if2[u"adj_addr"] = if1[u"addr"]
295 elif osi_layer in (u"L3", u"L7"):
296 if1[u"adj_addr"] = Topology().get_interface_mac(
297 tg_if1_adj_node, tg_if1_adj_if
299 if2[u"adj_addr"] = Topology().get_interface_mac(
300 tg_if2_adj_node, tg_if2_adj_if
303 raise ValueError(u"Unknown OSI layer!")
305 # in case of switched environment we can override MAC addresses
306 if tg_if1_dst_mac is not None and tg_if2_dst_mac is not None:
307 if1[u"adj_addr"] = tg_if1_dst_mac
308 if2[u"adj_addr"] = tg_if2_dst_mac
310 if min(if1[u"pci"], if2[u"pci"]) != if1[u"pci"]:
312 self._ifaces_reordered = True
314 master_thread_id, latency_thread_id, socket, threads = \
315 CpuUtils.get_affinity_trex(
316 self._node, tg_if1, tg_if2,
317 tg_dtc=Constants.TREX_CORE_COUNT)
319 if osi_layer in (u"L2", u"L3", u"L7"):
322 f"sh -c 'cat << EOF > /etc/trex_cfg.yaml\n"
324 f" c: {len(threads)}\n"
325 f" limit_memory: {Constants.TREX_LIMIT_MEMORY}\n"
326 f" interfaces: [\"{if1[u'pci']}\",\"{if2[u'pci']}\"]\n"
328 f" - dest_mac: \'{if1[u'adj_addr']}\'\n"
329 f" src_mac: \'{if1[u'addr']}\'\n"
330 f" - dest_mac: \'{if2[u'adj_addr']}\'\n"
331 f" src_mac: \'{if2[u'addr']}\'\n"
333 f" master_thread_id: {master_thread_id}\n"
334 f" latency_thread_id: {latency_thread_id}\n"
336 f" - socket: {socket}\n"
337 f" threads: {threads}\n"
339 sudo=True, message=u"T-Rex config generation!"
342 raise ValueError(u"Unknown OSI layer!")
344 TrafficGenerator.startup_trex(
345 self._node, osi_layer, subtype=subtype
349 def startup_trex(tg_node, osi_layer, subtype=None):
350 """Startup sequence for the TRex traffic generator.
352 :param tg_node: Traffic generator node.
353 :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
354 :param subtype: Traffic generator sub-type.
357 :type subtype: NodeSubTypeTG
358 :raises RuntimeError: If T-Rex startup failed.
359 :raises ValueError: If OSI layer is not supported.
362 subtype = check_subtype(tg_node)
363 if subtype == NodeSubTypeTG.TREX:
364 for _ in range(0, 3):
365 # Kill TRex only if it is already running.
366 cmd = u"sh -c \"pgrep t-rex && pkill t-rex && sleep 3 || true\""
368 tg_node, cmd, sudo=True, message=u"Kill TRex failed!"
373 for port in tg_node[u"interfaces"].values():
374 if u'Mellanox' not in port.get(u'model'):
375 ports += f" {port.get(u'pci_address')}"
377 cmd = f"sh -c \"cd {Constants.TREX_INSTALL_DIR}/scripts/ && " \
378 f"./dpdk_nic_bind.py -u {ports} || true\""
380 tg_node, cmd, sudo=True,
381 message=u"Unbind PCI ports from driver failed!"
385 cd_cmd = f"cd '{Constants.TREX_INSTALL_DIR}/scripts/'"
386 trex_cmd = OptionString([u"nohup", u"./t-rex-64"])
388 trex_cmd.add(u"--prefix $(hostname)")
389 trex_cmd.add(u"--hdrh")
390 trex_cmd.add(u"--no-scapy-server")
391 trex_cmd.add_if(u"--astf", osi_layer == u"L7")
392 # OptionString does not create double space if extra is empty.
393 trex_cmd.add(f"{Constants.TREX_EXTRA_CMDLINE}")
394 inner_command = f"{cd_cmd} && {trex_cmd} > /tmp/trex.log 2>&1 &"
395 cmd = f"sh -c \"{inner_command}\" > /dev/null"
397 exec_cmd_no_error(tg_node, cmd, sudo=True)
399 cmd = u"sh -c \"cat /tmp/trex.log\""
401 tg_node, cmd, sudo=True,
402 message=u"Get TRex logs failed!"
404 raise RuntimeError(u"Start TRex failed!")
406 # Test T-Rex API responsiveness.
408 cmd += f" {Constants.REMOTE_FW_DIR}/GPL/tools/trex/"
409 if osi_layer in (u"L2", u"L3"):
410 cmd += f"trex_stl_assert.py"
411 elif osi_layer == u"L7":
412 cmd += f"trex_astf_assert.py"
414 raise ValueError(u"Unknown OSI layer!")
417 tg_node, cmd, sudo=True,
418 message=u"T-Rex API is not responding!", retries=20
423 # After max retries TRex is still not responding to API critical
425 exec_cmd(tg_node, u"cat /tmp/trex.log", sudo=True)
426 raise RuntimeError(u"Start T-Rex failed after multiple retries!")
429 def is_trex_running(node):
430 """Check if T-Rex is running using pidof.
432 :param node: Traffic generator node.
434 :returns: True if T-Rex is running otherwise False.
437 ret, _, _ = exec_cmd(node, u"pgrep t-rex", sudo=True)
438 return bool(int(ret) == 0)
441 def teardown_traffic_generator(node):
444 :param node: Traffic generator node.
447 :raises RuntimeError: If node type is not a TG,
448 or if T-Rex teardown fails.
450 subtype = check_subtype(node)
451 if subtype == NodeSubTypeTG.TREX:
455 u"\"if pgrep t-rex; then sudo pkill t-rex && sleep 3; fi\"",
457 message=u"T-Rex kill failed!"
460 def trex_astf_stop_remote_exec(self, node):
461 """Execute T-Rex ASTF script on remote node over ssh to stop running
464 Internal state is updated with measurement results.
466 :param node: T-Rex generator node.
468 :raises RuntimeError: If stop traffic script fails.
470 command_line = OptionString().add(u"python3")
471 dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
472 command_line.add(f"'{dirname}/trex_astf_stop.py'")
473 command_line.change_prefix(u"--")
474 for index, value in enumerate(self._xstats):
475 if value is not None:
476 value = value.replace(u"'", u"\"")
477 command_line.add_equals(f"xstat{index}", f"'{value}'")
478 stdout, _ = exec_cmd_no_error(
480 message=u"T-Rex ASTF runtime error!"
482 self._parse_traffic_results(stdout)
484 def trex_stl_stop_remote_exec(self, node):
485 """Execute T-Rex STL script on remote node over ssh to stop running
488 Internal state is updated with measurement results.
490 :param node: T-Rex generator node.
492 :raises RuntimeError: If stop traffic script fails.
494 command_line = OptionString().add(u"python3")
495 dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
496 command_line.add(f"'{dirname}/trex_stl_stop.py'")
497 command_line.change_prefix(u"--")
498 for index, value in enumerate(self._xstats):
499 if value is not None:
500 value = value.replace(u"'", u"\"")
501 command_line.add_equals(f"xstat{index}", f"'{value}'")
502 stdout, _ = exec_cmd_no_error(
504 message=u"T-Rex STL runtime error!"
506 self._parse_traffic_results(stdout)
508 def stop_traffic_on_tg(self):
509 """Stop all traffic on TG.
511 :returns: Structure containing the result of the measurement.
512 :rtype: ReceiveRateMeasurement
513 :raises ValueError: If TG traffic profile is not supported.
515 subtype = check_subtype(self._node)
516 if subtype != NodeSubTypeTG.TREX:
517 raise ValueError(f"Unsupported TG subtype: {subtype!r}")
518 if u"trex-astf" in self.traffic_profile:
519 self.trex_astf_stop_remote_exec(self._node)
520 elif u"trex-stl" in self.traffic_profile:
521 self.trex_stl_stop_remote_exec(self._node)
523 raise ValueError(u"Unsupported T-Rex traffic profile!")
524 self._stop_time = time.monotonic()
526 return self.get_measurement_result()
528 def trex_astf_start_remote_exec(
529 self, duration, multiplier, async_call=False):
530 """Execute T-Rex ASTF script on remote node over ssh to start running
533 In sync mode, measurement results are stored internally.
534 In async mode, initial data including xstats are stored internally.
536 This method contains the logic to compute duration as maximum time
537 if transaction_scale is nonzero.
538 The transaction_scale argument defines (limits) how many transactions
539 will be started in total. As that amount of transaction can take
540 considerable time (sometimes due to explicit delays in the profile),
541 the real time a trial needs to finish is computed here. For now,
542 in that case the duration argument is ignored, assuming it comes
543 from ASTF-unaware search algorithm. The overall time a single
544 transaction needs is given in parameter transaction_duration,
545 it includes both explicit delays and implicit time it takes
546 to transfer data (or whatever the transaction does).
548 Currently it is observed TRex does not start the ASTF traffic
549 immediately, an ad-hoc constant is added to the computed duration
550 to compensate for that.
552 If transaction_scale is zero, duration is not recomputed.
553 It is assumed the subsequent result parsing gets the real duration
554 if the traffic stops sooner for any reason.
556 Currently, it is assumed traffic profile defines a single transaction.
557 To avoid heavy logic here, the input rate is expected to be in
558 transactions per second, as that directly translates to TRex multiplier,
559 (assuming the profile does not override the default cps value of one).
561 :param duration: Time expressed in seconds for how long to send traffic.
562 :param multiplier: Traffic rate in transactions per second.
563 :param async_call: If enabled then don't wait for all incoming traffic.
564 :type duration: float
565 :type multiplier: int
566 :type async_call: bool
567 :raises RuntimeError: In case of T-Rex driver issue.
569 self.check_mode(TrexMode.ASTF)
570 p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
571 if not isinstance(duration, (float, int)):
572 duration = float(duration)
575 computed_duration = duration
577 if self.transaction_scale:
578 computed_duration = self.transaction_scale / multiplier
579 # Log the computed duration,
580 # so we can compare with what telemetry suggests
581 # the real duration was.
582 logger.debug(f"Expected duration {computed_duration}")
583 computed_duration += 0.1115
585 if self.duration_limit:
586 computed_duration = min(computed_duration, self.duration_limit)
588 command_line = OptionString().add(u"python3")
589 dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
590 command_line.add(f"'{dirname}/trex_astf_profile.py'")
591 command_line.change_prefix(u"--")
592 dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
593 command_line.add_with_value(
594 u"profile", f"'{dirname}/{self.traffic_profile}.py'"
596 command_line.add_with_value(u"duration", f"{computed_duration!r}")
597 command_line.add_with_value(u"frame_size", self.frame_size)
598 command_line.add_with_value(u"multiplier", multiplier)
599 command_line.add_with_value(u"port_0", p_0)
600 command_line.add_with_value(u"port_1", p_1)
601 command_line.add_with_value(
602 u"traffic_directions", self.traffic_directions
604 command_line.add_if(u"async_start", async_call)
605 command_line.add_if(u"latency", self.use_latency)
606 command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
608 self._start_time = time.monotonic()
609 self._rate = multiplier
610 stdout, _ = exec_cmd_no_error(
611 self._node, command_line, timeout=computed_duration + 10.0,
612 message=u"T-Rex ASTF runtime error!"
617 self._target_duration = None
618 self._duration = None
619 self._received = None
623 xstats = [None, None]
624 self._l7_data = dict()
625 self._l7_data[u"client"] = dict()
626 self._l7_data[u"client"][u"active_flows"] = None
627 self._l7_data[u"client"][u"established_flows"] = None
628 self._l7_data[u"client"][u"traffic_duration"] = None
629 self._l7_data[u"server"] = dict()
630 self._l7_data[u"server"][u"active_flows"] = None
631 self._l7_data[u"server"][u"established_flows"] = None
632 self._l7_data[u"server"][u"traffic_duration"] = None
633 if u"udp" in self.traffic_profile:
634 self._l7_data[u"client"][u"udp"] = dict()
635 self._l7_data[u"client"][u"udp"][u"connects"] = None
636 self._l7_data[u"client"][u"udp"][u"closed_flows"] = None
637 self._l7_data[u"client"][u"udp"][u"err_cwf"] = None
638 self._l7_data[u"server"][u"udp"] = dict()
639 self._l7_data[u"server"][u"udp"][u"accepted_flows"] = None
640 self._l7_data[u"server"][u"udp"][u"closed_flows"] = None
641 elif u"tcp" in self.traffic_profile:
642 self._l7_data[u"client"][u"tcp"] = dict()
643 self._l7_data[u"client"][u"tcp"][u"initiated_flows"] = None
644 self._l7_data[u"client"][u"tcp"][u"connects"] = None
645 self._l7_data[u"client"][u"tcp"][u"closed_flows"] = None
646 self._l7_data[u"client"][u"tcp"][u"connattempt"] = None
647 self._l7_data[u"server"][u"tcp"] = dict()
648 self._l7_data[u"server"][u"tcp"][u"accepted_flows"] = None
649 self._l7_data[u"server"][u"tcp"][u"connects"] = None
650 self._l7_data[u"server"][u"tcp"][u"closed_flows"] = None
652 logger.warn(u"Unsupported T-Rex ASTF traffic profile!")
654 for line in stdout.splitlines():
655 if f"Xstats snapshot {index}: " in line:
656 xstats[index] = line[19:]
660 self._xstats = tuple(xstats)
662 self._target_duration = duration
663 self._duration = computed_duration
664 self._parse_traffic_results(stdout)
666 def trex_stl_start_remote_exec(self, duration, rate, async_call=False):
667 """Execute T-Rex STL script on remote node over ssh to start running
670 In sync mode, measurement results are stored internally.
671 In async mode, initial data including xstats are stored internally.
673 Mode-unaware code (e.g. in search algorithms) works with transactions.
674 To keep the logic simple, multiplier is set to that value.
675 As bidirectional traffic profiles send packets in both directions,
676 they are treated as transactions with two packets (one per direction).
678 :param duration: Time expressed in seconds for how long to send traffic.
679 :param rate: Traffic rate in transactions per second.
680 :param async_call: If enabled then don't wait for all incoming traffic.
681 :type duration: float
683 :type async_call: bool
684 :raises RuntimeError: In case of T-Rex driver issue.
686 self.check_mode(TrexMode.STL)
687 p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
688 if not isinstance(duration, (float, int)):
689 duration = float(duration)
690 if self.duration_limit:
691 duration = min(duration, self.duration_limit)
693 command_line = OptionString().add(u"python3")
694 dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
695 command_line.add(f"'{dirname}/trex_stl_profile.py'")
696 command_line.change_prefix(u"--")
697 dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
698 command_line.add_with_value(
699 u"profile", f"'{dirname}/{self.traffic_profile}.py'"
701 command_line.add_with_value(u"duration", f"{duration!r}")
702 command_line.add_with_value(u"frame_size", self.frame_size)
703 command_line.add_with_value(u"rate", f"{rate!r}")
704 command_line.add_with_value(u"port_0", p_0)
705 command_line.add_with_value(u"port_1", p_1)
706 command_line.add_with_value(
707 u"traffic_directions", self.traffic_directions
709 command_line.add_if(u"async_start", async_call)
710 command_line.add_if(u"latency", self.use_latency)
711 command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
713 # TODO: This is ugly. Handle parsing better.
714 self._start_time = time.monotonic()
715 self._rate = float(rate[:-3]) if u"pps" in rate else float(rate)
716 stdout, _ = exec_cmd_no_error(
717 self._node, command_line, timeout=int(duration) + 60,
718 message=u"T-Rex STL runtime error"
723 self._target_duration = None
724 self._duration = None
725 self._received = None
730 xstats = [None, None]
732 for line in stdout.splitlines():
733 if f"Xstats snapshot {index}: " in line:
734 xstats[index] = line[19:]
738 self._xstats = tuple(xstats)
740 self._target_duration = duration
741 self._duration = duration
742 self._parse_traffic_results(stdout)
744 def send_traffic_on_tg(
752 traffic_directions=2,
753 transaction_duration=0.0,
755 transaction_type=u"packet",
759 """Send traffic from all configured interfaces on TG.
761 In async mode, xstats is stored internally,
762 to enable getting correct result when stopping the traffic.
763 In both modes, stdout is returned,
764 but _parse_traffic_results only works in sync output.
766 Note that traffic generator uses DPDK driver which might
767 reorder port numbers based on wiring and PCI numbering.
768 This method handles that, so argument values are invariant,
769 but you can see swapped valued in debug logs.
771 When transaction_scale is specified, the duration value is ignored
772 and the needed time is computed. For cases where this results in
773 to too long measurement (e.g. teardown trial with small rate),
774 duration_limit is applied (of non-zero), so the trial is stopped sooner.
776 Bidirectional STL profiles are treated as transactions with two packets.
778 :param duration: Duration of test traffic generation in seconds.
779 :param rate: Traffic rate in transactions per second.
780 :param frame_size: Frame size (L2) in Bytes.
781 :param traffic_profile: Module name as a traffic profile identifier.
782 See GPL/traffic_profiles/trex for implemented modules.
783 :param async_call: Async mode.
784 :param ppta: Packets per transaction, aggregated over directions.
785 Needed for udp_pps which does not have a good transaction counter,
786 so we need to compute expected number of packets.
788 :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
790 :param transaction_duration: Total expected time to close transaction.
791 :param transaction_scale: Number of transactions to perform.
792 0 (default) means unlimited.
793 :param transaction_type: An identifier specifying which counters
794 and formulas to use when computing attempted and failed
795 transactions. Default: "packet".
796 :param duration_limit: Zero or maximum limit for computed (or given)
798 :param use_latency: Whether to measure latency during the trial.
800 :type duration: float
802 :type frame_size: str
803 :type traffic_profile: str
804 :type async_call: bool
806 :type traffic_directions: int
807 :type transaction_duration: float
808 :type transaction_scale: int
809 :type transaction_type: str
810 :type duration_limit: float
811 :type use_latency: bool
812 :returns: TG results.
814 :raises ValueError: If TG traffic profile is not supported.
816 self.set_rate_provider_defaults(
817 frame_size=frame_size,
818 traffic_profile=traffic_profile,
820 traffic_directions=traffic_directions,
821 transaction_duration=transaction_duration,
822 transaction_scale=transaction_scale,
823 transaction_type=transaction_type,
824 duration_limit=duration_limit,
825 use_latency=use_latency,
827 self._send_traffic_on_tg_internal(duration, rate, async_call)
829 def _send_traffic_on_tg_internal(self, duration, rate, async_call=False):
830 """Send traffic from all configured interfaces on TG.
832 This is an internal function, it assumes set_rate_provider_defaults
833 has been called to remember most values.
834 The reason why need to remember various values is that
835 the traffic can be asynchronous, and parsing needs those values.
836 The reason why this is is a separate function from the one
837 which calls set_rate_provider_defaults is that some search algorithms
838 need to specify their own values, and we do not want the measure call
839 to overwrite them with defaults.
841 :param duration: Duration of test traffic generation in seconds.
842 :param rate: Traffic rate in transactions per second.
843 :param async_call: Async mode.
844 :type duration: float
846 :type async_call: bool
847 :returns: TG results.
849 :raises ValueError: If TG traffic profile is not supported.
851 subtype = check_subtype(self._node)
852 if subtype == NodeSubTypeTG.TREX:
853 if u"trex-astf" in self.traffic_profile:
854 self.trex_astf_start_remote_exec(
855 duration, float(rate), async_call
857 elif u"trex-stl" in self.traffic_profile:
858 unit_rate_str = str(rate) + u"pps"
859 # TODO: Suport transaction_scale et al?
860 self.trex_stl_start_remote_exec(
861 duration, unit_rate_str, async_call
864 raise ValueError(u"Unsupported T-Rex traffic profile!")
868 def no_traffic_loss_occurred(self):
869 """Fail if loss occurred in traffic run.
872 :raises Exception: If loss occured.
874 if self._loss is None:
875 raise RuntimeError(u"The traffic generation has not been issued")
876 if self._loss != u"0":
877 raise RuntimeError(f"Traffic loss occurred: {self._loss}")
879 def fail_if_no_traffic_forwarded(self):
880 """Fail if no traffic forwarded.
882 TODO: Check number of passed transactions instead.
885 :raises Exception: If no traffic forwarded.
887 if self._received is None:
888 raise RuntimeError(u"The traffic generation has not been issued")
889 if self._received == u"0":
890 raise RuntimeError(u"No traffic forwarded")
892 def partial_traffic_loss_accepted(
893 self, loss_acceptance, loss_acceptance_type):
894 """Fail if loss is higher then accepted in traffic run.
896 :param loss_acceptance: Permitted drop ratio or frames count.
897 :param loss_acceptance_type: Type of permitted loss.
898 :type loss_acceptance: float
899 :type loss_acceptance_type: LossAcceptanceType
901 :raises Exception: If loss is above acceptance criteria.
903 if self._loss is None:
904 raise Exception(u"The traffic generation has not been issued")
906 if loss_acceptance_type == u"percentage":
907 loss = (float(self._loss) / float(self._sent)) * 100
908 elif loss_acceptance_type == u"frames":
909 loss = float(self._loss)
911 raise Exception(u"Loss acceptance type not supported")
913 if loss > float(loss_acceptance):
915 f"Traffic loss {loss} above loss acceptance: {loss_acceptance}"
918 def _parse_traffic_results(self, stdout):
919 """Parse stdout of scripts into fields of self.
921 Block of code to reuse, by sync start, or stop after async.
923 :param stdout: Text containing the standard output.
926 subtype = check_subtype(self._node)
927 if subtype == NodeSubTypeTG.TREX:
928 # Last line from console output
929 line = stdout.splitlines()[-1]
930 results = line.split(u";")
931 if results[-1] in (u" ", u""):
933 self._result = dict()
934 for result in results:
935 key, value = result.split(u"=", maxsplit=1)
936 self._result[key.strip()] = value
937 logger.info(f"TrafficGen results:\n{self._result}")
938 self._received = int(self._result.get(u"total_received"), 0)
939 self._sent = int(self._result.get(u"total_sent", 0))
940 self._loss = int(self._result.get(u"frame_loss", 0))
941 self._approximated_duration = \
942 self._result.get(u"approximated_duration", 0.0)
943 if u"manual" not in str(self._approximated_duration):
944 self._approximated_duration = float(self._approximated_duration)
945 self._latency = list()
946 self._latency.append(self._result.get(u"latency_stream_0(usec)"))
947 self._latency.append(self._result.get(u"latency_stream_1(usec)"))
948 if self._mode == TrexMode.ASTF:
949 self._l7_data = dict()
950 self._l7_data[u"client"] = dict()
951 self._l7_data[u"client"][u"sent"] = \
952 int(self._result.get(u"client_sent", 0))
953 self._l7_data[u"client"][u"received"] = \
954 int(self._result.get(u"client_received", 0))
955 self._l7_data[u"client"][u"active_flows"] = \
956 int(self._result.get(u"client_active_flows", 0))
957 self._l7_data[u"client"][u"established_flows"] = \
958 int(self._result.get(u"client_established_flows", 0))
959 self._l7_data[u"client"][u"traffic_duration"] = \
960 float(self._result.get(u"client_traffic_duration", 0.0))
961 self._l7_data[u"client"][u"err_rx_throttled"] = \
962 int(self._result.get(u"client_err_rx_throttled", 0))
963 self._l7_data[u"client"][u"err_c_nf_throttled"] = \
964 int(self._result.get(u"client_err_nf_throttled", 0))
965 self._l7_data[u"client"][u"err_flow_overflow"] = \
966 int(self._result.get(u"client_err_flow_overflow", 0))
967 self._l7_data[u"server"] = dict()
968 self._l7_data[u"server"][u"active_flows"] = \
969 int(self._result.get(u"server_active_flows", 0))
970 self._l7_data[u"server"][u"established_flows"] = \
971 int(self._result.get(u"server_established_flows", 0))
972 self._l7_data[u"server"][u"traffic_duration"] = \
973 float(self._result.get(u"server_traffic_duration", 0.0))
974 self._l7_data[u"server"][u"err_rx_throttled"] = \
975 int(self._result.get(u"client_err_rx_throttled", 0))
976 if u"udp" in self.traffic_profile:
977 self._l7_data[u"client"][u"udp"] = dict()
978 self._l7_data[u"client"][u"udp"][u"connects"] = \
979 int(self._result.get(u"client_udp_connects", 0))
980 self._l7_data[u"client"][u"udp"][u"closed_flows"] = \
981 int(self._result.get(u"client_udp_closed", 0))
982 self._l7_data[u"client"][u"udp"][u"tx_bytes"] = \
983 int(self._result.get(u"client_udp_tx_bytes", 0))
984 self._l7_data[u"client"][u"udp"][u"rx_bytes"] = \
985 int(self._result.get(u"client_udp_rx_bytes", 0))
986 self._l7_data[u"client"][u"udp"][u"tx_packets"] = \
987 int(self._result.get(u"client_udp_tx_packets", 0))
988 self._l7_data[u"client"][u"udp"][u"rx_packets"] = \
989 int(self._result.get(u"client_udp_rx_packets", 0))
990 self._l7_data[u"client"][u"udp"][u"keep_drops"] = \
991 int(self._result.get(u"client_udp_keep_drops", 0))
992 self._l7_data[u"client"][u"udp"][u"err_cwf"] = \
993 int(self._result.get(u"client_err_cwf", 0))
994 self._l7_data[u"server"][u"udp"] = dict()
995 self._l7_data[u"server"][u"udp"][u"accepted_flows"] = \
996 int(self._result.get(u"server_udp_accepts", 0))
997 self._l7_data[u"server"][u"udp"][u"closed_flows"] = \
998 int(self._result.get(u"server_udp_closed", 0))
999 self._l7_data[u"server"][u"udp"][u"tx_bytes"] = \
1000 int(self._result.get(u"server_udp_tx_bytes", 0))
1001 self._l7_data[u"server"][u"udp"][u"rx_bytes"] = \
1002 int(self._result.get(u"server_udp_rx_bytes", 0))
1003 self._l7_data[u"server"][u"udp"][u"tx_packets"] = \
1004 int(self._result.get(u"server_udp_tx_packets", 0))
1005 self._l7_data[u"server"][u"udp"][u"rx_packets"] = \
1006 int(self._result.get(u"server_udp_rx_packets", 0))
1007 elif u"tcp" in self.traffic_profile:
1008 self._l7_data[u"client"][u"tcp"] = dict()
1009 self._l7_data[u"client"][u"tcp"][u"initiated_flows"] = \
1010 int(self._result.get(u"client_tcp_connect_inits", 0))
1011 self._l7_data[u"client"][u"tcp"][u"connects"] = \
1012 int(self._result.get(u"client_tcp_connects", 0))
1013 self._l7_data[u"client"][u"tcp"][u"closed_flows"] = \
1014 int(self._result.get(u"client_tcp_closed", 0))
1015 self._l7_data[u"client"][u"tcp"][u"connattempt"] = \
1016 int(self._result.get(u"client_tcp_connattempt", 0))
1017 self._l7_data[u"client"][u"tcp"][u"tx_bytes"] = \
1018 int(self._result.get(u"client_tcp_tx_bytes", 0))
1019 self._l7_data[u"client"][u"tcp"][u"rx_bytes"] = \
1020 int(self._result.get(u"client_tcp_rx_bytes", 0))
1021 self._l7_data[u"server"][u"tcp"] = dict()
1022 self._l7_data[u"server"][u"tcp"][u"accepted_flows"] = \
1023 int(self._result.get(u"server_tcp_accepts", 0))
1024 self._l7_data[u"server"][u"tcp"][u"connects"] = \
1025 int(self._result.get(u"server_tcp_connects", 0))
1026 self._l7_data[u"server"][u"tcp"][u"closed_flows"] = \
1027 int(self._result.get(u"server_tcp_closed", 0))
1028 self._l7_data[u"server"][u"tcp"][u"tx_bytes"] = \
1029 int(self._result.get(u"server_tcp_tx_bytes", 0))
1030 self._l7_data[u"server"][u"tcp"][u"rx_bytes"] = \
1031 int(self._result.get(u"server_tcp_rx_bytes", 0))
1033 def get_measurement_result(self):
1034 """Return the result of last measurement as ReceiveRateMeasurement.
1036 Separate function, as measurements can end either by time
1037 or by explicit call, this is the common block at the end.
1039 The target_tr field of ReceiveRateMeasurement is in
1040 transactions per second. Transmit count and loss count units
1041 depend on the transaction type. Usually they are in transactions
1042 per second, or aggregate packets per second.
1044 TODO: Fail on running or already reported measurement.
1046 :returns: Structure containing the result of the measurement.
1047 :rtype: ReceiveRateMeasurement
1050 # Client duration seems to include a setup period
1051 # where TRex does not send any packets yet.
1052 # Server duration does not include it.
1053 server_data = self._l7_data[u"server"]
1054 approximated_duration = float(server_data[u"traffic_duration"])
1055 except (KeyError, AttributeError, ValueError, TypeError):
1056 approximated_duration = None
1058 if not approximated_duration:
1059 approximated_duration = float(self._approximated_duration)
1060 except ValueError: # "manual"
1061 approximated_duration = None
1062 if not approximated_duration:
1063 if self._duration and self._duration > 0:
1064 # Known recomputed or target duration.
1065 approximated_duration = self._duration
1067 # It was an explicit stop.
1068 if not self._stop_time:
1069 raise RuntimeError(u"Unable to determine duration.")
1070 approximated_duration = self._stop_time - self._start_time
1071 target_duration = self._target_duration
1072 if not target_duration:
1073 target_duration = approximated_duration
1074 transmit_rate = self._rate
1075 if self.transaction_type == u"packet":
1076 partial_attempt_count = self._sent
1077 expected_attempt_count = self._sent
1078 fail_count = self._loss
1079 elif self.transaction_type == u"udp_cps":
1080 if not self.transaction_scale:
1081 raise RuntimeError(u"Add support for no-limit udp_cps.")
1082 partial_attempt_count = self._l7_data[u"client"][u"sent"]
1083 # We do not care whether TG is slow, it should have attempted all.
1084 expected_attempt_count = self.transaction_scale
1085 pass_count = self._l7_data[u"client"][u"received"]
1086 fail_count = expected_attempt_count - pass_count
1087 elif self.transaction_type == u"tcp_cps":
1088 if not self.transaction_scale:
1089 raise RuntimeError(u"Add support for no-limit tcp_cps.")
1090 ctca = self._l7_data[u"client"][u"tcp"][u"connattempt"]
1091 partial_attempt_count = ctca
1092 # We do not care whether TG is slow, it should have attempted all.
1093 expected_attempt_count = self.transaction_scale
1094 # From TCP point of view, server/connects counts full connections,
1095 # but we are testing NAT session so client/connects counts that
1096 # (half connections from TCP point of view).
1097 pass_count = self._l7_data[u"client"][u"tcp"][u"connects"]
1098 fail_count = expected_attempt_count - pass_count
1099 elif self.transaction_type == u"udp_pps":
1100 if not self.transaction_scale:
1101 raise RuntimeError(u"Add support for no-limit udp_pps.")
1102 partial_attempt_count = self._sent
1103 expected_attempt_count = self.transaction_scale * self.ppta
1104 fail_count = self._loss + (expected_attempt_count - self._sent)
1105 elif self.transaction_type == u"tcp_pps":
1106 if not self.transaction_scale:
1107 raise RuntimeError(u"Add support for no-limit tcp_pps.")
1108 partial_attempt_count = self._sent
1109 expected_attempt_count = self.transaction_scale * self.ppta
1110 # One loss-like scenario happens when TRex receives all packets
1111 # on L2 level, but is not fast enough to process them all
1112 # at L7 level, which leads to retransmissions.
1113 # Those manifest as opackets larger than expected.
1114 # A simple workaround is to add absolute difference.
1115 # Probability of retransmissions exactly cancelling
1116 # packets unsent due to duration stretching is quite low.
1117 fail_count = self._loss + abs(expected_attempt_count - self._sent)
1119 raise RuntimeError(f"Unknown parsing {self.transaction_type!r}")
1120 if fail_count < 0 and not self.negative_loss:
1122 measurement = ReceiveRateMeasurement(
1123 duration=target_duration,
1124 target_tr=transmit_rate,
1125 transmit_count=expected_attempt_count,
1126 loss_count=fail_count,
1127 approximated_duration=approximated_duration,
1128 partial_transmit_count=partial_attempt_count,
1130 measurement.latency = self.get_latency_int()
1133 def measure(self, duration, transmit_rate):
1134 """Run trial measurement, parse and return results.
1136 The input rate is for transactions. Stateles bidirectional traffic
1137 is understood as sequence of (asynchronous) transactions,
1140 The result units depend on test type, generally
1141 the count either transactions or packets (aggregated over directions).
1143 Optionally, this method sleeps if measurement finished before
1144 the time specified as duration.
1146 :param duration: Trial duration [s].
1147 :param transmit_rate: Target rate in transactions per second.
1148 :type duration: float
1149 :type transmit_rate: float
1150 :returns: Structure containing the result of the measurement.
1151 :rtype: ReceiveRateMeasurement
1152 :raises RuntimeError: If TG is not set or if node is not TG
1153 or if subtype is not specified.
1154 :raises NotImplementedError: If TG is not supported.
1156 duration = float(duration)
1157 time_start = time.monotonic()
1158 time_stop = time_start + duration
1161 self._send_traffic_on_tg_internal(
1166 result = self.get_measurement_result()
1167 logger.debug(f"trial measurement result: {result!r}")
1168 # In PLRsearch, computation needs the specified time to complete.
1169 if self.sleep_till_duration:
1170 sleeptime = time_stop - time.monotonic()
1172 # TODO: Sometimes we have time to do additional trials here,
1173 # adapt PLRsearch to accept all the results.
1174 time.sleep(sleeptime)
1177 def set_rate_provider_defaults(
1183 traffic_directions=2,
1184 transaction_duration=0.0,
1185 transaction_scale=0,
1186 transaction_type=u"packet",
1189 sleep_till_duration=False,
1192 """Store values accessed by measure().
1194 :param frame_size: Frame size identifier or value [B].
1195 :param traffic_profile: Module name as a traffic profile identifier.
1196 See GPL/traffic_profiles/trex for implemented modules.
1197 :param ppta: Packets per transaction, aggregated over directions.
1198 Needed for udp_pps which does not have a good transaction counter,
1199 so we need to compute expected number of packets.
1201 :param resetter: Callable to reset DUT state for repeated trials.
1202 :param traffic_directions: Traffic from packet counting point of view
1203 is bi- (2) or uni- (1) directional.
1205 :param transaction_duration: Total expected time to close transaction.
1206 :param transaction_scale: Number of transactions to perform.
1207 0 (default) means unlimited.
1208 :param transaction_type: An identifier specifying which counters
1209 and formulas to use when computing attempted and failed
1210 transactions. Default: "packet".
1211 TODO: Does this also specify parsing for the measured duration?
1212 :param duration_limit: Zero or maximum limit for computed (or given)
1214 :param negative_loss: If false, negative loss is reported as zero loss.
1215 :param sleep_till_duration: If true and measurement returned faster,
1216 sleep until it matches duration. Needed for PLRsearch.
1217 :param use_latency: Whether to measure latency during the trial.
1219 :type frame_size: str or int
1220 :type traffic_profile: str
1222 :type resetter: Optional[Callable[[], None]]
1223 :type traffic_directions: int
1224 :type transaction_duration: float
1225 :type transaction_scale: int
1226 :type transaction_type: str
1227 :type duration_limit: float
1228 :type negative_loss: bool
1229 :type sleep_till_duration: bool
1230 :type use_latency: bool
1232 self.frame_size = frame_size
1233 self.traffic_profile = str(traffic_profile)
1234 self.resetter = resetter
1236 self.traffic_directions = int(traffic_directions)
1237 self.transaction_duration = float(transaction_duration)
1238 self.transaction_scale = int(transaction_scale)
1239 self.transaction_type = str(transaction_type)
1240 self.duration_limit = float(duration_limit)
1241 self.negative_loss = bool(negative_loss)
1242 self.sleep_till_duration = bool(sleep_till_duration)
1243 self.use_latency = bool(use_latency)
1246 class OptimizedSearch:
1247 """Class to be imported as Robot Library, containing search keywords.
1249 Aside of setting up measurer and forwarding arguments,
1250 the main business is to translate min/max rate from unidir to aggregate.
1254 def perform_optimized_ndrpdr_search(
1257 minimum_transmit_rate,
1258 maximum_transmit_rate,
1259 packet_loss_ratio=0.005,
1260 final_relative_width=0.005,
1261 final_trial_duration=30.0,
1262 initial_trial_duration=1.0,
1263 number_of_intermediate_phases=2,
1268 traffic_directions=2,
1269 transaction_duration=0.0,
1270 transaction_scale=0,
1271 transaction_type=u"packet",
1274 """Setup initialized TG, perform optimized search, return intervals.
1276 If transaction_scale is nonzero, all non-init trial durations
1277 are set to 2.0 (as they do not affect the real trial duration)
1278 and zero intermediate phases are used.
1279 The initial phase still uses 1.0 seconds, to force remeasurement.
1280 That makes initial phase act as a warmup.
1282 :param frame_size: Frame size identifier or value [B].
1283 :param traffic_profile: Module name as a traffic profile identifier.
1284 See GPL/traffic_profiles/trex for implemented modules.
1285 :param minimum_transmit_rate: Minimal load in transactions per second.
1286 :param maximum_transmit_rate: Maximal load in transactions per second.
1287 :param packet_loss_ratio: Fraction of packets lost, for PDR [1].
1288 :param final_relative_width: Final lower bound transmit rate
1289 cannot be more distant that this multiple of upper bound [1].
1290 :param final_trial_duration: Trial duration for the final phase [s].
1291 :param initial_trial_duration: Trial duration for the initial phase
1292 and also for the first intermediate phase [s].
1293 :param number_of_intermediate_phases: Number of intermediate phases
1294 to perform before the final phase [1].
1295 :param timeout: The search will fail itself when not finished
1296 before this overall time [s].
1297 :param doublings: How many doublings to do in external search step.
1298 Default 1 is suitable for fairly stable tests,
1299 less stable tests might get better overal duration with 2 or more.
1300 :param ppta: Packets per transaction, aggregated over directions.
1301 Needed for udp_pps which does not have a good transaction counter,
1302 so we need to compute expected number of packets.
1304 :param resetter: Callable to reset DUT state for repeated trials.
1305 :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
1307 :param transaction_duration: Total expected time to close transaction.
1308 :param transaction_scale: Number of transactions to perform.
1309 0 (default) means unlimited.
1310 :param transaction_type: An identifier specifying which counters
1311 and formulas to use when computing attempted and failed
1312 transactions. Default: "packet".
1313 :param use_latency: Whether to measure latency during the trial.
1315 :type frame_size: str or int
1316 :type traffic_profile: str
1317 :type minimum_transmit_rate: float
1318 :type maximum_transmit_rate: float
1319 :type packet_loss_ratio: float
1320 :type final_relative_width: float
1321 :type final_trial_duration: float
1322 :type initial_trial_duration: float
1323 :type number_of_intermediate_phases: int
1324 :type timeout: float
1325 :type doublings: int
1327 :type resetter: Optional[Callable[[], None]]
1328 :type traffic_directions: int
1329 :type transaction_duration: float
1330 :type transaction_scale: int
1331 :type transaction_type: str
1332 :type use_latency: bool
1333 :returns: Structure containing narrowed down NDR and PDR intervals
1334 and their measurements.
1335 :rtype: NdrPdrResult
1336 :raises RuntimeError: If total duration is larger than timeout.
1338 # we need instance of TrafficGenerator instantiated by Robot Framework
1339 # to be able to use trex_stl-*()
1340 tg_instance = BuiltIn().get_library_instance(
1341 u"resources.libraries.python.TrafficGenerator"
1343 # Overrides for fixed transaction amount.
1344 # TODO: Move to robot code? We have two call sites, so this saves space,
1345 # even though this is surprising for log readers.
1346 if transaction_scale:
1347 initial_trial_duration = 1.0
1348 final_trial_duration = 2.0
1349 number_of_intermediate_phases = 0
1351 tg_instance.set_rate_provider_defaults(
1352 frame_size=frame_size,
1353 traffic_profile=traffic_profile,
1354 sleep_till_duration=False,
1357 traffic_directions=traffic_directions,
1358 transaction_duration=transaction_duration,
1359 transaction_scale=transaction_scale,
1360 transaction_type=transaction_type,
1361 use_latency=use_latency,
1363 algorithm = MultipleLossRatioSearch(
1364 measurer=tg_instance,
1365 final_trial_duration=final_trial_duration,
1366 final_relative_width=final_relative_width,
1367 number_of_intermediate_phases=number_of_intermediate_phases,
1368 initial_trial_duration=initial_trial_duration,
1370 doublings=doublings,
1372 result = algorithm.narrow_down_ndr_and_pdr(
1373 min_rate=minimum_transmit_rate,
1374 max_rate=maximum_transmit_rate,
1375 packet_loss_ratio=packet_loss_ratio,
1380 def perform_soak_search(
1383 minimum_transmit_rate,
1384 maximum_transmit_rate,
1391 trace_enabled=False,
1392 traffic_directions=2,
1393 transaction_duration=0.0,
1394 transaction_scale=0,
1395 transaction_type=u"packet",
1398 """Setup initialized TG, perform soak search, return avg and stdev.
1400 :param frame_size: Frame size identifier or value [B].
1401 :param traffic_profile: Module name as a traffic profile identifier.
1402 See GPL/traffic_profiles/trex for implemented modules.
1403 :param minimum_transmit_rate: Minimal load in transactions per second.
1404 :param maximum_transmit_rate: Maximal load in transactions per second.
1405 :param plr_target: Fraction of packets lost to achieve [1].
1406 :param tdpt: Trial duration per trial.
1407 The algorithm linearly increases trial duration with trial number,
1408 this is the increment between succesive trials, in seconds.
1409 :param initial_count: Offset to apply before the first trial.
1410 For example initial_count=50 makes first trial to be 51*tdpt long.
1411 This is needed because initial "search" phase of integrator
1412 takes significant time even without any trial results.
1413 :param timeout: The search will stop after this overall time [s].
1414 :param ppta: Packets per transaction, aggregated over directions.
1415 Needed for udp_pps which does not have a good transaction counter,
1416 so we need to compute expected number of packets.
1418 :param resetter: Callable to reset DUT state for repeated trials.
1419 :param trace_enabled: True if trace enabled else False.
1420 This is very verbose tracing on numeric computations,
1421 do not use in production.
1423 :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
1425 :param transaction_duration: Total expected time to close transaction.
1426 :param transaction_scale: Number of transactions to perform.
1427 0 (default) means unlimited.
1428 :param transaction_type: An identifier specifying which counters
1429 and formulas to use when computing attempted and failed
1430 transactions. Default: "packet".
1431 :param use_latency: Whether to measure latency during the trial.
1433 :type frame_size: str or int
1434 :type traffic_profile: str
1435 :type minimum_transmit_rate: float
1436 :type maximum_transmit_rate: float
1437 :type plr_target: float
1438 :type initial_count: int
1439 :type timeout: float
1441 :type resetter: Optional[Callable[[], None]]
1442 :type trace_enabled: bool
1443 :type traffic_directions: int
1444 :type transaction_duration: float
1445 :type transaction_scale: int
1446 :type transaction_type: str
1447 :type use_latency: bool
1448 :returns: Average and stdev of estimated aggregate rate giving PLR.
1449 :rtype: 2-tuple of float
1451 tg_instance = BuiltIn().get_library_instance(
1452 u"resources.libraries.python.TrafficGenerator"
1454 # Overrides for fixed transaction amount.
1455 # TODO: Move to robot code? We have a single call site
1456 # but MLRsearch has two and we want the two to be used similarly.
1457 if transaction_scale:
1459 tg_instance.set_rate_provider_defaults(
1460 frame_size=frame_size,
1461 traffic_profile=traffic_profile,
1462 negative_loss=False,
1463 sleep_till_duration=True,
1466 traffic_directions=traffic_directions,
1467 transaction_duration=transaction_duration,
1468 transaction_scale=transaction_scale,
1469 transaction_type=transaction_type,
1470 use_latency=use_latency,
1472 algorithm = PLRsearch(
1473 measurer=tg_instance,
1474 trial_duration_per_trial=tdpt,
1475 packet_loss_ratio_target=plr_target,
1476 trial_number_offset=initial_count,
1478 trace_enabled=trace_enabled,
1480 result = algorithm.search(
1481 min_rate=minimum_transmit_rate,
1482 max_rate=maximum_transmit_rate,