1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Performance testing traffic generator library."""
18 from robot.api import logger
19 from robot.libraries.BuiltIn import BuiltIn
21 from .Constants import Constants
22 from .CpuUtils import CpuUtils
23 from .DropRateSearch import DropRateSearch
24 from .MLRsearch.AbstractMeasurer import AbstractMeasurer
25 from .MLRsearch.MultipleLossRatioSearch import MultipleLossRatioSearch
26 from .MLRsearch.ReceiveRateMeasurement import ReceiveRateMeasurement
27 from .PLRsearch.PLRsearch import PLRsearch
28 from .OptionString import OptionString
29 from .ssh import exec_cmd_no_error, exec_cmd
30 from .topology import NodeType
31 from .topology import NodeSubTypeTG
32 from .topology import Topology
34 __all__ = [u"TGDropRateSearchImpl", u"TrafficGenerator", u"OptimizedSearch"]
def check_subtype(node):
    """Return supported subtype of given node, or raise an exception.

    Currently only one subtype is supported,
    but we want our code to be ready for other ones.

    :param node: Topology node to check. Can be None.
    :type node: dict or NoneType
    :returns: Subtype detected.
    :rtype: NodeSubTypeTG
    :raises RuntimeError: If node is not supported, message explains how.
    """
    # Guard against None explicitly, as the docstring allows it;
    # calling .get() on None would raise AttributeError instead.
    if node is None or node.get(u"type") is None:
        msg = u"Node type is not defined"
    elif node[u"type"] != NodeType.TG:
        msg = f"Node type is {node[u'type']!r}, not a TG"
    elif node.get(u"subtype") is None:
        msg = u"TG subtype is not defined"
    elif node[u"subtype"] != NodeSubTypeTG.TREX:
        msg = f"TG subtype {node[u'subtype']!r} is not supported"
    else:
        # All checks passed, node is a supported T-Rex traffic generator.
        # Without this else branch the function would return TREX even
        # for invalid nodes and the raise below would be unreachable.
        return NodeSubTypeTG.TREX
    raise RuntimeError(msg)
62 class TGDropRateSearchImpl(DropRateSearch):
63 """Drop Rate Search implementation."""
# NOTE(review): this capture is an elided numbered listing; the
# measure_loss signature opening line and several docstring delimiters
# are missing, so only comments are added here.
66 # super(TGDropRateSearchImpl, self).__init__()
# measure_loss: run one trial at the given offered load and return
# whether measured loss is within the acceptance criteria.
69 self, rate, frame_size, loss_acceptance, loss_acceptance_type,
71 """Runs the traffic and evaluate the measured results.
73 :param rate: Offered traffic load.
74 :param frame_size: Size of frame.
75 :param loss_acceptance: Permitted drop ratio or frames count.
76 :param loss_acceptance_type: Type of permitted loss.
77 :param traffic_profile: Module name as a traffic profile identifier.
78 See GPL/traffic_profiles/trex for implemented modules.
81 :type loss_acceptance: float
82 :type loss_acceptance_type: LossAcceptanceType
83 :type traffic_profile: str
84 :returns: Drop threshold exceeded? (True/False)
86 :raises NotImplementedError: If TG is not supported.
87 :raises RuntimeError: If TG is not specified.
89 # we need instance of TrafficGenerator instantiated by Robot Framework
90 # to be able to use trex_stl-*()
91 tg_instance = BuiltIn().get_library_instance(
92 u"resources.libraries.python.TrafficGenerator"
94 subtype = check_subtype(tg_instance.node)
95 if subtype == NodeSubTypeTG.TREX:
96 unit_rate = str(rate) + self.get_rate_type_str()
# Run a single synchronous trial on the remote T-Rex.
97 tg_instance.trex_stl_start_remote_exec(
98 self.get_duration(), unit_rate, frame_size, traffic_profile
100 loss = tg_instance.get_loss()
101 sent = tg_instance.get_sent()
# Convert absolute loss to percentage when that is the acceptance type.
102 if self.loss_acceptance_type_is_percentage():
103 loss = (float(loss) / float(sent)) * 100
105 f"comparing: {loss} < {loss_acceptance} {loss_acceptance_type}"
# True means the loss is within the accepted threshold.
107 return float(loss) <= float(loss_acceptance)
# get_latency: delegate to the shared TrafficGenerator library instance.
110 def get_latency(self):
111 """Returns min/avg/max latency.
113 :returns: Latency stats.
116 tg_instance = BuiltIn().get_library_instance(
117 u"resources.libraries.python.TrafficGenerator"
119 return tg_instance.get_latency_int()
123 """Defines mode of T-Rex traffic generator."""
124 # Advanced stateful mode
130 # TODO: Pylint says too-many-instance-attributes.
131 class TrafficGenerator(AbstractMeasurer):
132 """Traffic Generator."""
134 # TODO: Remove "trex" from lines which could work with other TGs.
136 # Use one instance of TrafficGenerator for all tests in test suite
137 ROBOT_LIBRARY_SCOPE = u"TEST SUITE"
140 # TODO: Separate into few dataclasses/dicts.
141 # Pylint dislikes large unstructured state, and it is right.
144 # TG interface order mapping
145 self._ifaces_reordered = False
146 # Result holding fields, to be removed.
151 self._received = None
152 self._approximated_rate = None
153 self._approximated_duration = None
155 # Measurement input fields, needed for async stop result.
156 self._start_time = None
157 self._stop_time = None
159 self._target_duration = None
160 self._duration = None
161 # Other input parameters, not knowable from measure() signature.
162 self.frame_size = None
163 self.traffic_profile = None
164 self.traffic_directions = None
165 self.negative_loss = None
166 self.use_latency = None
169 self.transaction_scale = None
170 self.transaction_duration = None
171 self.sleep_till_duration = None
172 self.transaction_type = None
173 self.duration_limit = None
174 self.ramp_up_start = None
175 self.ramp_up_stop = None
176 self.ramp_up_rate = None
177 self.ramp_up_duration = None
178 self.state_timeout = None
179 # Transient data needed for async measurements.
180 self._xstats = (None, None)
181 # TODO: Rename "xstats" to something opaque, so T-Rex is not privileged?
187 :returns: Traffic generator node.
193 """Return number of lost packets.
195 :returns: Number of lost packets.
201 """Return number of sent packets.
203 :returns: Number of sent packets.
208 def get_received(self):
209 """Return number of received packets.
211 :returns: Number of received packets.
214 return self._received
216 def get_latency_int(self):
217 """Return rounded min/avg/max latency.
219 :returns: Latency stats.
# NOTE(review): the body of this accessor is elided in this capture;
# presumably it returns the internally stored latency field — confirm
# against the full source before relying on this.
224 def get_approximated_rate(self):
225 """Return approximated rate computed as ratio of transmitted packets
226 over duration of trial.
228 :returns: Approximated rate.
231 return self._approximated_rate
233 def get_l7_data(self):
236 :returns: Number of received packets.
# NOTE(review): docstring opener, closer and the return statement are
# elided in this capture; presumably this returns the stored L7 data
# dict — confirm against the full source.
241 def check_mode(self, expected_mode):
244 :param expected_mode: Expected traffic generator mode.
245 :type expected_mode: object
246 :raises RuntimeError: In case of unexpected TG mode.
248 if self._mode == expected_mode:
251 f"{self._node[u'subtype']} not running in {expected_mode} mode!"
254 # TODO: pylint says disable=too-many-locals.
255 def initialize_traffic_generator(
256 self, tg_node, tg_if1, tg_if2, tg_if1_adj_node, tg_if1_adj_if,
257 tg_if2_adj_node, tg_if2_adj_if, osi_layer, tg_if1_dst_mac=None,
258 tg_if2_dst_mac=None):
259 """TG initialization.
261 TODO: Document why do we need (and how do we use) _ifaces_reordered.
263 :param tg_node: Traffic generator node.
264 :param tg_if1: TG - name of first interface.
265 :param tg_if2: TG - name of second interface.
266 :param tg_if1_adj_node: TG if1 adjecent node.
267 :param tg_if1_adj_if: TG if1 adjecent interface.
268 :param tg_if2_adj_node: TG if2 adjecent node.
269 :param tg_if2_adj_if: TG if2 adjecent interface.
270 :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
271 :param tg_if1_dst_mac: Interface 1 destination MAC address.
272 :param tg_if2_dst_mac: Interface 2 destination MAC address.
276 :type tg_if1_adj_node: dict
277 :type tg_if1_adj_if: str
278 :type tg_if2_adj_node: dict
279 :type tg_if2_adj_if: str
281 :type tg_if1_dst_mac: str
282 :type tg_if2_dst_mac: str
284 :raises RuntimeError: In case of issue during initialization.
# Only the T-Rex subtype is currently supported; check_subtype raises
# for anything else.
286 subtype = check_subtype(tg_node)
287 if subtype == NodeSubTypeTG.TREX:
# L7 testing uses the advanced stateful (ASTF) mode, everything else STL.
289 self._mode = TrexMode.ASTF if osi_layer == u"L7" else TrexMode.STL
# Collect PCI addresses and MACs for both TG interfaces.
292 if1[u"pci"] = Topology().get_interface_pci_addr(self._node, tg_if1)
293 if2[u"pci"] = Topology().get_interface_pci_addr(self._node, tg_if2)
294 if1[u"addr"] = Topology().get_interface_mac(self._node, tg_if1)
295 if2[u"addr"] = Topology().get_interface_mac(self._node, tg_if2)
# For L2 the interfaces face each other; for L3/L7 use adjacent DUT MACs.
297 if osi_layer == u"L2":
298 if1[u"adj_addr"] = if2[u"addr"]
299 if2[u"adj_addr"] = if1[u"addr"]
300 elif osi_layer in (u"L3", u"L7"):
301 if1[u"adj_addr"] = Topology().get_interface_mac(
302 tg_if1_adj_node, tg_if1_adj_if
304 if2[u"adj_addr"] = Topology().get_interface_mac(
305 tg_if2_adj_node, tg_if2_adj_if
308 raise ValueError(u"Unknown OSI layer!")
310 # in case of switched environment we can override MAC addresses
311 if tg_if1_dst_mac is not None and tg_if2_dst_mac is not None:
312 if1[u"adj_addr"] = tg_if1_dst_mac
313 if2[u"adj_addr"] = tg_if2_dst_mac
# DPDK may enumerate ports in PCI order; remember if they got swapped
# so later port indices can be un-swapped.
315 if min(if1[u"pci"], if2[u"pci"]) != if1[u"pci"]:
317 self._ifaces_reordered = True
319 master_thread_id, latency_thread_id, socket, threads = \
320 CpuUtils.get_affinity_trex(
321 self._node, tg_if1, tg_if2,
322 tg_dtc=Constants.TREX_CORE_COUNT)
# Generate /etc/trex_cfg.yaml on the TG node via a heredoc.
324 if osi_layer in (u"L2", u"L3", u"L7"):
327 f"sh -c 'cat << EOF > /etc/trex_cfg.yaml\n"
329 f" c: {len(threads)}\n"
330 f" limit_memory: {Constants.TREX_LIMIT_MEMORY}\n"
331 f" interfaces: [\"{if1[u'pci']}\",\"{if2[u'pci']}\"]\n"
333 f" - dest_mac: \'{if1[u'adj_addr']}\'\n"
334 f" src_mac: \'{if1[u'addr']}\'\n"
335 f" - dest_mac: \'{if2[u'adj_addr']}\'\n"
336 f" src_mac: \'{if2[u'addr']}\'\n"
338 f" master_thread_id: {master_thread_id}\n"
339 f" latency_thread_id: {latency_thread_id}\n"
341 f" - socket: {socket}\n"
342 f" threads: {threads}\n"
344 sudo=True, message=u"T-Rex config generation!"
347 raise ValueError(u"Unknown OSI layer!")
# Finally start the T-Rex daemon itself.
349 TrafficGenerator.startup_trex(
350 self._node, osi_layer, subtype=subtype
354 def startup_trex(tg_node, osi_layer, subtype=None):
355 """Startup sequence for the TRex traffic generator.
357 :param tg_node: Traffic generator node.
358 :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
359 :param subtype: Traffic generator sub-type.
362 :type subtype: NodeSubTypeTG
363 :raises RuntimeError: If T-Rex startup failed.
364 :raises ValueError: If OSI layer is not supported.
# NOTE(review): this capture is elided; try/except scaffolding around
# the exec calls is missing here, so only comments are added.
367 subtype = check_subtype(tg_node)
368 if subtype == NodeSubTypeTG.TREX:
# Up to three attempts to get a responsive T-Rex instance.
369 for _ in range(0, 3):
370 # Kill TRex only if it is already running.
371 cmd = u"sh -c \"pgrep t-rex && pkill t-rex && sleep 3 || true\""
373 tg_node, cmd, sudo=True, message=u"Kill TRex failed!"
# Collect PCI addresses of non-Mellanox ports; Mellanox NICs are not
# rebound to a DPDK-compatible driver.
378 for port in tg_node[u"interfaces"].values():
379 if u'Mellanox' not in port.get(u'model'):
380 ports += f" {port.get(u'pci_address')}"
382 cmd = f"sh -c \"cd {Constants.TREX_INSTALL_DIR}/scripts/ && " \
383 f"./dpdk_nic_bind.py -u {ports} || true\""
385 tg_node, cmd, sudo=True,
386 message=u"Unbind PCI ports from driver failed!"
# Build the t-rex-64 daemon command line and launch it in background.
390 cd_cmd = f"cd '{Constants.TREX_INSTALL_DIR}/scripts/'"
391 trex_cmd = OptionString([u"nohup", u"./t-rex-64"])
393 trex_cmd.add(u"--prefix $(hostname)")
394 trex_cmd.add(u"--hdrh")
395 trex_cmd.add(u"--no-scapy-server")
396 trex_cmd.add_if(u"--astf", osi_layer == u"L7")
397 # OptionString does not create double space if extra is empty.
398 trex_cmd.add(f"{Constants.TREX_EXTRA_CMDLINE}")
399 inner_command = f"{cd_cmd} && {trex_cmd} > /tmp/trex.log 2>&1 &"
400 cmd = f"sh -c \"{inner_command}\" > /dev/null"
402 exec_cmd_no_error(tg_node, cmd, sudo=True)
# On failure, dump the daemon log before raising.
404 cmd = u"sh -c \"cat /tmp/trex.log\""
406 tg_node, cmd, sudo=True,
407 message=u"Get TRex logs failed!"
409 raise RuntimeError(u"Start TRex failed!")
411 # Test T-Rex API responsiveness.
412 cmd = f"python3 {Constants.REMOTE_FW_DIR}/GPL/tools/trex/"
413 if osi_layer in (u"L2", u"L3"):
414 cmd += u"trex_stl_assert.py"
415 elif osi_layer == u"L7":
416 cmd += u"trex_astf_assert.py"
418 raise ValueError(u"Unknown OSI layer!")
421 tg_node, cmd, sudo=True,
422 message=u"T-Rex API is not responding!", retries=20
427 # After max retries TRex is still not responding to API critical
429 exec_cmd(tg_node, u"cat /tmp/trex.log", sudo=True)
430 raise RuntimeError(u"Start T-Rex failed after multiple retries!")
433 def is_trex_running(node):
434 """Check if T-Rex is running using pidof.
436 :param node: Traffic generator node.
438 :returns: True if T-Rex is running otherwise False.
441 ret, _, _ = exec_cmd(node, u"pgrep t-rex", sudo=True)
442 return bool(int(ret) == 0)
445 def teardown_traffic_generator(node):
448 :param node: Traffic generator node.
451 :raises RuntimeError: If node type is not a TG,
452 or if T-Rex teardown fails.
# NOTE(review): the exec call around the kill command is elided in this
# capture; only comments are added.
454 subtype = check_subtype(node)
455 if subtype == NodeSubTypeTG.TREX:
# Kill the daemon only if it is actually running, then wait for it to die.
459 u"\"if pgrep t-rex; then sudo pkill t-rex && sleep 3; fi\"",
461 message=u"T-Rex kill failed!"
464 def trex_astf_stop_remote_exec(self, node):
465 """Execute T-Rex ASTF script on remote node over ssh to stop running
468 Internal state is updated with measurement results.
470 :param node: T-Rex generator node.
472 :raises RuntimeError: If stop traffic script fails.
474 command_line = OptionString().add(u"python3")
475 dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
476 command_line.add(f"'{dirname}/trex_astf_stop.py'")
477 command_line.change_prefix(u"--")
478 for index, value in enumerate(self._xstats):
479 if value is not None:
480 value = value.replace(u"'", u"\"")
481 command_line.add_equals(f"xstat{index}", f"'{value}'")
482 stdout, _ = exec_cmd_no_error(
484 message=u"T-Rex ASTF runtime error!"
486 self._parse_traffic_results(stdout)
488 def trex_stl_stop_remote_exec(self, node):
489 """Execute T-Rex STL script on remote node over ssh to stop running
492 Internal state is updated with measurement results.
494 :param node: T-Rex generator node.
496 :raises RuntimeError: If stop traffic script fails.
498 command_line = OptionString().add(u"python3")
499 dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
500 command_line.add(f"'{dirname}/trex_stl_stop.py'")
501 command_line.change_prefix(u"--")
502 for index, value in enumerate(self._xstats):
503 if value is not None:
504 value = value.replace(u"'", u"\"")
505 command_line.add_equals(f"xstat{index}", f"'{value}'")
506 stdout, _ = exec_cmd_no_error(
508 message=u"T-Rex STL runtime error!"
510 self._parse_traffic_results(stdout)
512 def stop_traffic_on_tg(self):
513 """Stop all traffic on TG.
515 :returns: Structure containing the result of the measurement.
516 :rtype: ReceiveRateMeasurement
517 :raises ValueError: If TG traffic profile is not supported.
519 subtype = check_subtype(self._node)
520 if subtype != NodeSubTypeTG.TREX:
521 raise ValueError(f"Unsupported TG subtype: {subtype!r}")
522 if u"trex-astf" in self.traffic_profile:
523 self.trex_astf_stop_remote_exec(self._node)
524 elif u"trex-stl" in self.traffic_profile:
525 self.trex_stl_stop_remote_exec(self._node)
527 raise ValueError(u"Unsupported T-Rex traffic profile!")
528 self._stop_time = time.monotonic()
530 return self._get_measurement_result()
532 def _compute_duration(self, duration, multiplier):
533 """Compute duration for profile driver.
535 The final result is influenced by transaction scale and duration limit.
536 It is assumed a higher level function has already set those to self.
537 The duration argument is the target value from search point of view,
538 before the overrides are applied here.
540 Minus one (signalling async traffic start) is kept.
542 Completeness flag is also included. Duration limited or async trials
543 are not considered complete for ramp-up purposes.
545 :param duration: Time expressed in seconds for how long to send traffic.
546 :param multiplier: Traffic rate in transactions per second.
547 :type duration: float
548 :type multiplier: float
549 :returns: New duration and whether it was a complete ramp-up candidate.
554 return duration, False
555 computed_duration = duration
556 if self.transaction_scale:
557 computed_duration = self.transaction_scale / multiplier
558 # Log the computed duration,
559 # so we can compare with what telemetry suggests
560 # the real duration was.
561 logger.debug(f"Expected duration {computed_duration}")
562 computed_duration += 0.1115
563 if not self.duration_limit:
564 return computed_duration, True
565 limited_duration = min(computed_duration, self.duration_limit)
566 return limited_duration, (limited_duration == computed_duration)
568 def trex_astf_start_remote_exec(
569 self, duration, multiplier, async_call=False):
570 """Execute T-Rex ASTF script on remote node over ssh to start running
573 In sync mode, measurement results are stored internally.
574 In async mode, initial data including xstats are stored internally.
576 This method contains the logic to compute duration as maximum time
577 if transaction_scale is nonzero.
578 The transaction_scale argument defines (limits) how many transactions
579 will be started in total. As that amount of transaction can take
580 considerable time (sometimes due to explicit delays in the profile),
581 the real time a trial needs to finish is computed here. For now,
582 in that case the duration argument is ignored, assuming it comes
583 from ASTF-unaware search algorithm. The overall time a single
584 transaction needs is given in parameter transaction_duration,
585 it includes both explicit delays and implicit time it takes
586 to transfer data (or whatever the transaction does).
588 Currently it is observed TRex does not start the ASTF traffic
589 immediately, an ad-hoc constant is added to the computed duration
590 to compensate for that.
592 If transaction_scale is zero, duration is not recomputed.
593 It is assumed the subsequent result parsing gets the real duration
594 if the traffic stops sooner for any reason.
596 Currently, it is assumed traffic profile defines a single transaction.
597 To avoid heavy logic here, the input rate is expected to be in
598 transactions per second, as that directly translates to TRex multiplier,
599 (assuming the profile does not override the default cps value of one).
601 :param duration: Time expressed in seconds for how long to send traffic.
602 :param multiplier: Traffic rate in transactions per second.
603 :param async_call: If enabled then don't wait for all incoming traffic.
604 :type duration: float
605 :type multiplier: int
606 :type async_call: bool
607 :raises RuntimeError: In case of T-Rex driver issue.
# NOTE(review): capture is elided (async branch scaffolding and the
# xstats index loop header are missing); only comments are added.
609 self.check_mode(TrexMode.ASTF)
# Undo DPDK port reordering so callers keep stable port semantics.
610 p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
611 if not isinstance(duration, (float, int)):
612 duration = float(duration)
614 # TODO: Refactor the code so duration is computed only once,
615 # and both the initial and the computed durations are logged.
616 computed_duration, _ = self._compute_duration(duration, multiplier)
# Build the remote profile-driver command line.
618 command_line = OptionString().add(u"python3")
619 dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
620 command_line.add(f"'{dirname}/trex_astf_profile.py'")
621 command_line.change_prefix(u"--")
622 dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
623 command_line.add_with_value(
624 u"profile", f"'{dirname}/{self.traffic_profile}.py'"
626 command_line.add_with_value(u"duration", f"{computed_duration!r}")
627 command_line.add_with_value(u"frame_size", self.frame_size)
628 command_line.add_with_value(u"multiplier", multiplier)
629 command_line.add_with_value(u"port_0", p_0)
630 command_line.add_with_value(u"port_1", p_1)
631 command_line.add_with_value(
632 u"traffic_directions", self.traffic_directions
634 command_line.add_if(u"async_start", async_call)
635 command_line.add_if(u"latency", self.use_latency)
636 command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
638 self._start_time = time.monotonic()
639 self._rate = multiplier
640 stdout, _ = exec_cmd_no_error(
641 self._node, command_line, timeout=computed_duration + 10.0,
642 message=u"T-Rex ASTF runtime error!"
# Async path: clear sync-only result fields and prepare the L7 data
# structure whose counters are filled in when traffic is stopped.
647 self._target_duration = None
648 self._duration = None
649 self._received = None
653 xstats = [None, None]
654 self._l7_data = dict()
655 self._l7_data[u"client"] = dict()
656 self._l7_data[u"client"][u"active_flows"] = None
657 self._l7_data[u"client"][u"established_flows"] = None
658 self._l7_data[u"client"][u"traffic_duration"] = None
659 self._l7_data[u"server"] = dict()
660 self._l7_data[u"server"][u"active_flows"] = None
661 self._l7_data[u"server"][u"established_flows"] = None
662 self._l7_data[u"server"][u"traffic_duration"] = None
# Profile family decides which protocol counters are tracked.
663 if u"udp" in self.traffic_profile:
664 self._l7_data[u"client"][u"udp"] = dict()
665 self._l7_data[u"client"][u"udp"][u"connects"] = None
666 self._l7_data[u"client"][u"udp"][u"closed_flows"] = None
667 self._l7_data[u"client"][u"udp"][u"err_cwf"] = None
668 self._l7_data[u"server"][u"udp"] = dict()
669 self._l7_data[u"server"][u"udp"][u"accepted_flows"] = None
670 self._l7_data[u"server"][u"udp"][u"closed_flows"] = None
671 elif u"tcp" in self.traffic_profile:
672 self._l7_data[u"client"][u"tcp"] = dict()
673 self._l7_data[u"client"][u"tcp"][u"initiated_flows"] = None
674 self._l7_data[u"client"][u"tcp"][u"connects"] = None
675 self._l7_data[u"client"][u"tcp"][u"closed_flows"] = None
676 self._l7_data[u"client"][u"tcp"][u"connattempt"] = None
677 self._l7_data[u"server"][u"tcp"] = dict()
678 self._l7_data[u"server"][u"tcp"][u"accepted_flows"] = None
679 self._l7_data[u"server"][u"tcp"][u"connects"] = None
680 self._l7_data[u"server"][u"tcp"][u"closed_flows"] = None
682 logger.warn(u"Unsupported T-Rex ASTF traffic profile!")
# Scrape xstats snapshots out of the driver stdout for the stop script.
684 for line in stdout.splitlines():
685 if f"Xstats snapshot {index}: " in line:
686 xstats[index] = line[19:]
690 self._xstats = tuple(xstats)
# Sync path: remember durations and parse results right away.
692 self._target_duration = duration
693 self._duration = computed_duration
694 self._parse_traffic_results(stdout)
696 def trex_stl_start_remote_exec(self, duration, rate, async_call=False):
697 """Execute T-Rex STL script on remote node over ssh to start running
700 In sync mode, measurement results are stored internally.
701 In async mode, initial data including xstats are stored internally.
703 Mode-unaware code (e.g. in search algorithms) works with transactions.
704 To keep the logic simple, multiplier is set to that value.
705 As bidirectional traffic profiles send packets in both directions,
706 they are treated as transactions with two packets (one per direction).
708 :param duration: Time expressed in seconds for how long to send traffic.
709 :param rate: Traffic rate in transactions per second.
710 :param async_call: If enabled then don't wait for all incoming traffic.
711 :type duration: float
713 :type async_call: bool
714 :raises RuntimeError: In case of T-Rex driver issue.
# NOTE(review): capture is elided (async scaffolding and xstats loop
# header missing); only comments are added.
716 self.check_mode(TrexMode.STL)
# Undo DPDK port reordering so callers keep stable port semantics.
717 p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
718 if not isinstance(duration, (float, int)):
719 duration = float(duration)
721 # TODO: Refactor the code so duration is computed only once,
722 # and both the initial and the computed durations are logged.
723 duration, _ = self._compute_duration(duration=duration, multiplier=rate)
# Build the remote profile-driver command line.
725 command_line = OptionString().add(u"python3")
726 dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
727 command_line.add(f"'{dirname}/trex_stl_profile.py'")
728 command_line.change_prefix(u"--")
729 dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
730 command_line.add_with_value(
731 u"profile", f"'{dirname}/{self.traffic_profile}.py'"
733 command_line.add_with_value(u"duration", f"{duration!r}")
734 command_line.add_with_value(u"frame_size", self.frame_size)
735 command_line.add_with_value(u"rate", f"{rate!r}")
736 command_line.add_with_value(u"port_0", p_0)
737 command_line.add_with_value(u"port_1", p_1)
738 command_line.add_with_value(
739 u"traffic_directions", self.traffic_directions
741 command_line.add_if(u"async_start", async_call)
742 command_line.add_if(u"latency", self.use_latency)
743 command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
745 # TODO: This is ugly. Handle parsing better.
746 self._start_time = time.monotonic()
# Rate may arrive as e.g. "1000pps"; strip the unit suffix if present.
747 self._rate = float(rate[:-3]) if u"pps" in rate else float(rate)
748 stdout, _ = exec_cmd_no_error(
749 self._node, command_line, timeout=int(duration) + 60,
750 message=u"T-Rex STL runtime error"
# Async path: clear sync-only result fields, capture xstats snapshots.
755 self._target_duration = None
756 self._duration = None
757 self._received = None
762 xstats = [None, None]
764 for line in stdout.splitlines():
765 if f"Xstats snapshot {index}: " in line:
766 xstats[index] = line[19:]
770 self._xstats = tuple(xstats)
# Sync path: remember durations and parse results right away.
772 self._target_duration = duration
773 self._duration = duration
774 self._parse_traffic_results(stdout)
776 def send_traffic_on_tg(
# NOTE(review): most of the parameter list is elided in this capture,
# so the signature cannot be reconstructed here; only comments added.
784 traffic_directions=2,
785 transaction_duration=0.0,
787 transaction_type=u"packet",
791 ramp_up_duration=None,
795 """Send traffic from all configured interfaces on TG.
797 In async mode, xstats is stored internally,
798 to enable getting correct result when stopping the traffic.
799 In both modes, stdout is returned,
800 but _parse_traffic_results only works in sync output.
802 Note that traffic generator uses DPDK driver which might
803 reorder port numbers based on wiring and PCI numbering.
804 This method handles that, so argument values are invariant,
805 but you can see swapped valued in debug logs.
807 When transaction_scale is specified, the duration value is ignored
808 and the needed time is computed. For cases where this results in
809 to too long measurement (e.g. teardown trial with small rate),
810 duration_limit is applied (of non-zero), so the trial is stopped sooner.
812 Bidirectional STL profiles are treated as transactions with two packets.
814 The return value is None for async.
816 :param duration: Duration of test traffic generation in seconds.
817 :param rate: Traffic rate in transactions per second.
818 :param frame_size: Frame size (L2) in Bytes.
819 :param traffic_profile: Module name as a traffic profile identifier.
820 See GPL/traffic_profiles/trex for implemented modules.
821 :param async_call: Async mode.
822 :param ppta: Packets per transaction, aggregated over directions.
823 Needed for udp_pps which does not have a good transaction counter,
824 so we need to compute expected number of packets.
826 :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
828 :param transaction_duration: Total expected time to close transaction.
829 :param transaction_scale: Number of transactions to perform.
830 0 (default) means unlimited.
831 :param transaction_type: An identifier specifying which counters
832 and formulas to use when computing attempted and failed
833 transactions. Default: "packet".
834 :param duration_limit: Zero or maximum limit for computed (or given)
836 :param use_latency: Whether to measure latency during the trial.
838 :param ramp_up_rate: Rate to use in ramp-up trials [pps].
839 :param ramp_up_duration: Duration of ramp-up trials [s].
840 :param state_timeout: Time of life of DUT state [s].
841 :param ramp_up_only: If true, do not perform main trial measurement.
842 :type duration: float
844 :type frame_size: str
845 :type traffic_profile: str
846 :type async_call: bool
848 :type traffic_directions: int
849 :type transaction_duration: float
850 :type transaction_scale: int
851 :type transaction_type: str
852 :type duration_limit: float
853 :type use_latency: bool
854 :type ramp_up_rate: float
855 :type ramp_up_duration: float
856 :type state_timeout: float
857 :type ramp_up_only: bool
858 :returns: TG results.
859 :rtype: ReceiveRateMeasurement or None
860 :raises ValueError: If TG traffic profile is not supported.
# Remember all trial parameters on self, then delegate to the
# ramp-up-aware internal sender.
862 self.set_rate_provider_defaults(
863 frame_size=frame_size,
864 traffic_profile=traffic_profile,
866 traffic_directions=traffic_directions,
867 transaction_duration=transaction_duration,
868 transaction_scale=transaction_scale,
869 transaction_type=transaction_type,
870 duration_limit=duration_limit,
871 use_latency=use_latency,
872 ramp_up_rate=ramp_up_rate,
873 ramp_up_duration=ramp_up_duration,
874 state_timeout=state_timeout,
876 return self._send_traffic_on_tg_with_ramp_up(
879 async_call=async_call,
880 ramp_up_only=ramp_up_only,
883 def _send_traffic_on_tg_internal(
884 self, duration, rate, async_call=False):
885 """Send traffic from all configured interfaces on TG.
887 This is an internal function, it assumes set_rate_provider_defaults
888 has been called to remember most values.
889 The reason why need to remember various values is that
890 the traffic can be asynchronous, and parsing needs those values.
891 The reason why this is is a separate function from the one
892 which calls set_rate_provider_defaults is that some search algorithms
893 need to specify their own values, and we do not want the measure call
894 to overwrite them with defaults.
896 This function is used both for automated ramp-up trials
897 and for explicitly called trials.
899 :param duration: Duration of test traffic generation in seconds.
900 :param rate: Traffic rate in transactions per second.
901 :param async_call: Async mode.
902 :type duration: float
904 :type async_call: bool
905 :returns: TG results.
906 :rtype: ReceiveRateMeasurement or None
907 :raises ValueError: If TG traffic profile is not supported.
909 subtype = check_subtype(self._node)
910 if subtype == NodeSubTypeTG.TREX:
911 if u"trex-astf" in self.traffic_profile:
912 self.trex_astf_start_remote_exec(
913 duration, float(rate), async_call
915 elif u"trex-stl" in self.traffic_profile:
916 unit_rate_str = str(rate) + u"pps"
917 # TODO: Suport transaction_scale et al?
918 self.trex_stl_start_remote_exec(
919 duration, unit_rate_str, async_call
922 raise ValueError(u"Unsupported T-Rex traffic profile!")
924 return None if async_call else self._get_measurement_result()
926 def _send_traffic_on_tg_with_ramp_up(
927 self, duration, rate, async_call=False, ramp_up_only=False):
928 """Send traffic from all interfaces on TG, maybe after ramp-up.
930 This is an internal function, it assumes set_rate_provider_defaults
931 has been called to remember most values.
932 The reason why need to remember various values is that
933 the traffic can be asynchronous, and parsing needs those values.
934 The reason why this is a separate function from the one
935 which calls set_rate_provider_defaults is that some search algorithms
936 need to specify their own values, and we do not want the measure call
937 to overwrite them with defaults.
939 If ramp-up tracking is detected, a computation is performed,
940 and if state timeout is near, trial at ramp-up rate and duration
941 is inserted before the main trial measurement.
943 The ramp_up_only parameter forces a ramp-up without immediate
944 trial measurement, which is useful in case self remembers
945 a previous ramp-up trial that belongs to a different test (phase).
947 Return None if trial is async or ramp-up only.
949 :param duration: Duration of test traffic generation in seconds.
950 :param rate: Traffic rate in transactions per second.
951 :param async_call: Async mode.
952 :param ramp_up_only: If true, do not perform main trial measurement.
953 :type duration: float
955 :type async_call: bool
956 :type ramp_up_only: bool
957 :returns: TG results.
958 :rtype: ReceiveRateMeasurement or None
959 :raises ValueError: If TG traffic profile is not supported.
# NOTE(review): several else branches and call arguments are elided in
# this capture; only comments are added.
962 if self.ramp_up_rate:
963 # Figure out whether we need to insert a ramp-up trial.
964 # TODO: Give up on async_call=True?
965 if ramp_up_only or self.ramp_up_start is None:
966 # We never ramped up yet (at least not in this test case).
967 ramp_up_needed = True
969 # We ramped up before, but maybe it was too long ago.
970 # Adding a constant overhead to be safe.
971 time_now = time.monotonic() + 1.0
972 computed_duration, complete = self._compute_duration(
976 # There are two conditions for inserting ramp-up.
977 # If early sessions are expiring already,
978 # or if late sessions are to expire before measurement is over.
979 ramp_up_start_delay = time_now - self.ramp_up_start
980 ramp_up_stop_delay = time_now - self.ramp_up_stop
981 ramp_up_stop_delay += computed_duration
982 bigger_delay = max(ramp_up_start_delay, ramp_up_stop_delay)
983 # Final boolean decision.
984 ramp_up_needed = (bigger_delay >= self.state_timeout)
987 u"State may time out during next real trial, "
988 u"inserting a ramp-up trial."
# Track ramp-up trial boundaries for the next timeout computation.
990 self.ramp_up_start = time.monotonic()
991 self._send_traffic_on_tg_internal(
992 duration=self.ramp_up_duration,
993 rate=self.ramp_up_rate,
994 async_call=async_call,
996 self.ramp_up_stop = time.monotonic()
997 logger.debug(u"Ramp-up done.")
1000 u"State will probably not time out during next real trial, "
1001 u"no ramp-up trial needed just yet."
# Main trial measurement (skipped entirely when ramp_up_only).
1005 trial_start = time.monotonic()
1006 result = self._send_traffic_on_tg_internal(
1009 async_call=async_call,
1011 trial_end = time.monotonic()
1012 if self.ramp_up_rate:
1013 # Optimization: No loss acts as a good ramp-up, if it was complete.
1014 if complete and result is not None and result.loss_count == 0:
1015 logger.debug(u"Good trial acts as a ramp-up")
1016 self.ramp_up_start = trial_start
1017 self.ramp_up_stop = trial_end
1019 logger.debug(u"Loss or incomplete, does not act as a ramp-up.")
def no_traffic_loss_occurred(self):
    """Fail if loss occurred in traffic run.

    :raises RuntimeError: If traffic was not generated yet,
        or if any loss occurred.
    """
    if self._loss is None:
        raise RuntimeError(u"The traffic generation has not been issued")
    # _parse_traffic_results stores loss as int; normalize to str so that
    # both legacy string values and current int values compare correctly
    # (previously int 0 != u"0" raised even on a loss-free run).
    if str(self._loss) != u"0":
        raise RuntimeError(f"Traffic loss occurred: {self._loss}")
def fail_if_no_traffic_forwarded(self):
    """Fail if no traffic forwarded.

    TODO: Check number of passed transactions instead.

    :raises RuntimeError: If traffic was not generated yet,
        or if no traffic was forwarded.
    """
    if self._received is None:
        raise RuntimeError(u"The traffic generation has not been issued")
    # _parse_traffic_results stores the received count as int; normalize
    # to str so int 0 is also detected (previously int 0 == u"0" was
    # always false, so this check could never fire).
    if str(self._received) == u"0":
        raise RuntimeError(u"No traffic forwarded")
def partial_traffic_loss_accepted(
        self, loss_acceptance, loss_acceptance_type):
    """Fail if loss is higher then accepted in traffic run.

    :param loss_acceptance: Permitted drop ratio or frames count.
    :param loss_acceptance_type: Type of permitted loss.
    :type loss_acceptance: float
    :type loss_acceptance_type: LossAcceptanceType
    :raises Exception: If loss is above acceptance criteria.
    """
    # Guard: a result must exist before it can be judged.
    if self._loss is None:
        raise Exception(u"The traffic generation has not been issued")
    # Express the measured loss in the same units as the acceptance value.
    if loss_acceptance_type == u"frames":
        loss = float(self._loss)
    elif loss_acceptance_type == u"percentage":
        loss = (float(self._loss) / float(self._sent)) * 100
    else:
        raise Exception(u"Loss acceptance type not supported")
    limit = float(loss_acceptance)
    if loss > limit:
        raise Exception(
            f"Traffic loss {loss} above loss acceptance: {loss_acceptance}"
        )
def _parse_traffic_results(self, stdout):
    """Parse stdout of scripts into fields of self.

    Block of code to reuse, by sync start, or stop after async.

    :param stdout: Text containing the standard output.
    :type stdout: str
    """
    subtype = check_subtype(self._node)
    if subtype == NodeSubTypeTG.TREX:
        # Last line from console output holds semicolon separated
        # key=value pairs emitted by the traffic script.
        line = stdout.splitlines()[-1]
        results = line.split(u";")
        if results[-1] in (u" ", u""):
            # Drop the empty item produced by a trailing semicolon.
            results.pop(-1)
        self._result = dict()
        for result in results:
            key, value = result.split(u"=", maxsplit=1)
            self._result[key.strip()] = value
        logger.info(f"TrafficGen results:\n{self._result}")
        # Fixed: the default 0 belongs to .get(); previously it was passed
        # to int() as the base argument, which raised TypeError whenever
        # the key was missing (int(None, 0)).
        self._received = int(self._result.get(u"total_received", 0))
        self._sent = int(self._result.get(u"total_sent", 0))
        self._loss = int(self._result.get(u"frame_loss", 0))
        self._approximated_duration = \
            self._result.get(u"approximated_duration", 0.0)
        # Value can be the string "manual"; only convert numeric values.
        if u"manual" not in str(self._approximated_duration):
            self._approximated_duration = float(self._approximated_duration)
        self._latency = list()
        self._latency.append(self._result.get(u"latency_stream_0(usec)"))
        self._latency.append(self._result.get(u"latency_stream_1(usec)"))
        if self._mode == TrexMode.ASTF:
            self._l7_data = dict()
            self._l7_data[u"client"] = dict()
            self._l7_data[u"client"][u"sent"] = \
                int(self._result.get(u"client_sent", 0))
            self._l7_data[u"client"][u"received"] = \
                int(self._result.get(u"client_received", 0))
            self._l7_data[u"client"][u"active_flows"] = \
                int(self._result.get(u"client_active_flows", 0))
            self._l7_data[u"client"][u"established_flows"] = \
                int(self._result.get(u"client_established_flows", 0))
            self._l7_data[u"client"][u"traffic_duration"] = \
                float(self._result.get(u"client_traffic_duration", 0.0))
            self._l7_data[u"client"][u"err_rx_throttled"] = \
                int(self._result.get(u"client_err_rx_throttled", 0))
            self._l7_data[u"client"][u"err_c_nf_throttled"] = \
                int(self._result.get(u"client_err_nf_throttled", 0))
            self._l7_data[u"client"][u"err_flow_overflow"] = \
                int(self._result.get(u"client_err_flow_overflow", 0))
            self._l7_data[u"server"] = dict()
            self._l7_data[u"server"][u"active_flows"] = \
                int(self._result.get(u"server_active_flows", 0))
            self._l7_data[u"server"][u"established_flows"] = \
                int(self._result.get(u"server_established_flows", 0))
            self._l7_data[u"server"][u"traffic_duration"] = \
                float(self._result.get(u"server_traffic_duration", 0.0))
            # NOTE(review): reads the client_ key for a server field;
            # looks like a copy-paste — confirm whether the script emits
            # a server_err_rx_throttled counter before changing.
            self._l7_data[u"server"][u"err_rx_throttled"] = \
                int(self._result.get(u"client_err_rx_throttled", 0))
            if u"udp" in self.traffic_profile:
                self._l7_data[u"client"][u"udp"] = dict()
                self._l7_data[u"client"][u"udp"][u"connects"] = \
                    int(self._result.get(u"client_udp_connects", 0))
                self._l7_data[u"client"][u"udp"][u"closed_flows"] = \
                    int(self._result.get(u"client_udp_closed", 0))
                self._l7_data[u"client"][u"udp"][u"tx_bytes"] = \
                    int(self._result.get(u"client_udp_tx_bytes", 0))
                self._l7_data[u"client"][u"udp"][u"rx_bytes"] = \
                    int(self._result.get(u"client_udp_rx_bytes", 0))
                self._l7_data[u"client"][u"udp"][u"tx_packets"] = \
                    int(self._result.get(u"client_udp_tx_packets", 0))
                self._l7_data[u"client"][u"udp"][u"rx_packets"] = \
                    int(self._result.get(u"client_udp_rx_packets", 0))
                self._l7_data[u"client"][u"udp"][u"keep_drops"] = \
                    int(self._result.get(u"client_udp_keep_drops", 0))
                self._l7_data[u"client"][u"udp"][u"err_cwf"] = \
                    int(self._result.get(u"client_err_cwf", 0))
                self._l7_data[u"server"][u"udp"] = dict()
                self._l7_data[u"server"][u"udp"][u"accepted_flows"] = \
                    int(self._result.get(u"server_udp_accepts", 0))
                self._l7_data[u"server"][u"udp"][u"closed_flows"] = \
                    int(self._result.get(u"server_udp_closed", 0))
                self._l7_data[u"server"][u"udp"][u"tx_bytes"] = \
                    int(self._result.get(u"server_udp_tx_bytes", 0))
                self._l7_data[u"server"][u"udp"][u"rx_bytes"] = \
                    int(self._result.get(u"server_udp_rx_bytes", 0))
                self._l7_data[u"server"][u"udp"][u"tx_packets"] = \
                    int(self._result.get(u"server_udp_tx_packets", 0))
                self._l7_data[u"server"][u"udp"][u"rx_packets"] = \
                    int(self._result.get(u"server_udp_rx_packets", 0))
            elif u"tcp" in self.traffic_profile:
                self._l7_data[u"client"][u"tcp"] = dict()
                self._l7_data[u"client"][u"tcp"][u"initiated_flows"] = \
                    int(self._result.get(u"client_tcp_connect_inits", 0))
                self._l7_data[u"client"][u"tcp"][u"connects"] = \
                    int(self._result.get(u"client_tcp_connects", 0))
                self._l7_data[u"client"][u"tcp"][u"closed_flows"] = \
                    int(self._result.get(u"client_tcp_closed", 0))
                self._l7_data[u"client"][u"tcp"][u"connattempt"] = \
                    int(self._result.get(u"client_tcp_connattempt", 0))
                self._l7_data[u"client"][u"tcp"][u"tx_bytes"] = \
                    int(self._result.get(u"client_tcp_tx_bytes", 0))
                self._l7_data[u"client"][u"tcp"][u"rx_bytes"] = \
                    int(self._result.get(u"client_tcp_rx_bytes", 0))
                self._l7_data[u"server"][u"tcp"] = dict()
                self._l7_data[u"server"][u"tcp"][u"accepted_flows"] = \
                    int(self._result.get(u"server_tcp_accepts", 0))
                self._l7_data[u"server"][u"tcp"][u"connects"] = \
                    int(self._result.get(u"server_tcp_connects", 0))
                self._l7_data[u"server"][u"tcp"][u"closed_flows"] = \
                    int(self._result.get(u"server_tcp_closed", 0))
                self._l7_data[u"server"][u"tcp"][u"tx_bytes"] = \
                    int(self._result.get(u"server_tcp_tx_bytes", 0))
                self._l7_data[u"server"][u"tcp"][u"rx_bytes"] = \
                    int(self._result.get(u"server_tcp_rx_bytes", 0))
def _get_measurement_result(self):
    """Return the result of last measurement as ReceiveRateMeasurement.

    Separate function, as measurements can end either by time
    or by explicit call, this is the common block at the end.

    The target_tr field of ReceiveRateMeasurement is in
    transactions per second. Transmit count and loss count units
    depend on the transaction type. Usually they are in transactions
    per second, or aggregate packets per second.

    TODO: Fail on running or already reported measurement.

    :returns: Structure containing the result of the measurement.
    :rtype: ReceiveRateMeasurement
    :raises RuntimeError: If duration cannot be determined,
        or if the transaction type is not supported.
    """
    try:
        # Client duration seems to include a setup period
        # where TRex does not send any packets yet.
        # Server duration does not include it.
        server_data = self._l7_data[u"server"]
        approximated_duration = float(server_data[u"traffic_duration"])
    except (KeyError, AttributeError, ValueError, TypeError):
        approximated_duration = None
    try:
        if not approximated_duration:
            approximated_duration = float(self._approximated_duration)
    except ValueError:  # "manual"
        approximated_duration = None
    if not approximated_duration:
        if self._duration and self._duration > 0:
            # Known recomputed or target duration.
            approximated_duration = self._duration
        else:
            # It was an explicit stop.
            if not self._stop_time:
                raise RuntimeError(u"Unable to determine duration.")
            approximated_duration = self._stop_time - self._start_time
    target_duration = self._target_duration
    if not target_duration:
        target_duration = approximated_duration
    transmit_rate = self._rate
    if self.transaction_type == u"packet":
        partial_attempt_count = self._sent
        expected_attempt_count = self._sent
        fail_count = self._loss
    elif self.transaction_type == u"udp_cps":
        if not self.transaction_scale:
            raise RuntimeError(u"Add support for no-limit udp_cps.")
        partial_attempt_count = self._l7_data[u"client"][u"sent"]
        # We do not care whether TG is slow, it should have attempted all.
        expected_attempt_count = self.transaction_scale
        pass_count = self._l7_data[u"client"][u"received"]
        fail_count = expected_attempt_count - pass_count
    elif self.transaction_type == u"tcp_cps":
        if not self.transaction_scale:
            raise RuntimeError(u"Add support for no-limit tcp_cps.")
        ctca = self._l7_data[u"client"][u"tcp"][u"connattempt"]
        partial_attempt_count = ctca
        # We do not care whether TG is slow, it should have attempted all.
        expected_attempt_count = self.transaction_scale
        # From TCP point of view, server/connects counts full connections,
        # but we are testing NAT session so client/connects counts that
        # (half connections from TCP point of view).
        pass_count = self._l7_data[u"client"][u"tcp"][u"connects"]
        fail_count = expected_attempt_count - pass_count
    elif self.transaction_type == u"udp_pps":
        if not self.transaction_scale:
            raise RuntimeError(u"Add support for no-limit udp_pps.")
        partial_attempt_count = self._sent
        expected_attempt_count = self.transaction_scale * self.ppta
        fail_count = self._loss + (expected_attempt_count - self._sent)
    elif self.transaction_type == u"tcp_pps":
        if not self.transaction_scale:
            raise RuntimeError(u"Add support for no-limit tcp_pps.")
        partial_attempt_count = self._sent
        expected_attempt_count = self.transaction_scale * self.ppta
        # One loss-like scenario happens when TRex receives all packets
        # on L2 level, but is not fast enough to process them all
        # at L7 level, which leads to retransmissions.
        # Those manifest as opackets larger than expected.
        # A simple workaround is to add absolute difference.
        # Probability of retransmissions exactly cancelling
        # packets unsent due to duration stretching is quite low.
        fail_count = self._loss + abs(expected_attempt_count - self._sent)
    else:
        raise RuntimeError(f"Unknown parsing {self.transaction_type!r}")
    if fail_count < 0 and not self.negative_loss:
        fail_count = 0
    measurement = ReceiveRateMeasurement(
        duration=target_duration,
        target_tr=transmit_rate,
        transmit_count=expected_attempt_count,
        loss_count=fail_count,
        approximated_duration=approximated_duration,
        partial_transmit_count=partial_attempt_count,
    )
    measurement.latency = self.get_latency_int()
    return measurement
def measure(self, duration, transmit_rate):
    """Run trial measurement, parse and return results.

    The input rate is for transactions. Stateless bidirectional traffic
    is understood as sequence of (asynchronous) transactions,
    two packets each.

    The result units depend on test type, generally
    the count either transactions or packets (aggregated over directions).

    Optionally, this method sleeps if measurement finished before
    the time specified as duration.

    :param duration: Trial duration [s].
    :param transmit_rate: Target rate in transactions per second.
    :type duration: float
    :type transmit_rate: float
    :returns: Structure containing the result of the measurement.
    :rtype: ReceiveRateMeasurement
    :raises RuntimeError: If TG is not set or if node is not TG
        or if subtype is not specified.
    :raises NotImplementedError: If TG is not supported.
    """
    duration = float(duration)
    time_start = time.monotonic()
    time_stop = time_start + duration
    if self.resetter:
        # Give DUT a fresh state before the trial.
        self.resetter()
    result = self._send_traffic_on_tg_with_ramp_up(
        duration=duration,
        rate=transmit_rate,
    )
    logger.debug(f"trial measurement result: {result!r}")
    # In PLRsearch, computation needs the specified time to complete.
    if self.sleep_till_duration:
        sleeptime = time_stop - time.monotonic()
        if sleeptime > 0.0:
            # TODO: Sometimes we have time to do additional trials here,
            # adapt PLRsearch to accept all the results.
            time.sleep(sleeptime)
    return result
def set_rate_provider_defaults(
        self,
        frame_size,
        traffic_profile,
        ppta=1,
        resetter=None,
        traffic_directions=2,
        transaction_duration=0.0,
        transaction_scale=0,
        transaction_type=u"packet",
        duration_limit=0.0,
        negative_loss=True,
        sleep_till_duration=False,
        use_latency=False,
        ramp_up_rate=None,
        ramp_up_duration=None,
        state_timeout=300.0,
    ):
    """Store values accessed by measure().

    :param frame_size: Frame size identifier or value [B].
    :param traffic_profile: Module name as a traffic profile identifier.
        See GPL/traffic_profiles/trex for implemented modules.
    :param ppta: Packets per transaction, aggregated over directions.
        Needed for udp_pps which does not have a good transaction counter,
        so we need to compute expected number of packets.
        Default: 1.
    :param resetter: Callable to reset DUT state for repeated trials.
    :param traffic_directions: Traffic from packet counting point of view
        is bi- (2) or uni- (1) directional.
        Default: 2
    :param transaction_duration: Total expected time to close transaction.
    :param transaction_scale: Number of transactions to perform.
        0 (default) means unlimited.
    :param transaction_type: An identifier specifying which counters
        and formulas to use when computing attempted and failed
        transactions. Default: "packet".
        TODO: Does this also specify parsing for the measured duration?
    :param duration_limit: Zero or maximum limit for computed (or given)
        duration.
    :param negative_loss: If false, negative loss is reported as zero loss.
    :param sleep_till_duration: If true and measurement returned faster,
        sleep until it matches duration. Needed for PLRsearch.
    :param use_latency: Whether to measure latency during the trial.
        Default: False.
    :param ramp_up_rate: Rate to use in ramp-up trials [pps].
    :param ramp_up_duration: Duration of ramp-up trials [s].
    :param state_timeout: Time of life of DUT state [s].
    :type frame_size: str or int
    :type traffic_profile: str
    :type ppta: int
    :type resetter: Optional[Callable[[], None]]
    :type traffic_directions: int
    :type transaction_duration: float
    :type transaction_scale: int
    :type transaction_type: str
    :type duration_limit: float
    :type negative_loss: bool
    :type sleep_till_duration: bool
    :type use_latency: bool
    :type ramp_up_rate: float
    :type ramp_up_duration: float
    :type state_timeout: float
    """
    self.frame_size = frame_size
    self.traffic_profile = str(traffic_profile)
    self.resetter = resetter
    self.ppta = int(ppta)
    self.traffic_directions = int(traffic_directions)
    self.transaction_duration = float(transaction_duration)
    self.transaction_scale = int(transaction_scale)
    self.transaction_type = str(transaction_type)
    self.duration_limit = float(duration_limit)
    self.negative_loss = bool(negative_loss)
    self.sleep_till_duration = bool(sleep_till_duration)
    self.use_latency = bool(use_latency)
    # Fixed: float(None) raised TypeError whenever the ramp-up arguments
    # kept their None defaults. Store 0.0 instead; it is falsy, so the
    # "if self.ramp_up_rate:" checks elsewhere behave as before.
    self.ramp_up_rate = float(ramp_up_rate) if ramp_up_rate else 0.0
    self.ramp_up_duration = \
        float(ramp_up_duration) if ramp_up_duration else 0.0
    self.state_timeout = float(state_timeout)
1411 class OptimizedSearch:
1412 """Class to be imported as Robot Library, containing search keywords.
1414 Aside of setting up measurer and forwarding arguments,
1415 the main business is to translate min/max rate from unidir to aggregate.
def perform_optimized_ndrpdr_search(
        frame_size,
        traffic_profile,
        minimum_transmit_rate,
        maximum_transmit_rate,
        packet_loss_ratio=0.005,
        final_relative_width=0.005,
        final_trial_duration=30.0,
        initial_trial_duration=1.0,
        number_of_intermediate_phases=2,
        timeout=720.0,
        doublings=1,
        ppta=1,
        resetter=None,
        traffic_directions=2,
        transaction_duration=0.0,
        transaction_scale=0,
        transaction_type=u"packet",
        use_latency=False,
        ramp_up_rate=None,
        ramp_up_duration=None,
        state_timeout=300.0,
):
    """Setup initialized TG, perform optimized search, return intervals.

    If transaction_scale is nonzero, all non-init trial durations
    are set to 2.0 (as they do not affect the real trial duration)
    and zero intermediate phases are used.
    The initial phase still uses 1.0 seconds, to force remeasurement.
    That makes initial phase act as a warmup.

    :param frame_size: Frame size identifier or value [B].
    :param traffic_profile: Module name as a traffic profile identifier.
        See GPL/traffic_profiles/trex for implemented modules.
    :param minimum_transmit_rate: Minimal load in transactions per second.
    :param maximum_transmit_rate: Maximal load in transactions per second.
    :param packet_loss_ratio: Fraction of packets lost, for PDR [1].
    :param final_relative_width: Final lower bound transmit rate
        cannot be more distant that this multiple of upper bound [1].
    :param final_trial_duration: Trial duration for the final phase [s].
    :param initial_trial_duration: Trial duration for the initial phase
        and also for the first intermediate phase [s].
    :param number_of_intermediate_phases: Number of intermediate phases
        to perform before the final phase [1].
    :param timeout: The search will fail itself when not finished
        before this overall time [s].
    :param doublings: How many doublings to do in external search step.
        Default 1 is suitable for fairly stable tests,
        less stable tests might get better overall duration with 2 or more.
    :param ppta: Packets per transaction, aggregated over directions.
        Needed for udp_pps which does not have a good transaction counter,
        so we need to compute expected number of packets.
    :param resetter: Callable to reset DUT state for repeated trials.
    :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
    :param transaction_duration: Total expected time to close transaction.
    :param transaction_scale: Number of transactions to perform.
        0 (default) means unlimited.
    :param transaction_type: An identifier specifying which counters
        and formulas to use when computing attempted and failed
        transactions. Default: "packet".
    :param use_latency: Whether to measure latency during the trial.
    :param ramp_up_rate: Rate to use in ramp-up trials [pps].
    :param ramp_up_duration: Duration of ramp-up trials [s].
    :param state_timeout: Time of life of DUT state [s].
    :type frame_size: str or int
    :type traffic_profile: str
    :type minimum_transmit_rate: float
    :type maximum_transmit_rate: float
    :type packet_loss_ratio: float
    :type final_relative_width: float
    :type final_trial_duration: float
    :type initial_trial_duration: float
    :type number_of_intermediate_phases: int
    :type timeout: float
    :type doublings: int
    :type ppta: int
    :type resetter: Optional[Callable[[], None]]
    :type traffic_directions: int
    :type transaction_duration: float
    :type transaction_scale: int
    :type transaction_type: str
    :type use_latency: bool
    :type ramp_up_rate: float
    :type ramp_up_duration: float
    :type state_timeout: float
    :returns: Structure containing narrowed down NDR and PDR intervals
        and their measurements.
    :rtype: NdrPdrResult
    :raises RuntimeError: If total duration is larger than timeout.
    """
    # we need instance of TrafficGenerator instantiated by Robot Framework
    # to be able to use trex_stl-*()
    tg_instance = BuiltIn().get_library_instance(
        u"resources.libraries.python.TrafficGenerator"
    )
    # Overrides for fixed transaction amount.
    # TODO: Move to robot code? We have two call sites, so this saves space,
    # even though this is surprising for log readers.
    if transaction_scale:
        initial_trial_duration = 1.0
        final_trial_duration = 2.0
        number_of_intermediate_phases = 0
        timeout += transaction_scale * 3e-4
    tg_instance.set_rate_provider_defaults(
        frame_size=frame_size,
        traffic_profile=traffic_profile,
        sleep_till_duration=False,
        ppta=ppta,
        resetter=resetter,
        traffic_directions=traffic_directions,
        transaction_duration=transaction_duration,
        transaction_scale=transaction_scale,
        transaction_type=transaction_type,
        use_latency=use_latency,
        ramp_up_rate=ramp_up_rate,
        ramp_up_duration=ramp_up_duration,
        state_timeout=state_timeout,
    )
    algorithm = MultipleLossRatioSearch(
        measurer=tg_instance,
        final_trial_duration=final_trial_duration,
        final_relative_width=final_relative_width,
        number_of_intermediate_phases=number_of_intermediate_phases,
        initial_trial_duration=initial_trial_duration,
        timeout=timeout,
        doublings=doublings,
    )
    result = algorithm.narrow_down_ndr_and_pdr(
        min_rate=minimum_transmit_rate,
        max_rate=maximum_transmit_rate,
        packet_loss_ratio=packet_loss_ratio,
    )
    return result
1557 def perform_soak_search(
1560 minimum_transmit_rate,
1561 maximum_transmit_rate,
1568 trace_enabled=False,
1569 traffic_directions=2,
1570 transaction_duration=0.0,
1571 transaction_scale=0,
1572 transaction_type=u"packet",
1575 ramp_up_duration=None,
1576 state_timeout=300.0,
1578 """Setup initialized TG, perform soak search, return avg and stdev.
1580 :param frame_size: Frame size identifier or value [B].
1581 :param traffic_profile: Module name as a traffic profile identifier.
1582 See GPL/traffic_profiles/trex for implemented modules.
1583 :param minimum_transmit_rate: Minimal load in transactions per second.
1584 :param maximum_transmit_rate: Maximal load in transactions per second.
1585 :param plr_target: Fraction of packets lost to achieve [1].
1586 :param tdpt: Trial duration per trial.
1587 The algorithm linearly increases trial duration with trial number,
1588 this is the increment between successive trials, in seconds.
1589 :param initial_count: Offset to apply before the first trial.
1590 For example initial_count=50 makes first trial to be 51*tdpt long.
1591 This is needed because initial "search" phase of integrator
1592 takes significant time even without any trial results.
1593 :param timeout: The search will stop after this overall time [s].
1594 :param ppta: Packets per transaction, aggregated over directions.
1595 Needed for udp_pps which does not have a good transaction counter,
1596 so we need to compute expected number of packets.
1598 :param resetter: Callable to reset DUT state for repeated trials.
1599 :param trace_enabled: True if trace enabled else False.
1600 This is very verbose tracing on numeric computations,
1601 do not use in production.
1603 :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
1605 :param transaction_duration: Total expected time to close transaction.
1606 :param transaction_scale: Number of transactions to perform.
1607 0 (default) means unlimited.
1608 :param transaction_type: An identifier specifying which counters
1609 and formulas to use when computing attempted and failed
1610 transactions. Default: "packet".
1611 :param use_latency: Whether to measure latency during the trial.
1613 :param ramp_up_rate: Rate to use in ramp-up trials [pps].
1614 :param ramp_up_duration: Duration of ramp-up trials [s].
1615 :param state_timeout: Time of life of DUT state [s].
1616 :type frame_size: str or int
1617 :type traffic_profile: str
1618 :type minimum_transmit_rate: float
1619 :type maximum_transmit_rate: float
1620 :type plr_target: float
1621 :type initial_count: int
1622 :type timeout: float
1624 :type resetter: Optional[Callable[[], None]]
1625 :type trace_enabled: bool
1626 :type traffic_directions: int
1627 :type transaction_duration: float
1628 :type transaction_scale: int
1629 :type transaction_type: str
1630 :type use_latency: bool
1631 :type ramp_up_rate: float
1632 :type ramp_up_duration: float
1633 :type state_timeout: float
1634 :returns: Average and stdev of estimated aggregate rate giving PLR.
1635 :rtype: 2-tuple of float
1637 tg_instance = BuiltIn().get_library_instance(
1638 u"resources.libraries.python.TrafficGenerator"
1640 # Overrides for fixed transaction amount.
1641 # TODO: Move to robot code? We have a single call site
1642 # but MLRsearch has two and we want the two to be used similarly.
1643 if transaction_scale:
1644 # TODO: What is a good value for max scale?
1645 # TODO: Scale the timeout with transaction scale.
1647 tg_instance.set_rate_provider_defaults(
1648 frame_size=frame_size,
1649 traffic_profile=traffic_profile,
1650 negative_loss=False,
1651 sleep_till_duration=True,
1654 traffic_directions=traffic_directions,
1655 transaction_duration=transaction_duration,
1656 transaction_scale=transaction_scale,
1657 transaction_type=transaction_type,
1658 use_latency=use_latency,
1659 ramp_up_rate=ramp_up_rate,
1660 ramp_up_duration=ramp_up_duration,
1661 state_timeout=state_timeout,
1663 algorithm = PLRsearch(
1664 measurer=tg_instance,
1665 trial_duration_per_trial=tdpt,
1666 packet_loss_ratio_target=plr_target,
1667 trial_number_offset=initial_count,
1669 trace_enabled=trace_enabled,
1671 result = algorithm.search(
1672 min_rate=minimum_transmit_rate,
1673 max_rate=maximum_transmit_rate,