feat(core): Multilink TRex ASTF Async mode
[csit.git] / resources / libraries / python / TrafficGenerator.py
1 # Copyright (c) 2023 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Performance testing traffic generator library."""
15
16 import math
17 import time
18
19 from robot.api import logger
20 from robot.libraries.BuiltIn import BuiltIn
21
22 from .Constants import Constants
23 from .DropRateSearch import DropRateSearch
24 from .MLRsearch.AbstractMeasurer import AbstractMeasurer
25 from .MLRsearch.MultipleLossRatioSearch import MultipleLossRatioSearch
26 from .MLRsearch.ReceiveRateMeasurement import ReceiveRateMeasurement
27 from .PLRsearch.PLRsearch import PLRsearch
28 from .OptionString import OptionString
29 from .ssh import exec_cmd_no_error, exec_cmd
30 from .topology import NodeType
31 from .topology import NodeSubTypeTG
32 from .topology import Topology
33 from .TRexConfigGenerator import TrexConfig
34 from .DUTSetup import DUTSetup as DS
35
36 __all__ = [u"TGDropRateSearchImpl", u"TrafficGenerator", u"OptimizedSearch"]
37
38
def check_subtype(node):
    """Return supported subtype of given node, or raise an exception.

    Currently only one subtype is supported,
    but we want our code to be ready for other ones.

    :param node: Topology node to check. Can be None.
    :type node: dict or NoneType
    :returns: Subtype detected.
    :rtype: NodeSubTypeTG
    :raises RuntimeError: If node is not supported, message explains how.
    """
    # Guard clauses: raise immediately on the first violated condition.
    if node.get(u"type") is None:
        raise RuntimeError(u"Node type is not defined")
    if node[u"type"] != NodeType.TG:
        raise RuntimeError(f"Node type is {node[u'type']!r}, not a TG")
    if node.get(u"subtype") is None:
        raise RuntimeError(u"TG subtype is not defined")
    if node[u"subtype"] != NodeSubTypeTG.TREX:
        raise RuntimeError(f"TG subtype {node[u'subtype']!r} is not supported")
    return NodeSubTypeTG.TREX
62
63
class TGDropRateSearchImpl(DropRateSearch):
    """Drop Rate Search implementation."""

    def measure_loss(
            self, rate, frame_size, loss_acceptance, loss_acceptance_type,
            traffic_profile):
        """Runs the traffic and evaluate the measured results.

        :param rate: Offered traffic load.
        :param frame_size: Size of frame.
        :param loss_acceptance: Permitted drop ratio or frames count.
        :param loss_acceptance_type: Type of permitted loss.
        :param traffic_profile: Module name as a traffic profile identifier.
            See GPL/traffic_profiles/trex for implemented modules.
        :type rate: float
        :type frame_size: str
        :type loss_acceptance: float
        :type loss_acceptance_type: LossAcceptanceType
        :type traffic_profile: str
        :returns: Drop threshold exceeded? (True/False)
        :rtype: bool
        :raises NotImplementedError: If TG is not supported.
        :raises RuntimeError: If TG is not specified.
        """
        # We need the instance of TrafficGenerator instantiated by
        # Robot Framework to be able to use trex_stl-*().
        tg_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.TrafficGenerator"
        )
        subtype = check_subtype(tg_instance.node)
        if subtype == NodeSubTypeTG.TREX:
            unit_rate = str(rate) + self.get_rate_type_str()
            tg_instance.trex_stl_start_remote_exec(
                self.get_duration(), unit_rate, frame_size, traffic_profile
            )
            loss = tg_instance.get_loss()
            sent = tg_instance.get_sent()
            if self.loss_acceptance_type_is_percentage():
                loss = (float(loss) / float(sent)) * 100
            # Log message now matches the actual comparison operator below.
            logger.trace(
                f"comparing: {loss} <= {loss_acceptance} {loss_acceptance_type}"
            )
            return float(loss) <= float(loss_acceptance)
        return False

    def get_latency(self):
        """Returns min/avg/max latency.

        :returns: Latency stats.
        :rtype: list
        """
        tg_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.TrafficGenerator"
        )
        return tg_instance.get_latency_int()
122
123
class TrexMode:
    """Enumeration of T-Rex traffic generator operating modes."""

    # Advanced stateful mode, used for L7 testing.
    ASTF = u"ASTF"
    # Stateless mode, used for L2/L3 testing.
    STL = u"STL"
130
131
132 class TrafficGenerator(AbstractMeasurer):
133     """Traffic Generator."""
134
135     # Use one instance of TrafficGenerator for all tests in test suite
136     ROBOT_LIBRARY_SCOPE = u"TEST SUITE"
137
138     def __init__(self):
139         self._node = None
140         self._mode = None
141         # TG interface order mapping
142         self._ifaces_reordered = False
143         # Result holding fields, to be removed.
144         self._result = None
145         self._loss = None
146         self._sent = None
147         self._latency = None
148         self._received = None
149         self._approximated_rate = None
150         self._approximated_duration = None
151         self._l7_data = None
152         # Measurement input fields, needed for async stop result.
153         self._start_time = None
154         self._stop_time = None
155         self._rate = None
156         self._target_duration = None
157         self._duration = None
158         # Other input parameters, not knowable from measure() signature.
159         self.frame_size = None
160         self.traffic_profile = None
161         self.traffic_directions = None
162         self.negative_loss = None
163         self.use_latency = None
164         self.ppta = None
165         self.resetter = None
166         self.transaction_scale = None
167         self.transaction_duration = None
168         self.sleep_till_duration = None
169         self.transaction_type = None
170         self.duration_limit = None
171         self.ramp_up_start = None
172         self.ramp_up_stop = None
173         self.ramp_up_rate = None
174         self.ramp_up_duration = None
175         self.state_timeout = None
176         # Transient data needed for async measurements.
177         self._xstats = ()
178
179     @property
180     def node(self):
181         """Getter.
182
183         :returns: Traffic generator node.
184         :rtype: dict
185         """
186         return self._node
187
188     def get_loss(self):
189         """Return number of lost packets.
190
191         :returns: Number of lost packets.
192         :rtype: str
193         """
194         return self._loss
195
196     def get_sent(self):
197         """Return number of sent packets.
198
199         :returns: Number of sent packets.
200         :rtype: str
201         """
202         return self._sent
203
204     def get_received(self):
205         """Return number of received packets.
206
207         :returns: Number of received packets.
208         :rtype: str
209         """
210         return self._received
211
212     def get_latency_int(self):
213         """Return rounded min/avg/max latency.
214
215         :returns: Latency stats.
216         :rtype: list
217         """
218         return self._latency
219
220     def get_approximated_rate(self):
221         """Return approximated rate computed as ratio of transmitted packets
222         over duration of trial.
223
224         :returns: Approximated rate.
225         :rtype: str
226         """
227         return self._approximated_rate
228
229     def get_l7_data(self):
230         """Return L7 data.
231
232         :returns: Number of received packets.
233         :rtype: dict
234         """
235         return self._l7_data
236
237     def check_mode(self, expected_mode):
238         """Check TG mode.
239
240         :param expected_mode: Expected traffic generator mode.
241         :type expected_mode: object
242         :raises RuntimeError: In case of unexpected TG mode.
243         """
244         if self._mode == expected_mode:
245             return
246         raise RuntimeError(
247             f"{self._node[u'subtype']} not running in {expected_mode} mode!"
248         )
249
250     @staticmethod
251     def get_tg_type(tg_node):
252         """Log and return the installed traffic generator type.
253
254         :param tg_node: Node from topology file.
255         :type tg_node: dict
256         :returns: Traffic generator type string.
257         :rtype: str
258         :raises RuntimeError: If command returns nonzero return code.
259         """
260         return str(check_subtype(tg_node))
261
262     @staticmethod
263     def get_tg_version(tg_node):
264         """Log and return the installed traffic generator version.
265
266         :param tg_node: Node from topology file.
267         :type tg_node: dict
268         :returns: Traffic generator version string.
269         :rtype: str
270         :raises RuntimeError: If command returns nonzero return code.
271         """
272         subtype = check_subtype(tg_node)
273         if subtype == NodeSubTypeTG.TREX:
274             command = f"cat {Constants.TREX_INSTALL_DIR}/VERSION"
275             message = u"Get T-Rex version failed!"
276             stdout, _ = exec_cmd_no_error(tg_node, command, message=message)
277             return stdout.strip()
278         return "none"
279
280     def initialize_traffic_generator(self, osi_layer, parallel_links=1):
281         """TG initialization.
282
283         :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
284         :param parallel_links: Number of parallel links to configure.
285         :type osi_layer: str
286         :type parallel_links: int
287         :raises ValueError: If OSI layer is unknown.
288         """
289         if osi_layer not in ("L2", "L3", "L7"):
290             raise ValueError("Unknown OSI layer!")
291
292         topology = BuiltIn().get_variable_value("&{topology_info}")
293         self._node = topology["TG"]
294         subtype = check_subtype(self._node)
295
296         if subtype == NodeSubTypeTG.TREX:
297             trex_topology = list()
298             self._mode = TrexMode.ASTF if osi_layer == "L7" else TrexMode.STL
299
300             for link in range(1, parallel_links*2, 2):
301                 tg_if1_adj_addr = topology[f"TG_pf{link+1}_mac"][0]
302                 tg_if2_adj_addr = topology[f"TG_pf{link}_mac"][0]
303                 if osi_layer in ("L3", "L7") and "DUT1" in topology.keys():
304                     ifl = BuiltIn().get_variable_value("${int}")
305                     last = topology["duts_count"]
306                     tg_if1_adj_addr = Topology().get_interface_mac(
307                         topology["DUT1"],
308                         BuiltIn().get_variable_value(
309                             f"${{DUT1_{ifl}{link}}}[0]"
310                         )
311                     )
312                     tg_if2_adj_addr = Topology().get_interface_mac(
313                         topology[f"DUT{last}"],
314                         BuiltIn().get_variable_value(
315                             f"${{DUT{last}_{ifl}{link+1}}}[0]"
316                         )
317                     )
318
319                 trex_topology.append(
320                     dict(
321                         interface=topology[f"TG_pf{link}"][0],
322                         dst_mac=tg_if1_adj_addr
323                     )
324                 )
325                 trex_topology.append(
326                     dict(
327                         interface=topology[f"TG_pf{link+1}"][0],
328                         dst_mac=tg_if2_adj_addr
329                     )
330                 )
331                 if1_pci = topology[f"TG_pf{link}_pci"][0]
332                 if2_pci = topology[f"TG_pf{link+1}_pci"][0]
333                 if min(if1_pci, if2_pci) != if1_pci:
334                     self._ifaces_reordered = True
335                     trex_topology.reverse()
336
337             TrexConfig.add_startup_configuration(
338                 self._node, trex_topology
339             )
340             TrafficGenerator.startup_trex(
341                 self._node, osi_layer, subtype=subtype
342             )
343
344     @staticmethod
345     def startup_trex(tg_node, osi_layer, subtype=None):
346         """Startup sequence for the TRex traffic generator.
347
348         :param tg_node: Traffic generator node.
349         :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
350         :param subtype: Traffic generator sub-type.
351         :type tg_node: dict
352         :type osi_layer: str
353         :type subtype: NodeSubTypeTG
354         :raises RuntimeError: If T-Rex startup failed.
355         :raises ValueError: If OSI layer is not supported.
356         """
357         if not subtype:
358             subtype = check_subtype(tg_node)
359         if subtype == NodeSubTypeTG.TREX:
360             for _ in range(0, 3):
361                 # Kill TRex only if it is already running.
362                 cmd = u"sh -c \"pgrep t-rex && pkill t-rex && sleep 3 || true\""
363                 exec_cmd_no_error(
364                     tg_node, cmd, sudo=True, message=u"Kill TRex failed!"
365                 )
366
367                 # Prepare interfaces for TRex.
368                 tg_port_drv = Constants.TREX_PORT_DRIVER
369                 mlx_driver = u""
370                 for port in tg_node[u"interfaces"].values():
371                     if u"Mellanox" in port.get(u"model"):
372                         mlx_driver = port.get(u"driver")
373                         pci_addr = port.get(u'pci_address')
374                         cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
375                         if cur_driver == mlx_driver:
376                             pass
377                         elif not cur_driver:
378                             DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
379                         else:
380                             DS.pci_driver_unbind(tg_node, pci_addr)
381                             DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
382                     else:
383                         pci_addr = port.get(u'pci_address')
384                         cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
385                         if cur_driver:
386                             DS.pci_driver_unbind(tg_node, pci_addr)
387                         DS.pci_driver_bind(tg_node, pci_addr, tg_port_drv)
388
389                 # Start TRex.
390                 cd_cmd = f"cd '{Constants.TREX_INSTALL_DIR}/scripts/'"
391                 trex_cmd = OptionString([u"nohup", u"./t-rex-64"])
392                 trex_cmd.add(u"-i")
393                 trex_cmd.add(u"--prefix $(hostname)")
394                 trex_cmd.add(u"--hdrh")
395                 trex_cmd.add(u"--no-scapy-server")
396                 trex_cmd.add_if(u"--astf", osi_layer == u"L7")
397                 # OptionString does not create double space if extra is empty.
398                 trex_cmd.add(f"{Constants.TREX_EXTRA_CMDLINE}")
399                 inner_command = f"{cd_cmd} && {trex_cmd} > /tmp/trex.log 2>&1 &"
400                 cmd = f"sh -c \"{inner_command}\" > /dev/null"
401                 try:
402                     exec_cmd_no_error(tg_node, cmd, sudo=True)
403                 except RuntimeError:
404                     cmd = u"sh -c \"cat /tmp/trex.log\""
405                     exec_cmd_no_error(
406                         tg_node, cmd, sudo=True,
407                         message=u"Get TRex logs failed!"
408                     )
409                     raise RuntimeError(u"Start TRex failed!")
410
411                 # Test T-Rex API responsiveness.
412                 cmd = f"python3 {Constants.REMOTE_FW_DIR}/GPL/tools/trex/"
413                 if osi_layer in (u"L2", u"L3"):
414                     cmd += u"trex_stl_assert.py"
415                 elif osi_layer == u"L7":
416                     cmd += u"trex_astf_assert.py"
417                 else:
418                     raise ValueError(u"Unknown OSI layer!")
419                 try:
420                     exec_cmd_no_error(
421                         tg_node, cmd, sudo=True,
422                         message=u"T-Rex API is not responding!", retries=20
423                     )
424                 except RuntimeError:
425                     continue
426                 return
427             # After max retries TRex is still not responding to API critical
428             # error occurred.
429             exec_cmd(tg_node, u"cat /tmp/trex.log", sudo=True)
430             raise RuntimeError(u"Start T-Rex failed after multiple retries!")
431
432     @staticmethod
433     def is_trex_running(node):
434         """Check if T-Rex is running using pidof.
435
436         :param node: Traffic generator node.
437         :type node: dict
438         :returns: True if T-Rex is running otherwise False.
439         :rtype: bool
440         """
441         ret, _, _ = exec_cmd(node, u"pgrep t-rex", sudo=True)
442         return bool(int(ret) == 0)
443
444     @staticmethod
445     def teardown_traffic_generator(node):
446         """TG teardown.
447
448         :param node: Traffic generator node.
449         :type node: dict
450         :returns: nothing
451         :raises RuntimeError: If node type is not a TG,
452             or if T-Rex teardown fails.
453         """
454         subtype = check_subtype(node)
455         if subtype == NodeSubTypeTG.TREX:
456             exec_cmd_no_error(
457                 node,
458                 u"sh -c "
459                 u"\"if pgrep t-rex; then sudo pkill t-rex && sleep 3; fi\"",
460                 sudo=False,
461                 message=u"T-Rex kill failed!"
462             )
463
464     def trex_astf_stop_remote_exec(self, node):
465         """Execute T-Rex ASTF script on remote node over ssh to stop running
466         traffic.
467
468         Internal state is updated with measurement results.
469
470         :param node: T-Rex generator node.
471         :type node: dict
472         :raises RuntimeError: If stop traffic script fails.
473         """
474         command_line = OptionString().add("python3")
475         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
476         command_line.add(f"'{dirname}/trex_astf_stop.py'")
477         command_line.add("--xstat")
478         for value in self._xstats:
479             if value is not None:
480                 value = value.replace("'", "\"")
481                 command_line.add(f"'{value}'")
482         stdout, _ = exec_cmd_no_error(
483             node, command_line,
484             message="T-Rex ASTF runtime error!"
485         )
486         self._parse_traffic_results(stdout)
487
488     def trex_stl_stop_remote_exec(self, node):
489         """Execute T-Rex STL script on remote node over ssh to stop running
490         traffic.
491
492         Internal state is updated with measurement results.
493
494         :param node: T-Rex generator node.
495         :type node: dict
496         :raises RuntimeError: If stop traffic script fails.
497         """
498         command_line = OptionString().add("python3")
499         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
500         command_line.add(f"'{dirname}/trex_stl_stop.py'")
501         command_line.add("--xstat")
502         for value in self._xstats:
503             if value is not None:
504                 value = value.replace("'", "\"")
505                 command_line.add(f"'{value}'")
506         stdout, _ = exec_cmd_no_error(
507             node, command_line,
508             message="T-Rex STL runtime error!"
509         )
510         self._parse_traffic_results(stdout)
511
512     def stop_traffic_on_tg(self):
513         """Stop all traffic on TG.
514
515         :returns: Structure containing the result of the measurement.
516         :rtype: ReceiveRateMeasurement
517         :raises ValueError: If TG traffic profile is not supported.
518         """
519         subtype = check_subtype(self._node)
520         if subtype != NodeSubTypeTG.TREX:
521             raise ValueError(f"Unsupported TG subtype: {subtype!r}")
522         if u"trex-astf" in self.traffic_profile:
523             self.trex_astf_stop_remote_exec(self._node)
524         elif u"trex-stl" in self.traffic_profile:
525             self.trex_stl_stop_remote_exec(self._node)
526         else:
527             raise ValueError(u"Unsupported T-Rex traffic profile!")
528         self._stop_time = time.monotonic()
529
530         return self._get_measurement_result()
531
532     def _compute_duration(self, duration, multiplier):
533         """Compute duration for profile driver.
534
535         The final result is influenced by transaction scale and duration limit.
536         It is assumed a higher level function has already set those to self.
537         The duration argument is the target value from search point of view,
538         before the overrides are applied here.
539
540         Minus one (signalling async traffic start) is kept.
541
542         Completeness flag is also included. Duration limited or async trials
543         are not considered complete for ramp-up purposes.
544
545         :param duration: Time expressed in seconds for how long to send traffic.
546         :param multiplier: Traffic rate in transactions per second.
547         :type duration: float
548         :type multiplier: float
549         :returns: New duration and whether it was a complete ramp-up candidate.
550         :rtype: float, bool
551         """
552         if duration < 0.0:
553             # Keep the async -1.
554             return duration, False
555         computed_duration = duration
556         if self.transaction_scale:
557             computed_duration = self.transaction_scale / multiplier
558             # Log the computed duration,
559             # so we can compare with what telemetry suggests
560             # the real duration was.
561             logger.debug(f"Expected duration {computed_duration}")
562         if not self.duration_limit:
563             return computed_duration, True
564         limited_duration = min(computed_duration, self.duration_limit)
565         return limited_duration, (limited_duration == computed_duration)
566
567     def trex_astf_start_remote_exec(
568             self, duration, multiplier, async_call=False):
569         """Execute T-Rex ASTF script on remote node over ssh to start running
570         traffic.
571
572         In sync mode, measurement results are stored internally.
573         In async mode, initial data including xstats are stored internally.
574
575         This method contains the logic to compute duration as maximum time
576         if transaction_scale is nonzero.
577         The transaction_scale argument defines (limits) how many transactions
578         will be started in total. As that amount of transaction can take
579         considerable time (sometimes due to explicit delays in the profile),
580         the real time a trial needs to finish is computed here. For now,
581         in that case the duration argument is ignored, assuming it comes
582         from ASTF-unaware search algorithm. The overall time a single
583         transaction needs is given in parameter transaction_duration,
584         it includes both explicit delays and implicit time it takes
585         to transfer data (or whatever the transaction does).
586
587         Currently it is observed TRex does not start the ASTF traffic
588         immediately, an ad-hoc constant is added to the computed duration
589         to compensate for that.
590
591         If transaction_scale is zero, duration is not recomputed.
592         It is assumed the subsequent result parsing gets the real duration
593         if the traffic stops sooner for any reason.
594
595         Currently, it is assumed traffic profile defines a single transaction.
596         To avoid heavy logic here, the input rate is expected to be in
597         transactions per second, as that directly translates to TRex multiplier,
598         (assuming the profile does not override the default cps value of one).
599
600         :param duration: Time expressed in seconds for how long to send traffic.
601         :param multiplier: Traffic rate in transactions per second.
602         :param async_call: If enabled then don't wait for all incoming traffic.
603         :type duration: float
604         :type multiplier: int
605         :type async_call: bool
606         :raises RuntimeError: In case of T-Rex driver issue.
607         """
608         self.check_mode(TrexMode.ASTF)
609         p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
610         if not isinstance(duration, (float, int)):
611             duration = float(duration)
612
613         computed_duration, _ = self._compute_duration(duration, multiplier)
614
615         command_line = OptionString().add(u"python3")
616         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
617         command_line.add(f"'{dirname}/trex_astf_profile.py'")
618         command_line.change_prefix(u"--")
619         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
620         command_line.add_with_value(
621             u"profile", f"'{dirname}/{self.traffic_profile}.py'"
622         )
623         command_line.add_with_value(u"duration", f"{computed_duration!r}")
624         command_line.add_with_value(u"frame_size", self.frame_size)
625         command_line.add_with_value(
626             u"n_data_frames", Constants.ASTF_N_DATA_FRAMES
627         )
628         command_line.add_with_value(u"multiplier", multiplier)
629         command_line.add_with_value(u"port_0", p_0)
630         command_line.add_with_value(u"port_1", p_1)
631         command_line.add_with_value(
632             u"traffic_directions", self.traffic_directions
633         )
634         command_line.add_if(u"async_start", async_call)
635         command_line.add_if(u"latency", self.use_latency)
636         command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
637         command_line.add_with_value(
638             u"delay", Constants.PERF_TRIAL_ASTF_DELAY
639         )
640
641         self._start_time = time.monotonic()
642         self._rate = multiplier
643         stdout, _ = exec_cmd_no_error(
644             self._node, command_line, timeout=computed_duration + 10.0,
645             message=u"T-Rex ASTF runtime error!"
646         )
647
648         if async_call:
649             # no result
650             self._target_duration = None
651             self._duration = None
652             self._received = None
653             self._sent = None
654             self._loss = None
655             self._latency = None
656             xstats = []
657             self._l7_data = dict()
658             self._l7_data[u"client"] = dict()
659             self._l7_data[u"client"][u"active_flows"] = None
660             self._l7_data[u"client"][u"established_flows"] = None
661             self._l7_data[u"client"][u"traffic_duration"] = None
662             self._l7_data[u"server"] = dict()
663             self._l7_data[u"server"][u"active_flows"] = None
664             self._l7_data[u"server"][u"established_flows"] = None
665             self._l7_data[u"server"][u"traffic_duration"] = None
666             if u"udp" in self.traffic_profile:
667                 self._l7_data[u"client"][u"udp"] = dict()
668                 self._l7_data[u"client"][u"udp"][u"connects"] = None
669                 self._l7_data[u"client"][u"udp"][u"closed_flows"] = None
670                 self._l7_data[u"client"][u"udp"][u"err_cwf"] = None
671                 self._l7_data[u"server"][u"udp"] = dict()
672                 self._l7_data[u"server"][u"udp"][u"accepted_flows"] = None
673                 self._l7_data[u"server"][u"udp"][u"closed_flows"] = None
674             elif u"tcp" in self.traffic_profile:
675                 self._l7_data[u"client"][u"tcp"] = dict()
676                 self._l7_data[u"client"][u"tcp"][u"initiated_flows"] = None
677                 self._l7_data[u"client"][u"tcp"][u"connects"] = None
678                 self._l7_data[u"client"][u"tcp"][u"closed_flows"] = None
679                 self._l7_data[u"client"][u"tcp"][u"connattempt"] = None
680                 self._l7_data[u"server"][u"tcp"] = dict()
681                 self._l7_data[u"server"][u"tcp"][u"accepted_flows"] = None
682                 self._l7_data[u"server"][u"tcp"][u"connects"] = None
683                 self._l7_data[u"server"][u"tcp"][u"closed_flows"] = None
684             else:
685                 logger.warn(u"Unsupported T-Rex ASTF traffic profile!")
686             index = 0
687             for line in stdout.splitlines():
688                 if f"Xstats snapshot {index}: " in line:
689                     xstats.append(line[19:])
690                     index += 1
691             self._xstats = tuple(xstats)
692         else:
693             self._target_duration = duration
694             self._duration = computed_duration
695             self._parse_traffic_results(stdout)
696
697     def trex_stl_start_remote_exec(self, duration, rate, async_call=False):
698         """Execute T-Rex STL script on remote node over ssh to start running
699         traffic.
700
701         In sync mode, measurement results are stored internally.
702         In async mode, initial data including xstats are stored internally.
703
704         Mode-unaware code (e.g. in search algorithms) works with transactions.
705         To keep the logic simple, multiplier is set to that value.
706         As bidirectional traffic profiles send packets in both directions,
707         they are treated as transactions with two packets (one per direction).
708
709         :param duration: Time expressed in seconds for how long to send traffic.
710         :param rate: Traffic rate in transactions per second.
711         :param async_call: If enabled then don't wait for all incoming traffic.
712         :type duration: float
713         :type rate: str
714         :type async_call: bool
715         :raises RuntimeError: In case of T-Rex driver issue.
716         """
717         self.check_mode(TrexMode.STL)
718         p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
719         if not isinstance(duration, (float, int)):
720             duration = float(duration)
721
722         duration, _ = self._compute_duration(duration=duration, multiplier=rate)
723
724         command_line = OptionString().add(u"python3")
725         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
726         command_line.add(f"'{dirname}/trex_stl_profile.py'")
727         command_line.change_prefix(u"--")
728         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
729         command_line.add_with_value(
730             u"profile", f"'{dirname}/{self.traffic_profile}.py'"
731         )
732         command_line.add_with_value(u"duration", f"{duration!r}")
733         command_line.add_with_value(u"frame_size", self.frame_size)
734         command_line.add_with_value(u"rate", f"{rate!r}")
735         command_line.add_with_value(u"port_0", p_0)
736         command_line.add_with_value(u"port_1", p_1)
737         command_line.add_with_value(
738             u"traffic_directions", self.traffic_directions
739         )
740         command_line.add_if(u"async_start", async_call)
741         command_line.add_if(u"latency", self.use_latency)
742         command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
743         command_line.add_with_value(u"delay", Constants.PERF_TRIAL_STL_DELAY)
744
745         self._start_time = time.monotonic()
746         self._rate = float(rate[:-3]) if u"pps" in rate else float(rate)
747         stdout, _ = exec_cmd_no_error(
748             self._node, command_line, timeout=int(duration) + 60,
749             message=u"T-Rex STL runtime error"
750         )
751
752         if async_call:
753             # no result
754             self._target_duration = None
755             self._duration = None
756             self._received = None
757             self._sent = None
758             self._loss = None
759             self._latency = None
760
761             xstats = []
762             index = 0
763             for line in stdout.splitlines():
764                 if f"Xstats snapshot {index}: " in line:
765                     xstats.append(line[19:])
766                     index += 1
767             self._xstats = tuple(xstats)
768         else:
769             self._target_duration = duration
770             self._duration = duration
771             self._parse_traffic_results(stdout)
772
773     def send_traffic_on_tg(
774             self,
775             duration,
776             rate,
777             frame_size,
778             traffic_profile,
779             async_call=False,
780             ppta=1,
781             traffic_directions=2,
782             transaction_duration=0.0,
783             transaction_scale=0,
784             transaction_type=u"packet",
785             duration_limit=0.0,
786             use_latency=False,
787             ramp_up_rate=None,
788             ramp_up_duration=None,
789             state_timeout=240.0,
790             ramp_up_only=False,
791         ):
792         """Send traffic from all configured interfaces on TG.
793
794         In async mode, xstats is stored internally,
795         to enable getting correct result when stopping the traffic.
796         In both modes, stdout is returned,
797         but _parse_traffic_results only works in sync output.
798
799         Note that traffic generator uses DPDK driver which might
800         reorder port numbers based on wiring and PCI numbering.
801         This method handles that, so argument values are invariant,
802         but you can see swapped valued in debug logs.
803
804         When transaction_scale is specified, the duration value is ignored
805         and the needed time is computed. For cases where this results in
806         to too long measurement (e.g. teardown trial with small rate),
807         duration_limit is applied (of non-zero), so the trial is stopped sooner.
808
809         Bidirectional STL profiles are treated as transactions with two packets.
810
811         The return value is None for async.
812
813         :param duration: Duration of test traffic generation in seconds.
814         :param rate: Traffic rate in transactions per second.
815         :param frame_size: Frame size (L2) in Bytes.
816         :param traffic_profile: Module name as a traffic profile identifier.
817             See GPL/traffic_profiles/trex for implemented modules.
818         :param async_call: Async mode.
819         :param ppta: Packets per transaction, aggregated over directions.
820             Needed for udp_pps which does not have a good transaction counter,
821             so we need to compute expected number of packets.
822             Default: 1.
823         :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
824             Default: 2
825         :param transaction_duration: Total expected time to close transaction.
826         :param transaction_scale: Number of transactions to perform.
827             0 (default) means unlimited.
828         :param transaction_type: An identifier specifying which counters
829             and formulas to use when computing attempted and failed
830             transactions. Default: "packet".
831         :param duration_limit: Zero or maximum limit for computed (or given)
832             duration.
833         :param use_latency: Whether to measure latency during the trial.
834             Default: False.
835         :param ramp_up_rate: Rate to use in ramp-up trials [pps].
836         :param ramp_up_duration: Duration of ramp-up trials [s].
837         :param state_timeout: Time of life of DUT state [s].
838         :param ramp_up_only: If true, do not perform main trial measurement.
839         :type duration: float
840         :type rate: float
841         :type frame_size: str
842         :type traffic_profile: str
843         :type async_call: bool
844         :type ppta: int
845         :type traffic_directions: int
846         :type transaction_duration: float
847         :type transaction_scale: int
848         :type transaction_type: str
849         :type duration_limit: float
850         :type use_latency: bool
851         :type ramp_up_rate: float
852         :type ramp_up_duration: float
853         :type state_timeout: float
854         :type ramp_up_only: bool
855         :returns: TG results.
856         :rtype: ReceiveRateMeasurement or None
857         :raises ValueError: If TG traffic profile is not supported.
858         """
859         self.set_rate_provider_defaults(
860             frame_size=frame_size,
861             traffic_profile=traffic_profile,
862             ppta=ppta,
863             traffic_directions=traffic_directions,
864             transaction_duration=transaction_duration,
865             transaction_scale=transaction_scale,
866             transaction_type=transaction_type,
867             duration_limit=duration_limit,
868             use_latency=use_latency,
869             ramp_up_rate=ramp_up_rate,
870             ramp_up_duration=ramp_up_duration,
871             state_timeout=state_timeout,
872         )
873         return self._send_traffic_on_tg_with_ramp_up(
874             duration=duration,
875             rate=rate,
876             async_call=async_call,
877             ramp_up_only=ramp_up_only,
878         )
879
880     def _send_traffic_on_tg_internal(
881             self, duration, rate, async_call=False):
882         """Send traffic from all configured interfaces on TG.
883
884         This is an internal function, it assumes set_rate_provider_defaults
885         has been called to remember most values.
886         The reason why need to remember various values is that
887         the traffic can be asynchronous, and parsing needs those values.
888         The reason why this is is a separate function from the one
889         which calls set_rate_provider_defaults is that some search algorithms
890         need to specify their own values, and we do not want the measure call
891         to overwrite them with defaults.
892
893         This function is used both for automated ramp-up trials
894         and for explicitly called trials.
895
896         :param duration: Duration of test traffic generation in seconds.
897         :param rate: Traffic rate in transactions per second.
898         :param async_call: Async mode.
899         :type duration: float
900         :type rate: float
901         :type async_call: bool
902         :returns: TG results.
903         :rtype: ReceiveRateMeasurement or None
904         :raises ValueError: If TG traffic profile is not supported.
905         """
906         subtype = check_subtype(self._node)
907         if subtype == NodeSubTypeTG.TREX:
908             if u"trex-astf" in self.traffic_profile:
909                 self.trex_astf_start_remote_exec(
910                     duration, float(rate), async_call
911                 )
912             elif u"trex-stl" in self.traffic_profile:
913                 unit_rate_str = str(rate) + u"pps"
914                 self.trex_stl_start_remote_exec(
915                     duration, unit_rate_str, async_call
916                 )
917             else:
918                 raise ValueError(u"Unsupported T-Rex traffic profile!")
919
920         return None if async_call else self._get_measurement_result()
921
922     def _send_traffic_on_tg_with_ramp_up(
923             self, duration, rate, async_call=False, ramp_up_only=False):
924         """Send traffic from all interfaces on TG, maybe after ramp-up.
925
926         This is an internal function, it assumes set_rate_provider_defaults
927         has been called to remember most values.
928         The reason why need to remember various values is that
929         the traffic can be asynchronous, and parsing needs those values.
930         The reason why this is a separate function from the one
931         which calls set_rate_provider_defaults is that some search algorithms
932         need to specify their own values, and we do not want the measure call
933         to overwrite them with defaults.
934
935         If ramp-up tracking is detected, a computation is performed,
936         and if state timeout is near, trial at ramp-up rate and duration
937         is inserted before the main trial measurement.
938
939         The ramp_up_only parameter forces a ramp-up without immediate
940         trial measurement, which is useful in case self remembers
941         a previous ramp-up trial that belongs to a different test (phase).
942
943         Return None if trial is async or ramp-up only.
944
945         :param duration: Duration of test traffic generation in seconds.
946         :param rate: Traffic rate in transactions per second.
947         :param async_call: Async mode.
948         :param ramp_up_only: If true, do not perform main trial measurement.
949         :type duration: float
950         :type rate: float
951         :type async_call: bool
952         :type ramp_up_only: bool
953         :returns: TG results.
954         :rtype: ReceiveRateMeasurement or None
955         :raises ValueError: If TG traffic profile is not supported.
956         """
957         complete = False
958         if self.ramp_up_rate:
959             # Figure out whether we need to insert a ramp-up trial.
960             if ramp_up_only or self.ramp_up_start is None:
961                 # We never ramped up yet (at least not in this test case).
962                 ramp_up_needed = True
963             else:
964                 # We ramped up before, but maybe it was too long ago.
965                 # Adding a constant overhead to be safe.
966                 time_now = time.monotonic() + 1.0
967                 computed_duration, complete = self._compute_duration(
968                     duration=duration,
969                     multiplier=rate,
970                 )
971                 # There are two conditions for inserting ramp-up.
972                 # If early sessions are expiring already,
973                 # or if late sessions are to expire before measurement is over.
974                 ramp_up_start_delay = time_now - self.ramp_up_start
975                 ramp_up_stop_delay = time_now - self.ramp_up_stop
976                 ramp_up_stop_delay += computed_duration
977                 bigger_delay = max(ramp_up_start_delay, ramp_up_stop_delay)
978                 # Final boolean decision.
979                 ramp_up_needed = (bigger_delay >= self.state_timeout)
980             if ramp_up_needed:
981                 logger.debug(
982                     u"State may time out during next real trial, "
983                     u"inserting a ramp-up trial."
984                 )
985                 self.ramp_up_start = time.monotonic()
986                 self._send_traffic_on_tg_internal(
987                     duration=self.ramp_up_duration,
988                     rate=self.ramp_up_rate,
989                     async_call=async_call,
990                 )
991                 self.ramp_up_stop = time.monotonic()
992                 logger.debug(u"Ramp-up done.")
993             else:
994                 logger.debug(
995                     u"State will probably not time out during next real trial, "
996                     u"no ramp-up trial needed just yet."
997                 )
998         if ramp_up_only:
999             return None
1000         trial_start = time.monotonic()
1001         result = self._send_traffic_on_tg_internal(
1002             duration=duration,
1003             rate=rate,
1004             async_call=async_call,
1005         )
1006         trial_end = time.monotonic()
1007         if self.ramp_up_rate:
1008             # Optimization: No loss acts as a good ramp-up, if it was complete.
1009             if complete and result is not None and result.loss_count == 0:
1010                 logger.debug(u"Good trial acts as a ramp-up")
1011                 self.ramp_up_start = trial_start
1012                 self.ramp_up_stop = trial_end
1013             else:
1014                 logger.debug(u"Loss or incomplete, does not act as a ramp-up.")
1015         return result
1016
1017     def no_traffic_loss_occurred(self):
1018         """Fail if loss occurred in traffic run.
1019
1020         :returns: nothing
1021         :raises Exception: If loss occured.
1022         """
1023         if self._loss is None:
1024             raise RuntimeError(u"The traffic generation has not been issued")
1025         if self._loss != u"0":
1026             raise RuntimeError(f"Traffic loss occurred: {self._loss}")
1027
1028     def fail_if_no_traffic_forwarded(self):
1029         """Fail if no traffic forwarded.
1030
1031         :returns: nothing
1032         :raises Exception: If no traffic forwarded.
1033         """
1034         if self._received is None:
1035             raise RuntimeError(u"The traffic generation has not been issued")
1036         if self._received == 0:
1037             raise RuntimeError(u"No traffic forwarded")
1038
1039     def partial_traffic_loss_accepted(
1040             self, loss_acceptance, loss_acceptance_type):
1041         """Fail if loss is higher then accepted in traffic run.
1042
1043         :param loss_acceptance: Permitted drop ratio or frames count.
1044         :param loss_acceptance_type: Type of permitted loss.
1045         :type loss_acceptance: float
1046         :type loss_acceptance_type: LossAcceptanceType
1047         :returns: nothing
1048         :raises Exception: If loss is above acceptance criteria.
1049         """
1050         if self._loss is None:
1051             raise Exception(u"The traffic generation has not been issued")
1052
1053         if loss_acceptance_type == u"percentage":
1054             loss = (float(self._loss) / float(self._sent)) * 100
1055         elif loss_acceptance_type == u"frames":
1056             loss = float(self._loss)
1057         else:
1058             raise Exception(u"Loss acceptance type not supported")
1059
1060         if loss > float(loss_acceptance):
1061             raise Exception(
1062                 f"Traffic loss {loss} above loss acceptance: {loss_acceptance}"
1063             )
1064
1065     def _parse_traffic_results(self, stdout):
1066         """Parse stdout of scripts into fields of self.
1067
1068         Block of code to reuse, by sync start, or stop after async.
1069
1070         :param stdout: Text containing the standard output.
1071         :type stdout: str
1072         """
1073         subtype = check_subtype(self._node)
1074         if subtype == NodeSubTypeTG.TREX:
1075             # Last line from console output
1076             line = stdout.splitlines()[-1]
1077             results = line.split(u";")
1078             if results[-1] in (u" ", u""):
1079                 results.pop(-1)
1080             self._result = dict()
1081             for result in results:
1082                 key, value = result.split(u"=", maxsplit=1)
1083                 self._result[key.strip()] = value
1084             logger.info(f"TrafficGen results:\n{self._result}")
1085             self._received = int(self._result.get(u"total_received"), 0)
1086             self._sent = int(self._result.get(u"total_sent", 0))
1087             self._loss = int(self._result.get(u"frame_loss", 0))
1088             self._approximated_duration = \
1089                 self._result.get(u"approximated_duration", 0.0)
1090             if u"manual" not in str(self._approximated_duration):
1091                 self._approximated_duration = float(self._approximated_duration)
1092             self._latency = list()
1093             self._latency.append(self._result.get(u"latency_stream_0(usec)"))
1094             self._latency.append(self._result.get(u"latency_stream_1(usec)"))
1095             if self._mode == TrexMode.ASTF:
1096                 self._l7_data = dict()
1097                 self._l7_data[u"client"] = dict()
1098                 self._l7_data[u"client"][u"sent"] = \
1099                     int(self._result.get(u"client_sent", 0))
1100                 self._l7_data[u"client"][u"received"] = \
1101                     int(self._result.get(u"client_received", 0))
1102                 self._l7_data[u"client"][u"active_flows"] = \
1103                     int(self._result.get(u"client_active_flows", 0))
1104                 self._l7_data[u"client"][u"established_flows"] = \
1105                     int(self._result.get(u"client_established_flows", 0))
1106                 self._l7_data[u"client"][u"traffic_duration"] = \
1107                     float(self._result.get(u"client_traffic_duration", 0.0))
1108                 self._l7_data[u"client"][u"err_rx_throttled"] = \
1109                     int(self._result.get(u"client_err_rx_throttled", 0))
1110                 self._l7_data[u"client"][u"err_c_nf_throttled"] = \
1111                     int(self._result.get(u"client_err_nf_throttled", 0))
1112                 self._l7_data[u"client"][u"err_flow_overflow"] = \
1113                     int(self._result.get(u"client_err_flow_overflow", 0))
1114                 self._l7_data[u"server"] = dict()
1115                 self._l7_data[u"server"][u"active_flows"] = \
1116                     int(self._result.get(u"server_active_flows", 0))
1117                 self._l7_data[u"server"][u"established_flows"] = \
1118                     int(self._result.get(u"server_established_flows", 0))
1119                 self._l7_data[u"server"][u"traffic_duration"] = \
1120                     float(self._result.get(u"server_traffic_duration", 0.0))
1121                 self._l7_data[u"server"][u"err_rx_throttled"] = \
1122                     int(self._result.get(u"client_err_rx_throttled", 0))
1123                 if u"udp" in self.traffic_profile:
1124                     self._l7_data[u"client"][u"udp"] = dict()
1125                     self._l7_data[u"client"][u"udp"][u"connects"] = \
1126                         int(self._result.get(u"client_udp_connects", 0))
1127                     self._l7_data[u"client"][u"udp"][u"closed_flows"] = \
1128                         int(self._result.get(u"client_udp_closed", 0))
1129                     self._l7_data[u"client"][u"udp"][u"tx_bytes"] = \
1130                         int(self._result.get(u"client_udp_tx_bytes", 0))
1131                     self._l7_data[u"client"][u"udp"][u"rx_bytes"] = \
1132                         int(self._result.get(u"client_udp_rx_bytes", 0))
1133                     self._l7_data[u"client"][u"udp"][u"tx_packets"] = \
1134                         int(self._result.get(u"client_udp_tx_packets", 0))
1135                     self._l7_data[u"client"][u"udp"][u"rx_packets"] = \
1136                         int(self._result.get(u"client_udp_rx_packets", 0))
1137                     self._l7_data[u"client"][u"udp"][u"keep_drops"] = \
1138                         int(self._result.get(u"client_udp_keep_drops", 0))
1139                     self._l7_data[u"client"][u"udp"][u"err_cwf"] = \
1140                         int(self._result.get(u"client_err_cwf", 0))
1141                     self._l7_data[u"server"][u"udp"] = dict()
1142                     self._l7_data[u"server"][u"udp"][u"accepted_flows"] = \
1143                         int(self._result.get(u"server_udp_accepts", 0))
1144                     self._l7_data[u"server"][u"udp"][u"closed_flows"] = \
1145                         int(self._result.get(u"server_udp_closed", 0))
1146                     self._l7_data[u"server"][u"udp"][u"tx_bytes"] = \
1147                         int(self._result.get(u"server_udp_tx_bytes", 0))
1148                     self._l7_data[u"server"][u"udp"][u"rx_bytes"] = \
1149                         int(self._result.get(u"server_udp_rx_bytes", 0))
1150                     self._l7_data[u"server"][u"udp"][u"tx_packets"] = \
1151                         int(self._result.get(u"server_udp_tx_packets", 0))
1152                     self._l7_data[u"server"][u"udp"][u"rx_packets"] = \
1153                         int(self._result.get(u"server_udp_rx_packets", 0))
1154                 elif u"tcp" in self.traffic_profile:
1155                     self._l7_data[u"client"][u"tcp"] = dict()
1156                     self._l7_data[u"client"][u"tcp"][u"initiated_flows"] = \
1157                         int(self._result.get(u"client_tcp_connect_inits", 0))
1158                     self._l7_data[u"client"][u"tcp"][u"connects"] = \
1159                         int(self._result.get(u"client_tcp_connects", 0))
1160                     self._l7_data[u"client"][u"tcp"][u"closed_flows"] = \
1161                         int(self._result.get(u"client_tcp_closed", 0))
1162                     self._l7_data[u"client"][u"tcp"][u"connattempt"] = \
1163                         int(self._result.get(u"client_tcp_connattempt", 0))
1164                     self._l7_data[u"client"][u"tcp"][u"tx_bytes"] = \
1165                         int(self._result.get(u"client_tcp_tx_bytes", 0))
1166                     self._l7_data[u"client"][u"tcp"][u"rx_bytes"] = \
1167                         int(self._result.get(u"client_tcp_rx_bytes", 0))
1168                     self._l7_data[u"server"][u"tcp"] = dict()
1169                     self._l7_data[u"server"][u"tcp"][u"accepted_flows"] = \
1170                         int(self._result.get(u"server_tcp_accepts", 0))
1171                     self._l7_data[u"server"][u"tcp"][u"connects"] = \
1172                         int(self._result.get(u"server_tcp_connects", 0))
1173                     self._l7_data[u"server"][u"tcp"][u"closed_flows"] = \
1174                         int(self._result.get(u"server_tcp_closed", 0))
1175                     self._l7_data[u"server"][u"tcp"][u"tx_bytes"] = \
1176                         int(self._result.get(u"server_tcp_tx_bytes", 0))
1177                     self._l7_data[u"server"][u"tcp"][u"rx_bytes"] = \
1178                         int(self._result.get(u"server_tcp_rx_bytes", 0))
1179
    def _get_measurement_result(self):
        """Return the result of last measurement as ReceiveRateMeasurement.

        Separate function, as measurements can end either by time
        or by explicit call, this is the common block at the end.

        The target_tr field of ReceiveRateMeasurement is in
        transactions per second. Transmit count and loss count units
        depend on the transaction type. Usually they are in transactions
        per second, or aggregated packets per second.

        :returns: Structure containing the result of the measurement.
        :rtype: ReceiveRateMeasurement
        """
        # Approximate the real trial duration from several sources,
        # in decreasing order of preference: ASTF server side duration,
        # TRex reported duration, remembered duration, wall clock.
        try:
            # Client duration seems to include a setup period
            # where TRex does not send any packets yet.
            # Server duration does not include it.
            server_data = self._l7_data[u"server"]
            approximated_duration = float(server_data[u"traffic_duration"])
        except (KeyError, AttributeError, ValueError, TypeError):
            approximated_duration = None
        try:
            if not approximated_duration:
                approximated_duration = float(self._approximated_duration)
        except ValueError:  # "manual"
            approximated_duration = None
        if not approximated_duration:
            if self._duration and self._duration > 0:
                # Known recomputed or target duration.
                approximated_duration = self._duration
            else:
                # It was an explicit stop.
                if not self._stop_time:
                    raise RuntimeError(u"Unable to determine duration.")
                approximated_duration = self._stop_time - self._start_time
        target_duration = self._target_duration
        if not target_duration:
            target_duration = approximated_duration
        transmit_rate = self._rate
        unsent = 0
        # Each transaction type uses different counters and formulas
        # for attempted and failed transactions.
        if self.transaction_type == u"packet":
            partial_attempt_count = self._sent
            packet_rate = transmit_rate * self.ppta
            # We have a float. TRex way of rounding it is not obvious.
            # The biggest source of mismatch is Inter Stream Gap.
            # So the code tolerates 10 usec of missing packets.
            expected_attempt_count = (target_duration - 1e-5) * packet_rate
            expected_attempt_count = math.ceil(expected_attempt_count)
            # TRex can send more.
            expected_attempt_count = max(expected_attempt_count, self._sent)
            unsent = expected_attempt_count - self._sent
            pass_count = self._received
            fail_count = expected_attempt_count - pass_count
        elif self.transaction_type == u"udp_cps":
            if not self.transaction_scale:
                raise RuntimeError(u"Add support for no-limit udp_cps.")
            partial_attempt_count = self._l7_data[u"client"][u"sent"]
            # We do not care whether TG is slow, it should have attempted all.
            expected_attempt_count = self.transaction_scale
            unsent = expected_attempt_count - partial_attempt_count
            pass_count = self._l7_data[u"client"][u"received"]
            fail_count = expected_attempt_count - pass_count
        elif self.transaction_type == u"tcp_cps":
            if not self.transaction_scale:
                raise RuntimeError(u"Add support for no-limit tcp_cps.")
            ctca = self._l7_data[u"client"][u"tcp"][u"connattempt"]
            partial_attempt_count = ctca
            # We do not care whether TG is slow, it should have attempted all.
            expected_attempt_count = self.transaction_scale
            unsent = expected_attempt_count - partial_attempt_count
            # From TCP point of view, server/connects counts full connections,
            # but we are testing NAT session so client/connects counts that
            # (half connections from TCP point of view).
            pass_count = self._l7_data[u"client"][u"tcp"][u"connects"]
            fail_count = expected_attempt_count - pass_count
        elif self.transaction_type == u"udp_pps":
            if not self.transaction_scale:
                raise RuntimeError(u"Add support for no-limit udp_pps.")
            partial_attempt_count = self._sent
            expected_attempt_count = self.transaction_scale * self.ppta
            unsent = expected_attempt_count - self._sent
            fail_count = self._loss + unsent
        elif self.transaction_type == u"tcp_pps":
            if not self.transaction_scale:
                raise RuntimeError(u"Add support for no-limit tcp_pps.")
            partial_attempt_count = self._sent
            expected_attempt_count = self.transaction_scale * self.ppta
            # One loss-like scenario happens when TRex receives all packets
            # on L2 level, but is not fast enough to process them all
            # at L7 level, which leads to retransmissions.
            # Those manifest as opackets larger than expected.
            # A simple workaround is to add absolute difference.
            # Probability of retransmissions exactly cancelling
            # packets unsent due to duration stretching is quite low.
            unsent = abs(expected_attempt_count - self._sent)
            fail_count = self._loss + unsent
        else:
            raise RuntimeError(f"Unknown parsing {self.transaction_type!r}")
        if unsent and isinstance(self._approximated_duration, float):
            # Do not report unsent for "manual".
            logger.debug(f"Unsent packets/transactions: {unsent}")
        # Negative fail counts can appear (e.g. retransmissions);
        # clamp to zero unless the caller asked to keep negative loss.
        if fail_count < 0 and not self.negative_loss:
            fail_count = 0
        measurement = ReceiveRateMeasurement(
            duration=target_duration,
            target_tr=transmit_rate,
            transmit_count=expected_attempt_count,
            loss_count=fail_count,
            approximated_duration=approximated_duration,
            partial_transmit_count=partial_attempt_count,
        )
        measurement.latency = self.get_latency_int()
        return measurement
1294
1295     def measure(self, duration, transmit_rate):
1296         """Run trial measurement, parse and return results.
1297
1298         The input rate is for transactions. Stateles bidirectional traffic
1299         is understood as sequence of (asynchronous) transactions,
1300         two packets each.
1301
1302         The result units depend on test type, generally
1303         the count either transactions or packets (aggregated over directions).
1304
1305         Optionally, this method sleeps if measurement finished before
1306         the time specified as duration.
1307
1308         :param duration: Trial duration [s].
1309         :param transmit_rate: Target rate in transactions per second.
1310         :type duration: float
1311         :type transmit_rate: float
1312         :returns: Structure containing the result of the measurement.
1313         :rtype: ReceiveRateMeasurement
1314         :raises RuntimeError: If TG is not set or if node is not TG
1315             or if subtype is not specified.
1316         :raises NotImplementedError: If TG is not supported.
1317         """
1318         duration = float(duration)
1319         time_start = time.monotonic()
1320         time_stop = time_start + duration
1321         if self.resetter:
1322             self.resetter()
1323         result = self._send_traffic_on_tg_with_ramp_up(
1324             duration=duration,
1325             rate=transmit_rate,
1326             async_call=False,
1327         )
1328         logger.debug(f"trial measurement result: {result!r}")
1329         # In PLRsearch, computation needs the specified time to complete.
1330         if self.sleep_till_duration:
1331             sleeptime = time_stop - time.monotonic()
1332             if sleeptime > 0.0:
1333                 time.sleep(sleeptime)
1334         return result
1335
1336     def set_rate_provider_defaults(
1337             self,
1338             frame_size,
1339             traffic_profile,
1340             ppta=1,
1341             resetter=None,
1342             traffic_directions=2,
1343             transaction_duration=0.0,
1344             transaction_scale=0,
1345             transaction_type=u"packet",
1346             duration_limit=0.0,
1347             negative_loss=True,
1348             sleep_till_duration=False,
1349             use_latency=False,
1350             ramp_up_rate=None,
1351             ramp_up_duration=None,
1352             state_timeout=240.0,
1353         ):
1354         """Store values accessed by measure().
1355
1356         :param frame_size: Frame size identifier or value [B].
1357         :param traffic_profile: Module name as a traffic profile identifier.
1358             See GPL/traffic_profiles/trex for implemented modules.
1359         :param ppta: Packets per transaction, aggregated over directions.
1360             Needed for udp_pps which does not have a good transaction counter,
1361             so we need to compute expected number of packets.
1362             Default: 1.
1363         :param resetter: Callable to reset DUT state for repeated trials.
1364         :param traffic_directions: Traffic from packet counting point of view
1365             is bi- (2) or uni- (1) directional.
1366             Default: 2
1367         :param transaction_duration: Total expected time to close transaction.
1368         :param transaction_scale: Number of transactions to perform.
1369             0 (default) means unlimited.
1370         :param transaction_type: An identifier specifying which counters
1371             and formulas to use when computing attempted and failed
1372             transactions. Default: "packet".
1373         :param duration_limit: Zero or maximum limit for computed (or given)
1374             duration.
1375         :param negative_loss: If false, negative loss is reported as zero loss.
1376         :param sleep_till_duration: If true and measurement returned faster,
1377             sleep until it matches duration. Needed for PLRsearch.
1378         :param use_latency: Whether to measure latency during the trial.
1379             Default: False.
1380         :param ramp_up_rate: Rate to use in ramp-up trials [pps].
1381         :param ramp_up_duration: Duration of ramp-up trials [s].
1382         :param state_timeout: Time of life of DUT state [s].
1383         :type frame_size: str or int
1384         :type traffic_profile: str
1385         :type ppta: int
1386         :type resetter: Optional[Callable[[], None]]
1387         :type traffic_directions: int
1388         :type transaction_duration: float
1389         :type transaction_scale: int
1390         :type transaction_type: str
1391         :type duration_limit: float
1392         :type negative_loss: bool
1393         :type sleep_till_duration: bool
1394         :type use_latency: bool
1395         :type ramp_up_rate: float
1396         :type ramp_up_duration: float
1397         :type state_timeout: float
1398         """
1399         self.frame_size = frame_size
1400         self.traffic_profile = str(traffic_profile)
1401         self.resetter = resetter
1402         self.ppta = ppta
1403         self.traffic_directions = int(traffic_directions)
1404         self.transaction_duration = float(transaction_duration)
1405         self.transaction_scale = int(transaction_scale)
1406         self.transaction_type = str(transaction_type)
1407         self.duration_limit = float(duration_limit)
1408         self.negative_loss = bool(negative_loss)
1409         self.sleep_till_duration = bool(sleep_till_duration)
1410         self.use_latency = bool(use_latency)
1411         self.ramp_up_rate = float(ramp_up_rate)
1412         self.ramp_up_duration = float(ramp_up_duration)
1413         self.state_timeout = float(state_timeout)
1414
1415
class OptimizedSearch:
    """Class to be imported as Robot Library, containing search keywords.

    Aside of setting up measurer and forwarding arguments,
    the main business is to translate min/max rate from unidir to aggregated.
    """

    @staticmethod
    def perform_optimized_ndrpdr_search(
            frame_size,
            traffic_profile,
            minimum_transmit_rate,
            maximum_transmit_rate,
            packet_loss_ratio=0.005,
            final_relative_width=0.005,
            final_trial_duration=30.0,
            initial_trial_duration=1.0,
            number_of_intermediate_phases=2,
            timeout=1200.0,
            ppta=1,
            resetter=None,
            traffic_directions=2,
            transaction_duration=0.0,
            transaction_scale=0,
            transaction_type=u"packet",
            use_latency=False,
            ramp_up_rate=None,
            ramp_up_duration=None,
            state_timeout=240.0,
            expansion_coefficient=4.0,
    ):
        """Setup initialized TG, perform optimized search, return intervals.

        If transaction_scale is nonzero, all init and non-init trial durations
        are set to 1.0 (as they do not affect the real trial duration)
        and zero intermediate phases are used.
        This way no re-measurement happens.
        Warmup has to be handled via resetter or ramp-up mechanisms.

        :param frame_size: Frame size identifier or value [B].
        :param traffic_profile: Module name as a traffic profile identifier.
            See GPL/traffic_profiles/trex for implemented modules.
        :param minimum_transmit_rate: Minimal load in transactions per second.
        :param maximum_transmit_rate: Maximal load in transactions per second.
        :param packet_loss_ratio: Ratio of packets lost, for PDR [1].
        :param final_relative_width: Final lower bound transmit rate
            cannot be more distant than this multiple of upper bound [1].
        :param final_trial_duration: Trial duration for the final phase [s].
        :param initial_trial_duration: Trial duration for the initial phase
            and also for the first intermediate phase [s].
        :param number_of_intermediate_phases: Number of intermediate phases
            to perform before the final phase [1].
        :param timeout: The search will fail itself when not finished
            before this overall time [s].
        :param ppta: Packets per transaction, aggregated over directions.
            Needed for udp_pps which does not have a good transaction counter,
            so we need to compute expected number of packets.
            Default: 1.
        :param resetter: Callable to reset DUT state for repeated trials.
        :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
            Default: 2
        :param transaction_duration: Total expected time to close transaction.
        :param transaction_scale: Number of transactions to perform.
            0 (default) means unlimited.
        :param transaction_type: An identifier specifying which counters
            and formulas to use when computing attempted and failed
            transactions. Default: "packet".
        :param use_latency: Whether to measure latency during the trial.
            Default: False.
        :param ramp_up_rate: Rate to use in ramp-up trials [pps].
        :param ramp_up_duration: Duration of ramp-up trials [s].
        :param state_timeout: Time of life of DUT state [s].
        :param expansion_coefficient: In external search multiply width by this.
        :type frame_size: str or int
        :type traffic_profile: str
        :type minimum_transmit_rate: float
        :type maximum_transmit_rate: float
        :type packet_loss_ratio: float
        :type final_relative_width: float
        :type final_trial_duration: float
        :type initial_trial_duration: float
        :type number_of_intermediate_phases: int
        :type timeout: float
        :type ppta: int
        :type resetter: Optional[Callable[[], None]]
        :type traffic_directions: int
        :type transaction_duration: float
        :type transaction_scale: int
        :type transaction_type: str
        :type use_latency: bool
        :type ramp_up_rate: float
        :type ramp_up_duration: float
        :type state_timeout: float
        :type expansion_coefficient: float
        :returns: Structure containing narrowed down NDR and PDR intervals
            and their measurements.
        :rtype: List[ReceiveRateInterval]
        :raises RuntimeError: If total duration is larger than timeout.
        """
        # we need instance of TrafficGenerator instantiated by Robot Framework
        # to be able to use trex_stl-*()
        tg_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.TrafficGenerator"
        )
        # Overrides for fixed transaction amount.
        if transaction_scale:
            initial_trial_duration = 1.0
            final_trial_duration = 1.0
            number_of_intermediate_phases = 0
            # Extra time budget, proportional to the transaction count.
            timeout += transaction_scale * 3e-4
        tg_instance.set_rate_provider_defaults(
            frame_size=frame_size,
            traffic_profile=traffic_profile,
            sleep_till_duration=False,
            ppta=ppta,
            resetter=resetter,
            traffic_directions=traffic_directions,
            transaction_duration=transaction_duration,
            transaction_scale=transaction_scale,
            transaction_type=transaction_type,
            use_latency=use_latency,
            ramp_up_rate=ramp_up_rate,
            ramp_up_duration=ramp_up_duration,
            state_timeout=state_timeout,
        )
        algorithm = MultipleLossRatioSearch(
            measurer=tg_instance,
            final_trial_duration=final_trial_duration,
            final_relative_width=final_relative_width,
            number_of_intermediate_phases=number_of_intermediate_phases,
            initial_trial_duration=initial_trial_duration,
            timeout=timeout,
            debug=logger.debug,
            expansion_coefficient=expansion_coefficient,
        )
        if packet_loss_ratio:
            packet_loss_ratios = [0.0, packet_loss_ratio]
        else:
            # Happens in reconf tests.
            packet_loss_ratios = [packet_loss_ratio]
        results = algorithm.narrow_down_intervals(
            min_rate=minimum_transmit_rate,
            max_rate=maximum_transmit_rate,
            packet_loss_ratios=packet_loss_ratios,
        )
        return results

    @staticmethod
    def perform_soak_search(
            frame_size,
            traffic_profile,
            minimum_transmit_rate,
            maximum_transmit_rate,
            plr_target=1e-7,
            tdpt=0.1,
            initial_count=50,
            timeout=7200.0,
            ppta=1,
            resetter=None,
            trace_enabled=False,
            traffic_directions=2,
            transaction_duration=0.0,
            transaction_scale=0,
            transaction_type=u"packet",
            use_latency=False,
            ramp_up_rate=None,
            ramp_up_duration=None,
            state_timeout=240.0,
    ):
        """Setup initialized TG, perform soak search, return avg and stdev.

        :param frame_size: Frame size identifier or value [B].
        :param traffic_profile: Module name as a traffic profile identifier.
            See GPL/traffic_profiles/trex for implemented modules.
        :param minimum_transmit_rate: Minimal load in transactions per second.
        :param maximum_transmit_rate: Maximal load in transactions per second.
        :param plr_target: Ratio of packets lost to achieve [1].
        :param tdpt: Trial duration per trial.
            The algorithm linearly increases trial duration with trial number,
            this is the increment between successive trials, in seconds.
        :param initial_count: Offset to apply before the first trial.
            For example initial_count=50 makes first trial to be 51*tdpt long.
            This is needed because initial "search" phase of integrator
            takes significant time even without any trial results.
        :param timeout: The search will stop after this overall time [s].
        :param ppta: Packets per transaction, aggregated over directions.
            Needed for udp_pps which does not have a good transaction counter,
            so we need to compute expected number of packets.
            Default: 1.
        :param resetter: Callable to reset DUT state for repeated trials.
        :param trace_enabled: True if trace enabled else False.
            This is very verbose tracing on numeric computations,
            do not use in production.
            Default: False
        :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
            Default: 2
        :param transaction_duration: Total expected time to close transaction.
        :param transaction_scale: Number of transactions to perform.
            0 (default) means unlimited.
        :param transaction_type: An identifier specifying which counters
            and formulas to use when computing attempted and failed
            transactions. Default: "packet".
        :param use_latency: Whether to measure latency during the trial.
            Default: False.
        :param ramp_up_rate: Rate to use in ramp-up trials [pps].
        :param ramp_up_duration: Duration of ramp-up trials [s].
        :param state_timeout: Time of life of DUT state [s].
        :type frame_size: str or int
        :type traffic_profile: str
        :type minimum_transmit_rate: float
        :type maximum_transmit_rate: float
        :type plr_target: float
        :type tdpt: float
        :type initial_count: int
        :type timeout: float
        :type ppta: int
        :type resetter: Optional[Callable[[], None]]
        :type trace_enabled: bool
        :type traffic_directions: int
        :type transaction_duration: float
        :type transaction_scale: int
        :type transaction_type: str
        :type use_latency: bool
        :type ramp_up_rate: float
        :type ramp_up_duration: float
        :type state_timeout: float
        :returns: Average and stdev of estimated aggregated rate giving PLR.
        :rtype: 2-tuple of float
        """
        tg_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.TrafficGenerator"
        )
        # Overrides for fixed transaction amount.
        if transaction_scale:
            timeout = 7200.0
        tg_instance.set_rate_provider_defaults(
            frame_size=frame_size,
            traffic_profile=traffic_profile,
            negative_loss=False,
            sleep_till_duration=True,
            ppta=ppta,
            resetter=resetter,
            traffic_directions=traffic_directions,
            transaction_duration=transaction_duration,
            transaction_scale=transaction_scale,
            transaction_type=transaction_type,
            use_latency=use_latency,
            ramp_up_rate=ramp_up_rate,
            ramp_up_duration=ramp_up_duration,
            state_timeout=state_timeout,
        )
        algorithm = PLRsearch(
            measurer=tg_instance,
            trial_duration_per_trial=tdpt,
            packet_loss_ratio_target=plr_target,
            trial_number_offset=initial_count,
            timeout=timeout,
            trace_enabled=trace_enabled,
        )
        result = algorithm.search(
            min_rate=minimum_transmit_rate,
            max_rate=maximum_transmit_rate,
        )
        return result