fix(core): Multilink TRex Async mode
[csit.git] / resources / libraries / python / TrafficGenerator.py
1 # Copyright (c) 2023 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Performance testing traffic generator library."""
15
16 import math
17 import time
18
19 from robot.api import logger
20 from robot.libraries.BuiltIn import BuiltIn
21
22 from .Constants import Constants
23 from .DropRateSearch import DropRateSearch
24 from .MLRsearch.AbstractMeasurer import AbstractMeasurer
25 from .MLRsearch.MultipleLossRatioSearch import MultipleLossRatioSearch
26 from .MLRsearch.ReceiveRateMeasurement import ReceiveRateMeasurement
27 from .PLRsearch.PLRsearch import PLRsearch
28 from .OptionString import OptionString
29 from .ssh import exec_cmd_no_error, exec_cmd
30 from .topology import NodeType
31 from .topology import NodeSubTypeTG
32 from .topology import Topology
33 from .TRexConfigGenerator import TrexConfig
34 from .DUTSetup import DUTSetup as DS
35
36 __all__ = [u"TGDropRateSearchImpl", u"TrafficGenerator", u"OptimizedSearch"]
37
38
39 def check_subtype(node):
40     """Return supported subtype of given node, or raise an exception.
41
42     Currently only one subtype is supported,
43     but we want our code to be ready for other ones.
44
45     :param node: Topology node to check. Can be None.
46     :type node: dict or NoneType
47     :returns: Subtype detected.
48     :rtype: NodeSubTypeTG
49     :raises RuntimeError: If node is not supported, message explains how.
50     """
51     if node.get(u"type") is None:
52         msg = u"Node type is not defined"
53     elif node[u"type"] != NodeType.TG:
54         msg = f"Node type is {node[u'type']!r}, not a TG"
55     elif node.get(u"subtype") is None:
56         msg = u"TG subtype is not defined"
57     elif node[u"subtype"] != NodeSubTypeTG.TREX:
58         msg = f"TG subtype {node[u'subtype']!r} is not supported"
59     else:
60         return NodeSubTypeTG.TREX
61     raise RuntimeError(msg)
62
63
64 class TGDropRateSearchImpl(DropRateSearch):
65     """Drop Rate Search implementation."""
66
67     # def __init__(self):
68     #     super(TGDropRateSearchImpl, self).__init__()
69
70     def measure_loss(
71             self, rate, frame_size, loss_acceptance, loss_acceptance_type,
72             traffic_profile):
73         """Run the traffic and evaluate the measured results.
74
75         :param rate: Offered traffic load.
76         :param frame_size: Size of frame.
77         :param loss_acceptance: Permitted drop ratio or frames count.
78         :param loss_acceptance_type: Type of permitted loss.
79         :param traffic_profile: Module name as a traffic profile identifier.
80             See GPL/traffic_profiles/trex for implemented modules.
81         :type rate: float
82         :type frame_size: str
83         :type loss_acceptance: float
84         :type loss_acceptance_type: LossAcceptanceType
85         :type traffic_profile: str
86         :returns: Whether the measured loss is within the loss acceptance.
87         :rtype: bool
88         :raises NotImplementedError: If TG is not supported.
89         :raises RuntimeError: If TG is not specified.
90         """
91         # We need the TrafficGenerator instance instantiated by Robot
92         # Framework in order to use its trex_stl_*() methods.
93         tg_instance = BuiltIn().get_library_instance(
94             u"resources.libraries.python.TrafficGenerator"
95         )
96         subtype = check_subtype(tg_instance.node)
97         if subtype == NodeSubTypeTG.TREX:
98             unit_rate = str(rate) + self.get_rate_type_str()
99             tg_instance.trex_stl_start_remote_exec(
100                 self.get_duration(), unit_rate, frame_size, traffic_profile
101             )
102             loss = tg_instance.get_loss()
103             sent = tg_instance.get_sent()
104             if self.loss_acceptance_type_is_percentage():
105                 loss = (float(loss) / float(sent)) * 100
106             logger.trace(
107                 f"comparing: {loss} < {loss_acceptance} {loss_acceptance_type}"
108             )
109             return float(loss) <= float(loss_acceptance)
110         return False
111
112     def get_latency(self):
113         """Returns min/avg/max latency.
114
115         :returns: Latency stats.
116         :rtype: list
117         """
118         tg_instance = BuiltIn().get_library_instance(
119             u"resources.libraries.python.TrafficGenerator"
120         )
121         return tg_instance.get_latency_int()
122
123
124 class TrexMode:
125     """Defines mode of T-Rex traffic generator."""
126     # Advanced stateful mode
127     ASTF = u"ASTF"
128     # Stateless mode
129     STL = u"STL"
130
131
132 class TrafficGenerator(AbstractMeasurer):
133     """Traffic Generator."""
134
135     # Use one instance of TrafficGenerator for all tests in test suite
136     ROBOT_LIBRARY_SCOPE = u"TEST SUITE"
137
138     def __init__(self):
139         self._node = None
140         self._mode = None
141         # TG interface order mapping
142         self._ifaces_reordered = False
143         # Result holding fields, to be removed.
144         self._result = None
145         self._loss = None
146         self._sent = None
147         self._latency = None
148         self._received = None
149         self._approximated_rate = None
150         self._approximated_duration = None
151         self._l7_data = None
152         # Measurement input fields, needed for async stop result.
153         self._start_time = None
154         self._stop_time = None
155         self._rate = None
156         self._target_duration = None
157         self._duration = None
158         # Other input parameters, not knowable from measure() signature.
159         self.frame_size = None
160         self.traffic_profile = None
161         self.traffic_directions = None
162         self.negative_loss = None
163         self.use_latency = None
164         self.ppta = None
165         self.resetter = None
166         self.transaction_scale = None
167         self.transaction_duration = None
168         self.sleep_till_duration = None
169         self.transaction_type = None
170         self.duration_limit = None
171         self.ramp_up_start = None
172         self.ramp_up_stop = None
173         self.ramp_up_rate = None
174         self.ramp_up_duration = None
175         self.state_timeout = None
176         # Transient data needed for async measurements.
177         self._xstats = ()
178
179     @property
180     def node(self):
181         """Getter.
182
183         :returns: Traffic generator node.
184         :rtype: dict
185         """
186         return self._node
187
188     def get_loss(self):
189         """Return number of lost packets.
190
191         :returns: Number of lost packets.
192         :rtype: str
193         """
194         return self._loss
195
196     def get_sent(self):
197         """Return number of sent packets.
198
199         :returns: Number of sent packets.
200         :rtype: str
201         """
202         return self._sent
203
204     def get_received(self):
205         """Return number of received packets.
206
207         :returns: Number of received packets.
208         :rtype: str
209         """
210         return self._received
211
212     def get_latency_int(self):
213         """Return rounded min/avg/max latency.
214
215         :returns: Latency stats.
216         :rtype: list
217         """
218         return self._latency
219
220     def get_approximated_rate(self):
221         """Return approximated rate computed as ratio of transmitted packets
222         over duration of trial.
223
224         :returns: Approximated rate.
225         :rtype: str
226         """
227         return self._approximated_rate
228
229     def get_l7_data(self):
230         """Return L7 data.
231
232         :returns: L7 data.
233         :rtype: dict
234         """
235         return self._l7_data
236
237     def check_mode(self, expected_mode):
238         """Check TG mode.
239
240         :param expected_mode: Expected traffic generator mode.
241         :type expected_mode: object
242         :raises RuntimeError: In case of unexpected TG mode.
243         """
244         if self._mode == expected_mode:
245             return
246         raise RuntimeError(
247             f"{self._node[u'subtype']} not running in {expected_mode} mode!"
248         )
249
250     @staticmethod
251     def get_tg_type(tg_node):
252         """Log and return the installed traffic generator type.
253
254         :param tg_node: Node from topology file.
255         :type tg_node: dict
256         :returns: Traffic generator type string.
257         :rtype: str
258         :raises RuntimeError: If the node is not a supported TG.
259         """
260         return str(check_subtype(tg_node))
261
262     @staticmethod
263     def get_tg_version(tg_node):
264         """Log and return the installed traffic generator version.
265
266         :param tg_node: Node from topology file.
267         :type tg_node: dict
268         :returns: Traffic generator version string.
269         :rtype: str
270         :raises RuntimeError: If command returns nonzero return code.
271         """
272         subtype = check_subtype(tg_node)
273         if subtype == NodeSubTypeTG.TREX:
274             command = f"cat {Constants.TREX_INSTALL_DIR}/VERSION"
275             message = u"Get T-Rex version failed!"
276             stdout, _ = exec_cmd_no_error(tg_node, command, message=message)
277             return stdout.strip()
278         else:
279             return "none"
280
281     def initialize_traffic_generator(self, osi_layer, parallel_links=1):
282         """TG initialization.
283
284         :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
285         :param parallel_links: Number of parallel links to configure.
286         :type osi_layer: str
287         :type parallel_links: int
288         :raises ValueError: If OSI layer is unknown.
289         """
290         if osi_layer not in ("L2", "L3", "L7"):
291             raise ValueError("Unknown OSI layer!")
292
293         topology = BuiltIn().get_variable_value("&{topology_info}")
294         self._node = topology["TG"]
295         subtype = check_subtype(self._node)
296
297         if subtype == NodeSubTypeTG.TREX:
298             trex_topology = list()
299             self._mode = TrexMode.ASTF if osi_layer == "L7" else TrexMode.STL
300
301             for l in range(1, parallel_links*2, 2):
302                 tg_if1_adj_addr = topology[f"TG_pf{l+1}_mac"][0]
303                 tg_if2_adj_addr = topology[f"TG_pf{l}_mac"][0]
304                 if osi_layer in ("L3", "L7") and "DUT1" in topology.keys():
305                     ifl = BuiltIn().get_variable_value("${int}")
306                     last = topology["duts_count"]
307                     tg_if1_adj_addr = Topology().get_interface_mac(
308                         topology["DUT1"],
309                         BuiltIn().get_variable_value(
310                             f"${{DUT1_{ifl}{l}}}[0]"
311                         )
312                     )
313                     tg_if2_adj_addr = Topology().get_interface_mac(
314                         topology[f"DUT{last}"],
315                         BuiltIn().get_variable_value(
316                             f"${{DUT{last}_{ifl}{l+1}}}[0]"
317                         )
318                     )
319
320                 trex_topology.append(
321                     dict(
322                         interface=topology[f"TG_pf{l}"][0],
323                         dst_mac=tg_if1_adj_addr
324                     )
325                 )
326                 trex_topology.append(
327                     dict(
328                         interface=topology[f"TG_pf{l+1}"][0],
329                         dst_mac=tg_if2_adj_addr
330                     )
331                 )
332                 if1_pci = topology[f"TG_pf{l}_pci"][0]
333                 if2_pci = topology[f"TG_pf{l+1}_pci"][0]
334                 if min(if1_pci, if2_pci) != if1_pci:
335                     self._ifaces_reordered = True
336                     trex_topology.reverse()
337
338             TrexConfig.add_startup_configuration(
339                 self._node, trex_topology
340             )
341             TrafficGenerator.startup_trex(
342                 self._node, osi_layer, subtype=subtype
343             )
344
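    # Illustrative sketch only (interface names are hypothetical): for
    # parallel_links=2 in L2 mode, the loop above pairs ports per link and
    # points each port at its peer's MAC, e.g.
    #
    #   trex_topology = [
    #       dict(interface="ens1f0", dst_mac=<TG_pf2 MAC>),  # link 1, port 0
    #       dict(interface="ens1f1", dst_mac=<TG_pf1 MAC>),  # link 1, port 1
    #       dict(interface="ens2f0", dst_mac=<TG_pf4 MAC>),  # link 2, port 0
    #       dict(interface="ens2f1", dst_mac=<TG_pf3 MAC>),  # link 2, port 1
    #   ]
    #
    # In L3/L7 mode with DUTs present, dst_mac holds the adjacent DUT
    # interface MAC instead. If the PCI address of the first port in a pair
    # is not the lower one, the list built so far is reversed and
    # self._ifaces_reordered is set, so port arguments stay invariant later.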
345     @staticmethod
346     def startup_trex(tg_node, osi_layer, subtype=None):
347         """Startup sequence for the TRex traffic generator.
348
349         :param tg_node: Traffic generator node.
350         :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
351         :param subtype: Traffic generator sub-type.
352         :type tg_node: dict
353         :type osi_layer: str
354         :type subtype: NodeSubTypeTG
355         :raises RuntimeError: If T-Rex startup failed.
356         :raises ValueError: If OSI layer is not supported.
357         """
358         if not subtype:
359             subtype = check_subtype(tg_node)
360         if subtype == NodeSubTypeTG.TREX:
361             for _ in range(0, 3):
362                 # Kill TRex only if it is already running.
363                 cmd = u"sh -c \"pgrep t-rex && pkill t-rex && sleep 3 || true\""
364                 exec_cmd_no_error(
365                     tg_node, cmd, sudo=True, message=u"Kill TRex failed!"
366                 )
367
368                 # Prepare interfaces for TRex.
369                 tg_port_drv = Constants.TREX_PORT_DRIVER
370                 mlx_driver = u""
371                 for port in tg_node[u"interfaces"].values():
372                     if u"Mellanox" in port.get(u"model"):
373                         mlx_driver = port.get(u"driver")
374                         pci_addr = port.get(u'pci_address')
375                         cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
376                         if cur_driver == mlx_driver:
377                             pass
378                         elif not cur_driver:
379                             DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
380                         else:
381                             DS.pci_driver_unbind(tg_node, pci_addr)
382                             DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
383                     else:
384                         pci_addr = port.get(u'pci_address')
385                         cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
386                         if cur_driver:
387                             DS.pci_driver_unbind(tg_node, pci_addr)
388                         DS.pci_driver_bind(tg_node, pci_addr, tg_port_drv)
389
390                 # Start TRex.
391                 cd_cmd = f"cd '{Constants.TREX_INSTALL_DIR}/scripts/'"
392                 trex_cmd = OptionString([u"nohup", u"./t-rex-64"])
393                 trex_cmd.add(u"-i")
394                 trex_cmd.add(u"--prefix $(hostname)")
395                 trex_cmd.add(u"--hdrh")
396                 trex_cmd.add(u"--no-scapy-server")
397                 trex_cmd.add_if(u"--astf", osi_layer == u"L7")
398                 # OptionString does not create double space if extra is empty.
399                 trex_cmd.add(f"{Constants.TREX_EXTRA_CMDLINE}")
400                 inner_command = f"{cd_cmd} && {trex_cmd} > /tmp/trex.log 2>&1 &"
401                 cmd = f"sh -c \"{inner_command}\" > /dev/null"
402                 try:
403                     exec_cmd_no_error(tg_node, cmd, sudo=True)
404                 except RuntimeError:
405                     cmd = u"sh -c \"cat /tmp/trex.log\""
406                     exec_cmd_no_error(
407                         tg_node, cmd, sudo=True,
408                         message=u"Get TRex logs failed!"
409                     )
410                     raise RuntimeError(u"Start TRex failed!")
411
412                 # Test T-Rex API responsiveness.
413                 cmd = f"python3 {Constants.REMOTE_FW_DIR}/GPL/tools/trex/"
414                 if osi_layer in (u"L2", u"L3"):
415                     cmd += u"trex_stl_assert.py"
416                 elif osi_layer == u"L7":
417                     cmd += u"trex_astf_assert.py"
418                 else:
419                     raise ValueError(u"Unknown OSI layer!")
420                 try:
421                     exec_cmd_no_error(
422                         tg_node, cmd, sudo=True,
423                         message=u"T-Rex API is not responding!", retries=20
424                     )
425                 except RuntimeError:
426                     continue
427                 return
428             # After max retries, TRex is still not responding to the API;
429             # this is a critical error.
430             exec_cmd(tg_node, u"cat /tmp/trex.log", sudo=True)
431             raise RuntimeError(u"Start T-Rex failed after multiple retries!")
432
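    # The assembled startup command roughly resembles the following (layout
    # illustrative, paths come from Constants):
    #
    #   sh -c "cd '<TREX_INSTALL_DIR>/scripts/' && nohup ./t-rex-64 -i \
    #       --prefix $(hostname) --hdrh --no-scapy-server [--astf] \
    #       <TREX_EXTRA_CMDLINE> > /tmp/trex.log 2>&1 &" > /dev/null
    #
    # On failure, /tmp/trex.log is dumped; API responsiveness is then probed
    # with trex_stl_assert.py or trex_astf_assert.py, retried up to 20 times.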
433     @staticmethod
434     def is_trex_running(node):
435         """Check if T-Rex is running, using pgrep.
436
437         :param node: Traffic generator node.
438         :type node: dict
439         :returns: True if T-Rex is running otherwise False.
440         :rtype: bool
441         """
442         ret, _, _ = exec_cmd(node, u"pgrep t-rex", sudo=True)
443         return bool(int(ret) == 0)
444
445     @staticmethod
446     def teardown_traffic_generator(node):
447         """TG teardown.
448
449         :param node: Traffic generator node.
450         :type node: dict
451         :returns: nothing
452         :raises RuntimeError: If node type is not a TG,
453             or if T-Rex teardown fails.
454         """
455         subtype = check_subtype(node)
456         if subtype == NodeSubTypeTG.TREX:
457             exec_cmd_no_error(
458                 node,
459                 u"sh -c "
460                 u"\"if pgrep t-rex; then sudo pkill t-rex && sleep 3; fi\"",
461                 sudo=False,
462                 message=u"T-Rex kill failed!"
463             )
464
465     def trex_astf_stop_remote_exec(self, node):
466         """Execute T-Rex ASTF script on remote node over ssh to stop running
467         traffic.
468
469         Internal state is updated with measurement results.
470
471         :param node: T-Rex generator node.
472         :type node: dict
473         :raises RuntimeError: If stop traffic script fails.
474         """
475         command_line = OptionString().add(u"python3")
476         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
477         command_line.add(f"'{dirname}/trex_astf_stop.py'")
478         command_line.change_prefix(u"--")
479         for index, value in enumerate(self._xstats):
480             if value is not None:
481                 value = value.replace(u"'", u"\"")
482                 command_line.add_equals(f"xstat{index}", f"'{value}'")
483         stdout, _ = exec_cmd_no_error(
484             node, command_line,
485             message=u"T-Rex ASTF runtime error!"
486         )
487         self._parse_traffic_results(stdout)
488
489     def trex_stl_stop_remote_exec(self, node):
490         """Execute T-Rex STL script on remote node over ssh to stop running
491         traffic.
492
493         Internal state is updated with measurement results.
494
495         :param node: T-Rex generator node.
496         :type node: dict
497         :raises RuntimeError: If stop traffic script fails.
498         """
499         command_line = OptionString().add(u"python3")
500         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
501         command_line.add(f"'{dirname}/trex_stl_stop.py'")
502         command_line.add("--xstat")
503         for index, value in enumerate(self._xstats):
504             if value is not None:
505                 value = value.replace("'", "\"")
506                 command_line.add(f"'{value}'")
507         stdout, _ = exec_cmd_no_error(
508             node, command_line,
509             message=u"T-Rex STL runtime error!"
510         )
511         self._parse_traffic_results(stdout)
512
513     def stop_traffic_on_tg(self):
514         """Stop all traffic on TG.
515
516         :returns: Structure containing the result of the measurement.
517         :rtype: ReceiveRateMeasurement
518         :raises ValueError: If TG traffic profile is not supported.
519         """
520         subtype = check_subtype(self._node)
521         if subtype != NodeSubTypeTG.TREX:
522             raise ValueError(f"Unsupported TG subtype: {subtype!r}")
523         if u"trex-astf" in self.traffic_profile:
524             self.trex_astf_stop_remote_exec(self._node)
525         elif u"trex-stl" in self.traffic_profile:
526             self.trex_stl_stop_remote_exec(self._node)
527         else:
528             raise ValueError(u"Unsupported T-Rex traffic profile!")
529         self._stop_time = time.monotonic()
530
531         return self._get_measurement_result()
532
533     def _compute_duration(self, duration, multiplier):
534         """Compute duration for profile driver.
535
536         The final result is influenced by transaction scale and duration limit.
537         It is assumed a higher level function has already set those to self.
538         The duration argument is the target value from search point of view,
539         before the overrides are applied here.
540
541         Minus one (signalling async traffic start) is kept.
542
543         Completeness flag is also included. Duration limited or async trials
544         are not considered complete for ramp-up purposes.
545
546         :param duration: Time expressed in seconds for how long to send traffic.
547         :param multiplier: Traffic rate in transactions per second.
548         :type duration: float
549         :type multiplier: float
550         :returns: New duration and whether it was a complete ramp-up candidate.
551         :rtype: float, bool
552         """
553         if duration < 0.0:
554             # Keep the async -1.
555             return duration, False
556         computed_duration = duration
557         if self.transaction_scale:
558             computed_duration = self.transaction_scale / multiplier
559             # Log the computed duration,
560             # so we can compare with what telemetry suggests
561             # the real duration was.
562             logger.debug(f"Expected duration {computed_duration}")
563         if not self.duration_limit:
564             return computed_duration, True
565         limited_duration = min(computed_duration, self.duration_limit)
566         return limited_duration, (limited_duration == computed_duration)
567
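    # Worked example (illustrative numbers): with transaction_scale=1000 and
    # multiplier=500.0 tps, the computed duration is 1000 / 500.0 = 2.0 s.
    # With duration_limit=1.0 the trial is capped at 1.0 s and reported as
    # incomplete (False); with duration_limit=0.0 (no limit) the full 2.0 s
    # is kept and the trial counts as complete (True).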
568     def trex_astf_start_remote_exec(
569             self, duration, multiplier, async_call=False):
570         """Execute T-Rex ASTF script on remote node over ssh to start running
571         traffic.
572
573         In sync mode, measurement results are stored internally.
574         In async mode, initial data including xstats are stored internally.
575
576         This method contains the logic to compute duration as maximum time
577         if transaction_scale is nonzero.
578         The transaction_scale argument defines (limits) how many transactions
579         will be started in total. As that amount of transactions can take
580         considerable time (sometimes due to explicit delays in the profile),
581         the real time a trial needs to finish is computed here. For now,
582         in that case the duration argument is ignored, assuming it comes
583         from ASTF-unaware search algorithm. The overall time a single
584         transaction needs is given in parameter transaction_duration,
585         it includes both explicit delays and implicit time it takes
586         to transfer data (or whatever the transaction does).
587
588         Currently it is observed TRex does not start the ASTF traffic
589         immediately; an ad-hoc constant is added to the computed duration
590         to compensate for that.
591
592         If transaction_scale is zero, duration is not recomputed.
593         It is assumed the subsequent result parsing gets the real duration
594         if the traffic stops sooner for any reason.
595
596         Currently, it is assumed traffic profile defines a single transaction.
597         To avoid heavy logic here, the input rate is expected to be in
598         transactions per second, as that directly translates to TRex multiplier
599         (assuming the profile does not override the default cps value of one).
600
601         :param duration: Time expressed in seconds for how long to send traffic.
602         :param multiplier: Traffic rate in transactions per second.
603         :param async_call: If enabled then don't wait for all incoming traffic.
604         :type duration: float
605         :type multiplier: float
606         :type async_call: bool
607         :raises RuntimeError: In case of T-Rex driver issue.
608         """
609         self.check_mode(TrexMode.ASTF)
610         p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
611         if not isinstance(duration, (float, int)):
612             duration = float(duration)
613
614         computed_duration, _ = self._compute_duration(duration, multiplier)
615
616         command_line = OptionString().add(u"python3")
617         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
618         command_line.add(f"'{dirname}/trex_astf_profile.py'")
619         command_line.change_prefix(u"--")
620         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
621         command_line.add_with_value(
622             u"profile", f"'{dirname}/{self.traffic_profile}.py'"
623         )
624         command_line.add_with_value(u"duration", f"{computed_duration!r}")
625         command_line.add_with_value(u"frame_size", self.frame_size)
626         command_line.add_with_value(
627             u"n_data_frames", Constants.ASTF_N_DATA_FRAMES
628         )
629         command_line.add_with_value(u"multiplier", multiplier)
630         command_line.add_with_value(u"port_0", p_0)
631         command_line.add_with_value(u"port_1", p_1)
632         command_line.add_with_value(
633             u"traffic_directions", self.traffic_directions
634         )
635         command_line.add_if(u"async_start", async_call)
636         command_line.add_if(u"latency", self.use_latency)
637         command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
638         command_line.add_with_value(
639             u"delay", Constants.PERF_TRIAL_ASTF_DELAY
640         )
641
642         self._start_time = time.monotonic()
643         self._rate = multiplier
644         stdout, _ = exec_cmd_no_error(
645             self._node, command_line, timeout=computed_duration + 10.0,
646             message=u"T-Rex ASTF runtime error!"
647         )
648
649         if async_call:
650             # no result
651             self._target_duration = None
652             self._duration = None
653             self._received = None
654             self._sent = None
655             self._loss = None
656             self._latency = None
657             xstats = []
658             self._l7_data = dict()
659             self._l7_data[u"client"] = dict()
660             self._l7_data[u"client"][u"active_flows"] = None
661             self._l7_data[u"client"][u"established_flows"] = None
662             self._l7_data[u"client"][u"traffic_duration"] = None
663             self._l7_data[u"server"] = dict()
664             self._l7_data[u"server"][u"active_flows"] = None
665             self._l7_data[u"server"][u"established_flows"] = None
666             self._l7_data[u"server"][u"traffic_duration"] = None
667             if u"udp" in self.traffic_profile:
668                 self._l7_data[u"client"][u"udp"] = dict()
669                 self._l7_data[u"client"][u"udp"][u"connects"] = None
670                 self._l7_data[u"client"][u"udp"][u"closed_flows"] = None
671                 self._l7_data[u"client"][u"udp"][u"err_cwf"] = None
672                 self._l7_data[u"server"][u"udp"] = dict()
673                 self._l7_data[u"server"][u"udp"][u"accepted_flows"] = None
674                 self._l7_data[u"server"][u"udp"][u"closed_flows"] = None
675             elif u"tcp" in self.traffic_profile:
676                 self._l7_data[u"client"][u"tcp"] = dict()
677                 self._l7_data[u"client"][u"tcp"][u"initiated_flows"] = None
678                 self._l7_data[u"client"][u"tcp"][u"connects"] = None
679                 self._l7_data[u"client"][u"tcp"][u"closed_flows"] = None
680                 self._l7_data[u"client"][u"tcp"][u"connattempt"] = None
681                 self._l7_data[u"server"][u"tcp"] = dict()
682                 self._l7_data[u"server"][u"tcp"][u"accepted_flows"] = None
683                 self._l7_data[u"server"][u"tcp"][u"connects"] = None
684                 self._l7_data[u"server"][u"tcp"][u"closed_flows"] = None
685             else:
686                 logger.warn(u"Unsupported T-Rex ASTF traffic profile!")
687             index = 0
688             for line in stdout.splitlines():
689                 if f"Xstats snapshot {index}: " in line:
690                     xstats.append(line[19:])
691                     index += 1
692             self._xstats = tuple(xstats)
693         else:
694             self._target_duration = duration
695             self._duration = computed_duration
696             self._parse_traffic_results(stdout)
697
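    # In async mode the profile driver above is expected to print one xstats
    # snapshot per line; the assumed shape (values illustrative) is:
    #
    #   Xstats snapshot 0: {...port 0 counters...}
    #   Xstats snapshot 1: {...port 1 counters...}
    #
    # Each matching line is stripped of its "Xstats snapshot N: " prefix and
    # the remainders are kept in self._xstats, to be handed back to the stop
    # script, presumably so final counters can be related to the trial start.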
698     def trex_stl_start_remote_exec(self, duration, rate, async_call=False):
699         """Execute T-Rex STL script on remote node over ssh to start running
700         traffic.
701
702         In sync mode, measurement results are stored internally.
703         In async mode, initial data including xstats are stored internally.
704
705         Mode-unaware code (e.g. in search algorithms) works with transactions.
706         To keep the logic simple, multiplier is set to that value.
707         As bidirectional traffic profiles send packets in both directions,
708         they are treated as transactions with two packets (one per direction).
709
710         :param duration: Time expressed in seconds for how long to send traffic.
711         :param rate: Traffic rate in transactions per second.
712         :param async_call: If enabled then don't wait for all incoming traffic.
713         :type duration: float
714         :type rate: str
715         :type async_call: bool
716         :raises RuntimeError: In case of T-Rex driver issue.
717         """
718         self.check_mode(TrexMode.STL)
719         p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
720         if not isinstance(duration, (float, int)):
721             duration = float(duration)
722
723         duration, _ = self._compute_duration(duration=duration, multiplier=rate)
724
725         command_line = OptionString().add(u"python3")
726         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
727         command_line.add(f"'{dirname}/trex_stl_profile.py'")
728         command_line.change_prefix(u"--")
729         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
730         command_line.add_with_value(
731             u"profile", f"'{dirname}/{self.traffic_profile}.py'"
732         )
733         command_line.add_with_value(u"duration", f"{duration!r}")
734         command_line.add_with_value(u"frame_size", self.frame_size)
735         command_line.add_with_value(u"rate", f"{rate!r}")
736         command_line.add_with_value(u"port_0", p_0)
737         command_line.add_with_value(u"port_1", p_1)
738         command_line.add_with_value(
739             u"traffic_directions", self.traffic_directions
740         )
741         command_line.add_if(u"async_start", async_call)
742         command_line.add_if(u"latency", self.use_latency)
743         command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
744         command_line.add_with_value(u"delay", Constants.PERF_TRIAL_STL_DELAY)
745
746         self._start_time = time.monotonic()
747         self._rate = float(rate[:-3]) if u"pps" in rate else float(rate)
748         stdout, _ = exec_cmd_no_error(
749             self._node, command_line, timeout=int(duration) + 60,
750             message=u"T-Rex STL runtime error"
751         )
752
753         if async_call:
754             # no result
755             self._target_duration = None
756             self._duration = None
757             self._received = None
758             self._sent = None
759             self._loss = None
760             self._latency = None
761
762             xstats = []
763             index = 0
764             for line in stdout.splitlines():
765                 if f"Xstats snapshot {index}: " in line:
766                     xstats.append(line[19:])
767                     index += 1
768             self._xstats = tuple(xstats)
769         else:
770             self._target_duration = duration
771             self._duration = duration
772             self._parse_traffic_results(stdout)
773
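    # Note on rate handling above (example values illustrative): a rate string
    # such as "12500.0pps" is stored as self._rate = 12500.0 (the trailing
    # "pps" is stripped), while a plain numeric string is converted directly
    # with float(). The duration passed to the profile driver is the one
    # already recomputed by _compute_duration().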
774     def send_traffic_on_tg(
775             self,
776             duration,
777             rate,
778             frame_size,
779             traffic_profile,
780             async_call=False,
781             ppta=1,
782             traffic_directions=2,
783             transaction_duration=0.0,
784             transaction_scale=0,
785             transaction_type=u"packet",
786             duration_limit=0.0,
787             use_latency=False,
788             ramp_up_rate=None,
789             ramp_up_duration=None,
790             state_timeout=240.0,
791             ramp_up_only=False,
792         ):
793         """Send traffic from all configured interfaces on TG.
794
795         In async mode, xstats is stored internally,
796         to enable getting correct result when stopping the traffic.
797         In sync mode, the parsed measurement result is returned;
798         in async mode, None is returned and results come from the stop call.
799
800         Note that traffic generator uses DPDK driver which might
801         reorder port numbers based on wiring and PCI numbering.
802         This method handles that, so argument values are invariant,
803         but you can see swapped values in debug logs.
804
805         When transaction_scale is specified, the duration value is ignored
806         and the needed time is computed. For cases where this results in
807         too long a measurement (e.g. a teardown trial with a small rate),
808         duration_limit is applied (if non-zero), so the trial is stopped sooner.
809
810         Bidirectional STL profiles are treated as transactions with two packets.
811
812         The return value is None for async.
813
814         :param duration: Duration of test traffic generation in seconds.
815         :param rate: Traffic rate in transactions per second.
816         :param frame_size: Frame size (L2) in Bytes.
817         :param traffic_profile: Module name as a traffic profile identifier.
818             See GPL/traffic_profiles/trex for implemented modules.
819         :param async_call: Async mode.
820         :param ppta: Packets per transaction, aggregated over directions.
821             Needed for udp_pps which does not have a good transaction counter,
822             so we need to compute expected number of packets.
823             Default: 1.
824         :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
825             Default: 2
826         :param transaction_duration: Total expected time to close transaction.
827         :param transaction_scale: Number of transactions to perform.
828             0 (default) means unlimited.
829         :param transaction_type: An identifier specifying which counters
830             and formulas to use when computing attempted and failed
831             transactions. Default: "packet".
832         :param duration_limit: Zero or maximum limit for computed (or given)
833             duration.
834         :param use_latency: Whether to measure latency during the trial.
835             Default: False.
836         :param ramp_up_rate: Rate to use in ramp-up trials [pps].
837         :param ramp_up_duration: Duration of ramp-up trials [s].
838         :param state_timeout: Time of life of DUT state [s].
839         :param ramp_up_only: If true, do not perform main trial measurement.
840         :type duration: float
841         :type rate: float
842         :type frame_size: str
843         :type traffic_profile: str
844         :type async_call: bool
845         :type ppta: int
846         :type traffic_directions: int
847         :type transaction_duration: float
848         :type transaction_scale: int
849         :type transaction_type: str
850         :type duration_limit: float
851         :type use_latency: bool
852         :type ramp_up_rate: float
853         :type ramp_up_duration: float
854         :type state_timeout: float
855         :type ramp_up_only: bool
856         :returns: TG results.
857         :rtype: ReceiveRateMeasurement or None
858         :raises ValueError: If TG traffic profile is not supported.
859         """
860         self.set_rate_provider_defaults(
861             frame_size=frame_size,
862             traffic_profile=traffic_profile,
863             ppta=ppta,
864             traffic_directions=traffic_directions,
865             transaction_duration=transaction_duration,
866             transaction_scale=transaction_scale,
867             transaction_type=transaction_type,
868             duration_limit=duration_limit,
869             use_latency=use_latency,
870             ramp_up_rate=ramp_up_rate,
871             ramp_up_duration=ramp_up_duration,
872             state_timeout=state_timeout,
873         )
874         return self._send_traffic_on_tg_with_ramp_up(
875             duration=duration,
876             rate=rate,
877             async_call=async_call,
878             ramp_up_only=ramp_up_only,
879         )
880
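    # Minimal usage sketch (values hypothetical; in practice this is invoked
    # from Robot Framework keywords):
    #
    #   tg = BuiltIn().get_library_instance(
    #       u"resources.libraries.python.TrafficGenerator"
    #   )
    #   result = tg.send_traffic_on_tg(
    #       duration=30.0,
    #       rate=1000000.0,
    #       frame_size=u"64",
    #       traffic_profile=u"trex-stl-ethip4-ip4src254",
    #       traffic_directions=2,
    #   )
    #   # result is a ReceiveRateMeasurement, or None when async_call=True
    #   # or ramp_up_only=True.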
881     def _send_traffic_on_tg_internal(
882             self, duration, rate, async_call=False):
883         """Send traffic from all configured interfaces on TG.
884
885         This is an internal function, it assumes set_rate_provider_defaults
886         has been called to remember most values.
887         The reason why we need to remember various values is that
888         the traffic can be asynchronous, and parsing needs those values.
889         The reason why this is a separate function from the one
890         which calls set_rate_provider_defaults is that some search algorithms
891         need to specify their own values, and we do not want the measure call
892         to overwrite them with defaults.
893
894         This function is used both for automated ramp-up trials
895         and for explicitly called trials.
896
897         :param duration: Duration of test traffic generation in seconds.
898         :param rate: Traffic rate in transactions per second.
899         :param async_call: Async mode.
900         :type duration: float
901         :type rate: float
902         :type async_call: bool
903         :returns: TG results.
904         :rtype: ReceiveRateMeasurement or None
905         :raises ValueError: If TG traffic profile is not supported.
906         """
907         subtype = check_subtype(self._node)
908         if subtype == NodeSubTypeTG.TREX:
909             if u"trex-astf" in self.traffic_profile:
910                 self.trex_astf_start_remote_exec(
911                     duration, float(rate), async_call
912                 )
913             elif u"trex-stl" in self.traffic_profile:
914                 unit_rate_str = str(rate) + u"pps"
915                 self.trex_stl_start_remote_exec(
916                     duration, unit_rate_str, async_call
917                 )
918             else:
919                 raise ValueError(u"Unsupported T-Rex traffic profile!")
920
921         return None if async_call else self._get_measurement_result()
922
923     def _send_traffic_on_tg_with_ramp_up(
924             self, duration, rate, async_call=False, ramp_up_only=False):
925         """Send traffic from all interfaces on TG, maybe after ramp-up.
926
927         This is an internal function, it assumes set_rate_provider_defaults
928         has been called to remember most values.
929         The reason why we need to remember various values is that
930         the traffic can be asynchronous, and parsing needs those values.
931         The reason why this is a separate function from the one
932         which calls set_rate_provider_defaults is that some search algorithms
933         need to specify their own values, and we do not want the measure call
934         to overwrite them with defaults.
935
936         If ramp-up tracking is detected, a computation is performed,
937         and if state timeout is near, trial at ramp-up rate and duration
938         is inserted before the main trial measurement.
939
940         The ramp_up_only parameter forces a ramp-up without immediate
941         trial measurement, which is useful in case self remembers
942         a previous ramp-up trial that belongs to a different test (phase).
943
944         Return None if trial is async or ramp-up only.
945
946         :param duration: Duration of test traffic generation in seconds.
947         :param rate: Traffic rate in transactions per second.
948         :param async_call: Async mode.
949         :param ramp_up_only: If true, do not perform main trial measurement.
950         :type duration: float
951         :type rate: float
952         :type async_call: bool
953         :type ramp_up_only: bool
954         :returns: TG results.
955         :rtype: ReceiveRateMeasurement or None
956         :raises ValueError: If TG traffic profile is not supported.
957         """
958         complete = False
959         if self.ramp_up_rate:
960             # Figure out whether we need to insert a ramp-up trial.
961             if ramp_up_only or self.ramp_up_start is None:
962                 # We never ramped up yet (at least not in this test case).
963                 ramp_up_needed = True
964             else:
965                 # We ramped up before, but maybe it was too long ago.
966                 # Adding a constant overhead to be safe.
967                 time_now = time.monotonic() + 1.0
968                 computed_duration, complete = self._compute_duration(
969                     duration=duration,
970                     multiplier=rate,
971                 )
972                 # There are two conditions for inserting ramp-up.
973                 # If early sessions are expiring already,
974                 # or if late sessions are to expire before measurement is over.
975                 ramp_up_start_delay = time_now - self.ramp_up_start
976                 ramp_up_stop_delay = time_now - self.ramp_up_stop
977                 ramp_up_stop_delay += computed_duration
978                 bigger_delay = max(ramp_up_start_delay, ramp_up_stop_delay)
979                 # Final boolean decision.
980                 ramp_up_needed = (bigger_delay >= self.state_timeout)
981             if ramp_up_needed:
982                 logger.debug(
983                     u"State may time out during next real trial, "
984                     u"inserting a ramp-up trial."
985                 )
986                 self.ramp_up_start = time.monotonic()
987                 self._send_traffic_on_tg_internal(
988                     duration=self.ramp_up_duration,
989                     rate=self.ramp_up_rate,
990                     async_call=async_call,
991                 )
992                 self.ramp_up_stop = time.monotonic()
993                 logger.debug(u"Ramp-up done.")
994             else:
995                 logger.debug(
996                     u"State will probably not time out during next real trial, "
997                     u"no ramp-up trial needed just yet."
998                 )
999         if ramp_up_only:
1000             return None
1001         trial_start = time.monotonic()
1002         result = self._send_traffic_on_tg_internal(
1003             duration=duration,
1004             rate=rate,
1005             async_call=async_call,
1006         )
1007         trial_end = time.monotonic()
1008         if self.ramp_up_rate:
1009             # Optimization: No loss acts as a good ramp-up, if it was complete.
1010             if complete and result is not None and result.loss_count == 0:
1011                 logger.debug(u"Good trial acts as a ramp-up")
1012                 self.ramp_up_start = trial_start
1013                 self.ramp_up_stop = trial_end
1014             else:
1015                 logger.debug(u"Loss or incomplete, does not act as a ramp-up.")
1016         return result
1017
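    # Ramp-up decision example (illustrative numbers): with state_timeout=240.0,
    # a ramp_up_start recorded 200 s ago, a ramp_up_stop recorded 190 s ago and
    # a computed trial duration of 60.0 s, the delays are 201.0 and 251.0
    # (including the +1.0 safety margin), so bigger_delay=251.0 >= 240.0 and a
    # ramp-up trial is inserted before the main trial.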
1018     def no_traffic_loss_occurred(self):
1019         """Fail if loss occurred in traffic run.
1020
1021         :returns: nothing
1022         :raises Exception: If loss occurred.
1023         """
1024         if self._loss is None:
1025             raise RuntimeError(u"The traffic generation has not been issued")
1026         if int(self._loss) != 0:
1027             raise RuntimeError(f"Traffic loss occurred: {self._loss}")
1028
1029     def fail_if_no_traffic_forwarded(self):
1030         """Fail if no traffic forwarded.
1031
1032         :returns: nothing
1033         :raises Exception: If no traffic forwarded.
1034         """
1035         if self._received is None:
1036             raise RuntimeError(u"The traffic generation has not been issued")
1037         if self._received == 0:
1038             raise RuntimeError(u"No traffic forwarded")
1039
1040     def partial_traffic_loss_accepted(
1041             self, loss_acceptance, loss_acceptance_type):
1042         """Fail if loss is higher than accepted in traffic run.
1043
1044         :param loss_acceptance: Permitted drop ratio or frames count.
1045         :param loss_acceptance_type: Type of permitted loss.
1046         :type loss_acceptance: float
1047         :type loss_acceptance_type: LossAcceptanceType
1048         :returns: nothing
1049         :raises Exception: If loss is above acceptance criteria.
1050         """
1051         if self._loss is None:
1052             raise Exception(u"The traffic generation has not been issued")
1053
1054         if loss_acceptance_type == u"percentage":
1055             loss = (float(self._loss) / float(self._sent)) * 100
1056         elif loss_acceptance_type == u"frames":
1057             loss = float(self._loss)
1058         else:
1059             raise Exception(u"Loss acceptance type not supported")
1060
1061         if loss > float(loss_acceptance):
1062             raise Exception(
1063                 f"Traffic loss {loss} above loss acceptance: {loss_acceptance}"
1064             )
1065
1066     def _parse_traffic_results(self, stdout):
1067         """Parse stdout of scripts into fields of self.
1068
1069         Block of code to reuse, by sync start, or stop after async.
1070
1071         :param stdout: Text containing the standard output.
1072         :type stdout: str
1073         """
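        # The last stdout line is assumed to be a semicolon-separated list of
        # key=value pairs; the keys match the lookups below, the values here
        # are illustrative only (wrapped for readability):
        #
        #   total_received=1000;total_sent=1000;frame_loss=0;
        #   approximated_duration=1.0;latency_stream_0(usec)=-1/-1/-1;...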
1074         subtype = check_subtype(self._node)
1075         if subtype == NodeSubTypeTG.TREX:
1076             # Last line from console output
1077             line = stdout.splitlines()[-1]
1078             results = line.split(u";")
1079             if results[-1] in (u" ", u""):
1080                 results.pop(-1)
1081             self._result = dict()
1082             for result in results:
1083                 key, value = result.split(u"=", maxsplit=1)
1084                 self._result[key.strip()] = value
1085             logger.info(f"TrafficGen results:\n{self._result}")
1086             self._received = int(self._result.get(u"total_received", 0))
1087             self._sent = int(self._result.get(u"total_sent", 0))
1088             self._loss = int(self._result.get(u"frame_loss", 0))
1089             self._approximated_duration = \
1090                 self._result.get(u"approximated_duration", 0.0)
1091             if u"manual" not in str(self._approximated_duration):
1092                 self._approximated_duration = float(self._approximated_duration)
1093             self._latency = list()
1094             self._latency.append(self._result.get(u"latency_stream_0(usec)"))
1095             self._latency.append(self._result.get(u"latency_stream_1(usec)"))
1096             if self._mode == TrexMode.ASTF:
1097                 self._l7_data = dict()
1098                 self._l7_data[u"client"] = dict()
1099                 self._l7_data[u"client"][u"sent"] = \
1100                     int(self._result.get(u"client_sent", 0))
1101                 self._l7_data[u"client"][u"received"] = \
1102                     int(self._result.get(u"client_received", 0))
1103                 self._l7_data[u"client"][u"active_flows"] = \
1104                     int(self._result.get(u"client_active_flows", 0))
1105                 self._l7_data[u"client"][u"established_flows"] = \
1106                     int(self._result.get(u"client_established_flows", 0))
1107                 self._l7_data[u"client"][u"traffic_duration"] = \
1108                     float(self._result.get(u"client_traffic_duration", 0.0))
1109                 self._l7_data[u"client"][u"err_rx_throttled"] = \
1110                     int(self._result.get(u"client_err_rx_throttled", 0))
1111                 self._l7_data[u"client"][u"err_c_nf_throttled"] = \
1112                     int(self._result.get(u"client_err_nf_throttled", 0))
1113                 self._l7_data[u"client"][u"err_flow_overflow"] = \
1114                     int(self._result.get(u"client_err_flow_overflow", 0))
1115                 self._l7_data[u"server"] = dict()
1116                 self._l7_data[u"server"][u"active_flows"] = \
1117                     int(self._result.get(u"server_active_flows", 0))
1118                 self._l7_data[u"server"][u"established_flows"] = \
1119                     int(self._result.get(u"server_established_flows", 0))
1120                 self._l7_data[u"server"][u"traffic_duration"] = \
1121                     float(self._result.get(u"server_traffic_duration", 0.0))
1122                 self._l7_data[u"server"][u"err_rx_throttled"] = \
1123                     int(self._result.get(u"client_err_rx_throttled", 0))
1124                 if u"udp" in self.traffic_profile:
1125                     self._l7_data[u"client"][u"udp"] = dict()
1126                     self._l7_data[u"client"][u"udp"][u"connects"] = \
1127                         int(self._result.get(u"client_udp_connects", 0))
1128                     self._l7_data[u"client"][u"udp"][u"closed_flows"] = \
1129                         int(self._result.get(u"client_udp_closed", 0))
1130                     self._l7_data[u"client"][u"udp"][u"tx_bytes"] = \
1131                         int(self._result.get(u"client_udp_tx_bytes", 0))
1132                     self._l7_data[u"client"][u"udp"][u"rx_bytes"] = \
1133                         int(self._result.get(u"client_udp_rx_bytes", 0))
1134                     self._l7_data[u"client"][u"udp"][u"tx_packets"] = \
1135                         int(self._result.get(u"client_udp_tx_packets", 0))
1136                     self._l7_data[u"client"][u"udp"][u"rx_packets"] = \
1137                         int(self._result.get(u"client_udp_rx_packets", 0))
1138                     self._l7_data[u"client"][u"udp"][u"keep_drops"] = \
1139                         int(self._result.get(u"client_udp_keep_drops", 0))
1140                     self._l7_data[u"client"][u"udp"][u"err_cwf"] = \
1141                         int(self._result.get(u"client_err_cwf", 0))
1142                     self._l7_data[u"server"][u"udp"] = dict()
1143                     self._l7_data[u"server"][u"udp"][u"accepted_flows"] = \
1144                         int(self._result.get(u"server_udp_accepts", 0))
1145                     self._l7_data[u"server"][u"udp"][u"closed_flows"] = \
1146                         int(self._result.get(u"server_udp_closed", 0))
1147                     self._l7_data[u"server"][u"udp"][u"tx_bytes"] = \
1148                         int(self._result.get(u"server_udp_tx_bytes", 0))
1149                     self._l7_data[u"server"][u"udp"][u"rx_bytes"] = \
1150                         int(self._result.get(u"server_udp_rx_bytes", 0))
1151                     self._l7_data[u"server"][u"udp"][u"tx_packets"] = \
1152                         int(self._result.get(u"server_udp_tx_packets", 0))
1153                     self._l7_data[u"server"][u"udp"][u"rx_packets"] = \
1154                         int(self._result.get(u"server_udp_rx_packets", 0))
1155                 elif u"tcp" in self.traffic_profile:
1156                     self._l7_data[u"client"][u"tcp"] = dict()
1157                     self._l7_data[u"client"][u"tcp"][u"initiated_flows"] = \
1158                         int(self._result.get(u"client_tcp_connect_inits", 0))
1159                     self._l7_data[u"client"][u"tcp"][u"connects"] = \
1160                         int(self._result.get(u"client_tcp_connects", 0))
1161                     self._l7_data[u"client"][u"tcp"][u"closed_flows"] = \
1162                         int(self._result.get(u"client_tcp_closed", 0))
1163                     self._l7_data[u"client"][u"tcp"][u"connattempt"] = \
1164                         int(self._result.get(u"client_tcp_connattempt", 0))
1165                     self._l7_data[u"client"][u"tcp"][u"tx_bytes"] = \
1166                         int(self._result.get(u"client_tcp_tx_bytes", 0))
1167                     self._l7_data[u"client"][u"tcp"][u"rx_bytes"] = \
1168                         int(self._result.get(u"client_tcp_rx_bytes", 0))
1169                     self._l7_data[u"server"][u"tcp"] = dict()
1170                     self._l7_data[u"server"][u"tcp"][u"accepted_flows"] = \
1171                         int(self._result.get(u"server_tcp_accepts", 0))
1172                     self._l7_data[u"server"][u"tcp"][u"connects"] = \
1173                         int(self._result.get(u"server_tcp_connects", 0))
1174                     self._l7_data[u"server"][u"tcp"][u"closed_flows"] = \
1175                         int(self._result.get(u"server_tcp_closed", 0))
1176                     self._l7_data[u"server"][u"tcp"][u"tx_bytes"] = \
1177                         int(self._result.get(u"server_tcp_tx_bytes", 0))
1178                     self._l7_data[u"server"][u"tcp"][u"rx_bytes"] = \
1179                         int(self._result.get(u"server_tcp_rx_bytes", 0))
1180
1181     def _get_measurement_result(self):
1182         """Return the result of last measurement as ReceiveRateMeasurement.
1183
1184         Separate function, as measurements can end either by time
1185         or by explicit call, this is the common block at the end.
1186
1187         The target_tr field of ReceiveRateMeasurement is in
1188         transactions per second. Transmit count and loss count units
1189         depend on the transaction type. Usually they are in transactions
1190         or aggregated packets.
1191
1192         :returns: Structure containing the result of the measurement.
1193         :rtype: ReceiveRateMeasurement
1194         """
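        # Worked example for the "packet" branch below (illustrative numbers):
        # with target_duration=1.0 s, transmit_rate=1000.0 tps and ppta=2,
        # packet_rate is 2000.0 pps and expected_attempt_count is
        # ceil((1.0 - 1e-5) * 2000.0) = 2000. If self._sent=1999 and
        # self._received=1998, then unsent=1 and fail_count=2.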
1195         try:
1196             # Client duration seems to include a setup period
1197             # where TRex does not send any packets yet.
1198             # Server duration does not include it.
1199             server_data = self._l7_data[u"server"]
1200             approximated_duration = float(server_data[u"traffic_duration"])
1201         except (KeyError, AttributeError, ValueError, TypeError):
1202             approximated_duration = None
1203         try:
1204             if not approximated_duration:
1205                 approximated_duration = float(self._approximated_duration)
1206         except ValueError:  # "manual"
1207             approximated_duration = None
1208         if not approximated_duration:
1209             if self._duration and self._duration > 0:
1210                 # Known recomputed or target duration.
1211                 approximated_duration = self._duration
1212             else:
1213                 # It was an explicit stop.
1214                 if not self._stop_time:
1215                     raise RuntimeError(u"Unable to determine duration.")
1216                 approximated_duration = self._stop_time - self._start_time
1217         target_duration = self._target_duration
1218         if not target_duration:
1219             target_duration = approximated_duration
1220         transmit_rate = self._rate
1221         unsent = 0
1222         if self.transaction_type == u"packet":
1223             partial_attempt_count = self._sent
1224             packet_rate = transmit_rate * self.ppta
1225             # We have a float. The way TRex rounds it is not obvious.
1226             # The biggest source of mismatch is Inter Stream Gap.
1227             # So the code tolerates 10 usec of missing packets.
1228             expected_attempt_count = (target_duration - 1e-5) * packet_rate
1229             expected_attempt_count = math.ceil(expected_attempt_count)
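                 # Illustrative arithmetic (not from the source): with
                 # target_duration = 30.0 s and packet_rate = 1e6 pps,
                 # (30.0 - 1e-5) * 1e6 = 29_999_990.0, so the ceil above
                 # yields 29_999_990, i.e. 10 packets fewer than a naive
                 # 30e6; those 10 packets are the tolerated 10 usec worth.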
1230             # TRex can send more.
1231             expected_attempt_count = max(expected_attempt_count, self._sent)
1232             unsent = expected_attempt_count - self._sent
1233             pass_count = self._received
1234             fail_count = expected_attempt_count - pass_count
1235         elif self.transaction_type == u"udp_cps":
1236             if not self.transaction_scale:
1237                 raise RuntimeError(u"Add support for no-limit udp_cps.")
1238             partial_attempt_count = self._l7_data[u"client"][u"sent"]
1239             # We do not care whether TG is slow; it should have attempted all.
1240             expected_attempt_count = self.transaction_scale
1241             unsent = expected_attempt_count - partial_attempt_count
1242             pass_count = self._l7_data[u"client"][u"received"]
1243             fail_count = expected_attempt_count - pass_count
1244         elif self.transaction_type == u"tcp_cps":
1245             if not self.transaction_scale:
1246                 raise RuntimeError(u"Add support for no-limit tcp_cps.")
1247             ctca = self._l7_data[u"client"][u"tcp"][u"connattempt"]
1248             partial_attempt_count = ctca
1249             # We do not care whether TG is slow; it should have attempted all.
1250             expected_attempt_count = self.transaction_scale
1251             unsent = expected_attempt_count - partial_attempt_count
1252             # From the TCP point of view, server/connects counts full
1253             # connections, but we are testing NAT sessions, so the relevant
1254             # counter is client/connects (half connections in TCP terms).
1255             pass_count = self._l7_data[u"client"][u"tcp"][u"connects"]
1256             fail_count = expected_attempt_count - pass_count
1257         elif self.transaction_type == u"udp_pps":
1258             if not self.transaction_scale:
1259                 raise RuntimeError(u"Add support for no-limit udp_pps.")
1260             partial_attempt_count = self._sent
1261             expected_attempt_count = self.transaction_scale * self.ppta
1262             unsent = expected_attempt_count - self._sent
1263             fail_count = self._loss + unsent
1264         elif self.transaction_type == u"tcp_pps":
1265             if not self.transaction_scale:
1266                 raise RuntimeError(u"Add support for no-limit tcp_pps.")
1267             partial_attempt_count = self._sent
1268             expected_attempt_count = self.transaction_scale * self.ppta
1269             # One loss-like scenario happens when TRex receives all packets
1270             # on L2 level, but is not fast enough to process them all
1271             # at L7 level, which leads to retransmissions.
1272             # Those manifest as opackets larger than expected.
1273             # A simple workaround is to add absolute difference.
1274             # Probability of retransmissions exactly cancelling
1275             # packets unsent due to duration stretching is quite low.
1276             unsent = abs(expected_attempt_count - self._sent)
1277             fail_count = self._loss + unsent
1278         else:
1279             raise RuntimeError(f"Unknown parsing {self.transaction_type!r}")
1280         if unsent and isinstance(self._approximated_duration, float):
1281             # Do not report unsent for "manual".
1282             logger.debug(f"Unsent packets/transactions: {unsent}")
1283         if fail_count < 0 and not self.negative_loss:
1284             fail_count = 0
1285         measurement = ReceiveRateMeasurement(
1286             duration=target_duration,
1287             target_tr=transmit_rate,
1288             transmit_count=expected_attempt_count,
1289             loss_count=fail_count,
1290             approximated_duration=approximated_duration,
1291             partial_transmit_count=partial_attempt_count,
1292         )
1293         measurement.latency = self.get_latency_int()
1294         return measurement
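             # Illustrative sketch (commented out; counter values are made up):
             # how the "udp_cps" branch above maps parsed counters into a
             # ReceiveRateMeasurement, assuming transaction_scale = 100_000.
             #
             # l7_client = {"sent": 99_990, "received": 99_950}
             # expected_attempt_count = 100_000
             # unsent = expected_attempt_count - l7_client["sent"]          # 10
             # fail_count = expected_attempt_count - l7_client["received"]  # 50
             # measurement = ReceiveRateMeasurement(
             #     duration=30.0,
             #     target_tr=3_333.3,
             #     transmit_count=expected_attempt_count,
             #     loss_count=fail_count,
             #     approximated_duration=30.0,
             #     partial_transmit_count=l7_client["sent"],
             # )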
1295
1296     def measure(self, duration, transmit_rate):
1297         """Run trial measurement, parse and return results.
1298
1299         The input rate is in transactions per second. Stateless
1300         bidirectional traffic is understood as a sequence of
1301         (asynchronous) transactions, two packets each.
1302
1303         The result units depend on the test type; generally the counts
1304         are in either transactions or packets (aggregated over directions).
1305
1306         Optionally, this method sleeps if measurement finished before
1307         the time specified as duration.
1308
1309         :param duration: Trial duration [s].
1310         :param transmit_rate: Target rate in transactions per second.
1311         :type duration: float
1312         :type transmit_rate: float
1313         :returns: Structure containing the result of the measurement.
1314         :rtype: ReceiveRateMeasurement
1315         :raises RuntimeError: If TG is not set or if node is not TG
1316             or if subtype is not specified.
1317         :raises NotImplementedError: If TG is not supported.
1318         """
1319         duration = float(duration)
1320         time_start = time.monotonic()
1321         time_stop = time_start + duration
1322         if self.resetter:
1323             self.resetter()
1324         result = self._send_traffic_on_tg_with_ramp_up(
1325             duration=duration,
1326             rate=transmit_rate,
1327             async_call=False,
1328         )
1329         logger.debug(f"trial measurement result: {result!r}")
1330         # In PLRsearch, computation needs the specified time to complete.
1331         if self.sleep_till_duration:
1332             sleeptime = time_stop - time.monotonic()
1333             if sleeptime > 0.0:
1334                 time.sleep(sleeptime)
1335         return result
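             # Usage sketch (commented out; the rate value is illustrative;
             # tg_instance is the Robot-instantiated library instance, as
             # obtained in OptimizedSearch below): a search algorithm calls
             # measure() repeatedly and reads counts from the result.
             #
             # measurement = tg_instance.measure(
             #     duration=30.0, transmit_rate=1_000_000.0
             # )
             # loss_ratio = measurement.loss_count / measurement.transmit_count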
1336
1337     def set_rate_provider_defaults(
1338             self,
1339             frame_size,
1340             traffic_profile,
1341             ppta=1,
1342             resetter=None,
1343             traffic_directions=2,
1344             transaction_duration=0.0,
1345             transaction_scale=0,
1346             transaction_type=u"packet",
1347             duration_limit=0.0,
1348             negative_loss=True,
1349             sleep_till_duration=False,
1350             use_latency=False,
1351             ramp_up_rate=None,
1352             ramp_up_duration=None,
1353             state_timeout=240.0,
1354         ):
1355         """Store values accessed by measure().
1356
1357         :param frame_size: Frame size identifier or value [B].
1358         :param traffic_profile: Module name as a traffic profile identifier.
1359             See GPL/traffic_profiles/trex for implemented modules.
1360         :param ppta: Packets per transaction, aggregated over directions.
1361             Needed for udp_pps, which does not have a good transaction
1362             counter, so we need to compute the expected number of packets.
1363             Default: 1.
1364         :param resetter: Callable to reset DUT state for repeated trials.
1365         :param traffic_directions: From the packet counting point of view,
1366             traffic is bi- (2) or uni- (1) directional.
1367             Default: 2
1368         :param transaction_duration: Total expected time to close transaction.
1369         :param transaction_scale: Number of transactions to perform.
1370             0 (default) means unlimited.
1371         :param transaction_type: An identifier specifying which counters
1372             and formulas to use when computing attempted and failed
1373             transactions. Default: "packet".
1374         :param duration_limit: Zero or maximum limit for computed (or given)
1375             duration.
1376         :param negative_loss: If false, negative loss is reported as zero loss.
1377         :param sleep_till_duration: If true and measurement returned faster,
1378             sleep until it matches duration. Needed for PLRsearch.
1379         :param use_latency: Whether to measure latency during the trial.
1380             Default: False.
1381         :param ramp_up_rate: Rate to use in ramp-up trials [pps].
1382         :param ramp_up_duration: Duration of ramp-up trials [s].
1383         :param state_timeout: Time of life of DUT state [s].
1384         :type frame_size: str or int
1385         :type traffic_profile: str
1386         :type ppta: int
1387         :type resetter: Optional[Callable[[], None]]
1388         :type traffic_directions: int
1389         :type transaction_duration: float
1390         :type transaction_scale: int
1391         :type transaction_type: str
1392         :type duration_limit: float
1393         :type negative_loss: bool
1394         :type sleep_till_duration: bool
1395         :type use_latency: bool
1396         :type ramp_up_rate: float
1397         :type ramp_up_duration: float
1398         :type state_timeout: float
1399         """
1400         self.frame_size = frame_size
1401         self.traffic_profile = str(traffic_profile)
1402         self.resetter = resetter
1403         self.ppta = ppta
1404         self.traffic_directions = int(traffic_directions)
1405         self.transaction_duration = float(transaction_duration)
1406         self.transaction_scale = int(transaction_scale)
1407         self.transaction_type = str(transaction_type)
1408         self.duration_limit = float(duration_limit)
1409         self.negative_loss = bool(negative_loss)
1410         self.sleep_till_duration = bool(sleep_till_duration)
1411         self.use_latency = bool(use_latency)
1412         self.ramp_up_rate = float(ramp_up_rate or 0.0)
1413         self.ramp_up_duration = float(ramp_up_duration or 0.0)
1414         self.state_timeout = float(state_timeout)
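             # Usage sketch (commented out; the traffic profile name is only an
             # example): configure the measurer once, then run trials at
             # various rates via measure().
             #
             # tg_instance.set_rate_provider_defaults(
             #     frame_size=64,
             #     traffic_profile="trex-stl-ethip4-ip4src254",  # example name
             #     transaction_type="packet",
             #     traffic_directions=2,
             # )
             # trial = tg_instance.measure(duration=1.0, transmit_rate=50_000.0)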
1415
1416
1417 class OptimizedSearch:
1418     """Class to be imported as Robot Library, containing search keywords.
1419
1420     Aside of setting up measurer and forwarding arguments,
1421     the main business is to translate min/max rate from unidir to aggregated.
1422     """
1423
1424     @staticmethod
1425     def perform_optimized_ndrpdr_search(
1426             frame_size,
1427             traffic_profile,
1428             minimum_transmit_rate,
1429             maximum_transmit_rate,
1430             packet_loss_ratio=0.005,
1431             final_relative_width=0.005,
1432             final_trial_duration=30.0,
1433             initial_trial_duration=1.0,
1434             number_of_intermediate_phases=2,
1435             timeout=1200.0,
1436             ppta=1,
1437             resetter=None,
1438             traffic_directions=2,
1439             transaction_duration=0.0,
1440             transaction_scale=0,
1441             transaction_type=u"packet",
1442             use_latency=False,
1443             ramp_up_rate=None,
1444             ramp_up_duration=None,
1445             state_timeout=240.0,
1446             expansion_coefficient=4.0,
1447     ):
1448         """Setup initialized TG, perform optimized search, return intervals.
1449
1450         If transaction_scale is nonzero, all init and non-init trial durations
1451         are set to 1.0 (as they do not affect the real trial duration)
1452         and zero intermediate phases are used.
1453         This way no re-measurement happens.
1454         Warmup has to be handled via resetter or ramp-up mechanisms.
1455
1456         :param frame_size: Frame size identifier or value [B].
1457         :param traffic_profile: Module name as a traffic profile identifier.
1458             See GPL/traffic_profiles/trex for implemented modules.
1459         :param minimum_transmit_rate: Minimal load in transactions per second.
1460         :param maximum_transmit_rate: Maximal load in transactions per second.
1461         :param packet_loss_ratio: Ratio of packets lost, for PDR [1].
1462         :param final_relative_width: Final lower bound transmit rate
1463             cannot be more distant than this multiple of the upper bound [1].
1464         :param final_trial_duration: Trial duration for the final phase [s].
1465         :param initial_trial_duration: Trial duration for the initial phase
1466             and also for the first intermediate phase [s].
1467         :param number_of_intermediate_phases: Number of intermediate phases
1468             to perform before the final phase [1].
1469         :param timeout: The search fails if it is not finished
1470             before this overall time [s].
1471         :param ppta: Packets per transaction, aggregated over directions.
1472             Needed for udp_pps, which does not have a good transaction
1473             counter, so we need to compute the expected number of packets.
1474             Default: 1.
1475         :param resetter: Callable to reset DUT state for repeated trials.
1476         :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
1477             Default: 2
1478         :param transaction_duration: Total expected time to close transaction.
1479         :param transaction_scale: Number of transactions to perform.
1480             0 (default) means unlimited.
1481         :param transaction_type: An identifier specifying which counters
1482             and formulas to use when computing attempted and failed
1483             transactions. Default: "packet".
1484         :param use_latency: Whether to measure latency during the trial.
1485             Default: False.
1486         :param ramp_up_rate: Rate to use in ramp-up trials [pps].
1487         :param ramp_up_duration: Duration of ramp-up trials [s].
1488         :param state_timeout: Time of life of DUT state [s].
1489         :param expansion_coefficient: In external search, multiply width by this.
1490         :type frame_size: str or int
1491         :type traffic_profile: str
1492         :type minimum_transmit_rate: float
1493         :type maximum_transmit_rate: float
1494         :type packet_loss_ratio: float
1495         :type final_relative_width: float
1496         :type final_trial_duration: float
1497         :type initial_trial_duration: float
1498         :type number_of_intermediate_phases: int
1499         :type timeout: float
1500         :type ppta: int
1501         :type resetter: Optional[Callable[[], None]]
1502         :type traffic_directions: int
1503         :type transaction_duration: float
1504         :type transaction_scale: int
1505         :type transaction_type: str
1506         :type use_latency: bool
1507         :type ramp_up_rate: float
1508         :type ramp_up_duration: float
1509         :type state_timeout: float
1510         :type expansion_coefficient: float
1511         :returns: Structure containing narrowed down NDR and PDR intervals
1512             and their measurements.
1513         :rtype: List[ReceiveRateInterval]
1514         :raises RuntimeError: If total duration is larger than timeout.
1515         """
1516         # We need the TrafficGenerator instance instantiated by Robot
1517         # Framework, so we can use its trex_stl-*() methods.
1518         tg_instance = BuiltIn().get_library_instance(
1519             u"resources.libraries.python.TrafficGenerator"
1520         )
1521         # Overrides for fixed transaction amount.
1522         if transaction_scale:
1523             initial_trial_duration = 1.0
1524             final_trial_duration = 1.0
1525             number_of_intermediate_phases = 0
1526             timeout += transaction_scale * 3e-4
1527         tg_instance.set_rate_provider_defaults(
1528             frame_size=frame_size,
1529             traffic_profile=traffic_profile,
1530             sleep_till_duration=False,
1531             ppta=ppta,
1532             resetter=resetter,
1533             traffic_directions=traffic_directions,
1534             transaction_duration=transaction_duration,
1535             transaction_scale=transaction_scale,
1536             transaction_type=transaction_type,
1537             use_latency=use_latency,
1538             ramp_up_rate=ramp_up_rate,
1539             ramp_up_duration=ramp_up_duration,
1540             state_timeout=state_timeout,
1541         )
1542         algorithm = MultipleLossRatioSearch(
1543             measurer=tg_instance,
1544             final_trial_duration=final_trial_duration,
1545             final_relative_width=final_relative_width,
1546             number_of_intermediate_phases=number_of_intermediate_phases,
1547             initial_trial_duration=initial_trial_duration,
1548             timeout=timeout,
1549             debug=logger.debug,
1550             expansion_coefficient=expansion_coefficient,
1551         )
1552         if packet_loss_ratio:
1553             packet_loss_ratios = [0.0, packet_loss_ratio]
1554         else:
1555             # Happens in reconf tests.
1556             packet_loss_ratios = [packet_loss_ratio]
1557         results = algorithm.narrow_down_intervals(
1558             min_rate=minimum_transmit_rate,
1559             max_rate=maximum_transmit_rate,
1560             packet_loss_ratios=packet_loss_ratios,
1561         )
1562         return results
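             # Usage sketch (commented out; rates and profile name are
             # illustrative). The returned list is expected to hold one
             # narrowed interval per entry of packet_loss_ratios above,
             # i.e. NDR first, then PDR, when packet_loss_ratio is nonzero.
             #
             # intervals = OptimizedSearch.perform_optimized_ndrpdr_search(
             #     frame_size=64,
             #     traffic_profile="trex-stl-ethip4-ip4src254",  # example name
             #     minimum_transmit_rate=9_001.0,
             #     maximum_transmit_rate=14_880_952.0,
             # )
             # ndr_interval, pdr_interval = intervals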
1563
1564     @staticmethod
1565     def perform_soak_search(
1566             frame_size,
1567             traffic_profile,
1568             minimum_transmit_rate,
1569             maximum_transmit_rate,
1570             plr_target=1e-7,
1571             tdpt=0.1,
1572             initial_count=50,
1573             timeout=7200.0,
1574             ppta=1,
1575             resetter=None,
1576             trace_enabled=False,
1577             traffic_directions=2,
1578             transaction_duration=0.0,
1579             transaction_scale=0,
1580             transaction_type=u"packet",
1581             use_latency=False,
1582             ramp_up_rate=None,
1583             ramp_up_duration=None,
1584             state_timeout=240.0,
1585     ):
1586         """Setup initialized TG, perform soak search, return avg and stdev.
1587
1588         :param frame_size: Frame size identifier or value [B].
1589         :param traffic_profile: Module name as a traffic profile identifier.
1590             See GPL/traffic_profiles/trex for implemented modules.
1591         :param minimum_transmit_rate: Minimal load in transactions per second.
1592         :param maximum_transmit_rate: Maximal load in transactions per second.
1593         :param plr_target: Ratio of packets lost to achieve [1].
1594         :param tdpt: Trial duration per trial.
1595             The algorithm linearly increases trial duration with trial number;
1596             this is the increment between successive trials, in seconds.
1597         :param initial_count: Offset to apply before the first trial.
1598             For example, initial_count=50 makes the first trial 51*tdpt long.
1599             This is needed because initial "search" phase of integrator
1600             takes significant time even without any trial results.
1601         :param timeout: The search will stop after this overall time [s].
1602         :param ppta: Packets per transaction, aggregated over directions.
1603             Needed for udp_pps, which does not have a good transaction
1604             counter, so we need to compute the expected number of packets.
1605             Default: 1.
1606         :param resetter: Callable to reset DUT state for repeated trials.
1607         :param trace_enabled: True if tracing is enabled, else False.
1608             This enables very verbose tracing of numeric computations;
1609             do not use it in production.
1610             Default: False
1611         :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
1612             Default: 2
1613         :param transaction_duration: Total expected time to close transaction.
1614         :param transaction_scale: Number of transactions to perform.
1615             0 (default) means unlimited.
1616         :param transaction_type: An identifier specifying which counters
1617             and formulas to use when computing attempted and failed
1618             transactions. Default: "packet".
1619         :param use_latency: Whether to measure latency during the trial.
1620             Default: False.
1621         :param ramp_up_rate: Rate to use in ramp-up trials [pps].
1622         :param ramp_up_duration: Duration of ramp-up trials [s].
1623         :param state_timeout: Time of life of DUT state [s].
1624         :type frame_size: str or int
1625         :type traffic_profile: str
1626         :type minimum_transmit_rate: float
1627         :type maximum_transmit_rate: float
1628         :type plr_target: float
             :type tdpt: float
1629         :type initial_count: int
1630         :type timeout: float
1631         :type ppta: int
1632         :type resetter: Optional[Callable[[], None]]
1633         :type trace_enabled: bool
1634         :type traffic_directions: int
1635         :type transaction_duration: float
1636         :type transaction_scale: int
1637         :type transaction_type: str
1638         :type use_latency: bool
1639         :type ramp_up_rate: float
1640         :type ramp_up_duration: float
1641         :type state_timeout: float
1642         :returns: Average and stdev of estimated aggregated rate giving PLR.
1643         :rtype: 2-tuple of float
1644         """
1645         tg_instance = BuiltIn().get_library_instance(
1646             u"resources.libraries.python.TrafficGenerator"
1647         )
1648         # Overrides for fixed transaction amount.
1649         if transaction_scale:
1650             timeout = 7200.0
1651         tg_instance.set_rate_provider_defaults(
1652             frame_size=frame_size,
1653             traffic_profile=traffic_profile,
1654             negative_loss=False,
1655             sleep_till_duration=True,
1656             ppta=ppta,
1657             resetter=resetter,
1658             traffic_directions=traffic_directions,
1659             transaction_duration=transaction_duration,
1660             transaction_scale=transaction_scale,
1661             transaction_type=transaction_type,
1662             use_latency=use_latency,
1663             ramp_up_rate=ramp_up_rate,
1664             ramp_up_duration=ramp_up_duration,
1665             state_timeout=state_timeout,
1666         )
1667         algorithm = PLRsearch(
1668             measurer=tg_instance,
1669             trial_duration_per_trial=tdpt,
1670             packet_loss_ratio_target=plr_target,
1671             trial_number_offset=initial_count,
1672             timeout=timeout,
1673             trace_enabled=trace_enabled,
1674         )
1675         result = algorithm.search(
1676             min_rate=minimum_transmit_rate,
1677             max_rate=maximum_transmit_rate,
1678         )
1679         return result
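             # Usage sketch (commented out; rates and profile name are
             # illustrative): the result is a 2-tuple of the average and
             # standard deviation of the critical load estimate.
             #
             # avg_rate, stdev_rate = OptimizedSearch.perform_soak_search(
             #     frame_size=64,
             #     traffic_profile="trex-stl-ethip4-ip4src254",  # example name
             #     minimum_transmit_rate=9_001.0,
             #     maximum_transmit_rate=14_880_952.0,
             #     plr_target=1e-7,
             # )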