# feat(jobspec): Unify soak jobspecs
# [csit.git] / resources / libraries / python / TrafficGenerator.py
1 # Copyright (c) 2023 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Performance testing traffic generator library."""
15
16 import math
17 import time
18
19 from typing import Callable, List, Optional, Union
20
21 from robot.api import logger
22 from robot.libraries.BuiltIn import BuiltIn
23
24 from .Constants import Constants
25 from .DropRateSearch import DropRateSearch
26 from .MLRsearch import (
27     AbstractMeasurer, Config, GoalResult, MeasurementResult,
28     MultipleLossRatioSearch, SearchGoal,
29 )
30 from .PLRsearch.PLRsearch import PLRsearch
31 from .OptionString import OptionString
32 from .ssh import exec_cmd_no_error, exec_cmd
33 from .topology import NodeType
34 from .topology import NodeSubTypeTG
35 from .topology import Topology
36 from .TRexConfigGenerator import TrexConfig
37 from .DUTSetup import DUTSetup as DS
38
39 __all__ = [u"TGDropRateSearchImpl", u"TrafficGenerator", u"OptimizedSearch"]
40
41
def check_subtype(node):
    """Return supported subtype of given node, or raise an exception.

    Currently only one subtype is supported,
    but we want our code to be ready for other ones.

    :param node: Topology node to check. Can be None.
    :type node: dict or NoneType
    :returns: Subtype detected.
    :rtype: NodeSubTypeTG
    :raises RuntimeError: If node is not supported, message explains how.
    """
    # Docstring promises None is accepted; guard before calling .get(),
    # otherwise None would raise AttributeError instead of RuntimeError.
    if node is None or node.get(u"type") is None:
        msg = u"Node type is not defined"
    elif node[u"type"] != NodeType.TG:
        msg = f"Node type is {node[u'type']!r}, not a TG"
    elif node.get(u"subtype") is None:
        msg = u"TG subtype is not defined"
    elif node[u"subtype"] != NodeSubTypeTG.TREX:
        msg = f"TG subtype {node[u'subtype']!r} is not supported"
    else:
        return NodeSubTypeTG.TREX
    raise RuntimeError(msg)
65
66
class TGDropRateSearchImpl(DropRateSearch):
    """Drop Rate Search implementation."""

    def measure_loss(
            self, rate, frame_size, loss_acceptance, loss_acceptance_type,
            traffic_profile):
        """Run the traffic and evaluate the measured results.

        :param rate: Offered traffic load.
        :param frame_size: Size of frame.
        :param loss_acceptance: Permitted drop ratio or frames count.
        :param loss_acceptance_type: Type of permitted loss.
        :param traffic_profile: Module name as a traffic profile identifier.
            See GPL/traffic_profiles/trex for implemented modules.
        :type rate: float
        :type frame_size: str
        :type loss_acceptance: float
        :type loss_acceptance_type: LossAcceptanceType
        :type traffic_profile: str
        :returns: Whether the measured loss is within acceptance (True/False).
        :rtype: bool
        :raises NotImplementedError: If TG is not supported.
        :raises RuntimeError: If TG is not specified.
        """
        # We need the instance of TrafficGenerator instantiated by Robot
        # Framework to be able to use trex_stl-*().
        tg_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.TrafficGenerator"
        )
        subtype = check_subtype(tg_instance.node)
        if subtype == NodeSubTypeTG.TREX:
            unit_rate = str(rate) + self.get_rate_type_str()
            tg_instance.trex_stl_start_remote_exec(
                self.get_duration(), unit_rate, frame_size, traffic_profile
            )
            loss = tg_instance.get_loss()
            sent = tg_instance.get_sent()
            if self.loss_acceptance_type_is_percentage():
                # Convert absolute loss to a percentage of sent packets.
                loss = (float(loss) / float(sent)) * 100
            logger.trace(
                f"comparing: {loss} < {loss_acceptance} {loss_acceptance_type}"
            )
            return float(loss) <= float(loss_acceptance)
        return False

    def get_latency(self):
        """Return min/avg/max latency.

        :returns: Latency stats.
        :rtype: list
        """
        tg_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.TrafficGenerator"
        )
        return tg_instance.get_latency_int()
125
126
class TrexMode:
    """Enumerates supported modes of the T-Rex traffic generator."""

    # Advanced stateful mode (used for L7 testing).
    ASTF = u"ASTF"
    # Stateless mode (used for L2/L3 testing).
    STL = u"STL"
133
134
135 class TrafficGenerator(AbstractMeasurer):
136     """Traffic Generator."""
137
138     # Use one instance of TrafficGenerator for all tests in test suite
139     ROBOT_LIBRARY_SCOPE = u"TEST SUITE"
140
    def __init__(self):
        """Initialize the library instance with empty state.

        All fields start as None (or empty) and are filled in later by
        initialize_traffic_generator and the measurement methods.
        """
        # Topology node dict of the TG host, set in
        # initialize_traffic_generator.
        self._node = None
        # TrexMode value (STL or ASTF), derived from the OSI layer.
        self._mode = None
        # TG interface order mapping
        self._ifaces_reordered = False
        self._ifaces = []
        # Result holding fields, to be removed.
        self._result = None
        self._loss = None
        self._sent = None
        self._latency = None
        self._received = None
        self._approximated_rate = None
        self._approximated_duration = None
        self._l7_data = None
        # Measurement input fields, needed for async stop result.
        self._start_time = None
        self._stop_time = None
        self._rate = None
        self._target_duration = None
        self._duration = None
        # Other input parameters, not knowable from measure() signature.
        self.frame_size = None
        self.traffic_profile = None
        self.traffic_directions = None
        self.negative_loss = None
        self.use_latency = None
        self.ppta = None
        self.resetter = None
        self.transaction_scale = None
        self.transaction_duration = None
        self.sleep_till_duration = None
        self.transaction_type = None
        self.duration_limit = None
        self.ramp_up_start = None
        self.ramp_up_stop = None
        self.ramp_up_rate = None
        self.ramp_up_duration = None
        self.state_timeout = None
        # Transient data needed for async measurements
        # (xstats snapshots captured at async start).
        self._xstats = []
182
    @property
    def node(self):
        """Getter.

        None until initialize_traffic_generator has been called.

        :returns: Traffic generator node.
        :rtype: dict
        """
        return self._node
191
    def get_loss(self):
        """Return number of lost packets.

        :returns: Number of lost packets.
        :rtype: str
        """
        return self._loss

    def get_sent(self):
        """Return number of sent packets.

        :returns: Number of sent packets.
        :rtype: str
        """
        return self._sent

    def get_received(self):
        """Return number of received packets.

        :returns: Number of received packets.
        :rtype: str
        """
        return self._received

    def get_latency_int(self):
        """Return rounded min/avg/max latency.

        :returns: Latency stats.
        :rtype: list
        """
        return self._latency

    def get_approximated_rate(self):
        """Return approximated rate computed as ratio of transmitted packets
        over duration of trial.

        :returns: Approximated rate.
        :rtype: str
        """
        return self._approximated_rate

    def get_l7_data(self):
        """Return L7 data.

        :returns: Client and server side L7 data from the last trial.
        :rtype: dict
        """
        return self._l7_data
240
241     def check_mode(self, expected_mode):
242         """Check TG mode.
243
244         :param expected_mode: Expected traffic generator mode.
245         :type expected_mode: object
246         :raises RuntimeError: In case of unexpected TG mode.
247         """
248         if self._mode == expected_mode:
249             return
250         raise RuntimeError(
251             f"{self._node[u'subtype']} not running in {expected_mode} mode!"
252         )
253
254     @staticmethod
255     def get_tg_type(tg_node):
256         """Log and return the installed traffic generator type.
257
258         :param tg_node: Node from topology file.
259         :type tg_node: dict
260         :returns: Traffic generator type string.
261         :rtype: str
262         :raises RuntimeError: If command returns nonzero return code.
263         """
264         return str(check_subtype(tg_node))
265
266     @staticmethod
267     def get_tg_version(tg_node):
268         """Log and return the installed traffic generator version.
269
270         :param tg_node: Node from topology file.
271         :type tg_node: dict
272         :returns: Traffic generator version string.
273         :rtype: str
274         :raises RuntimeError: If command returns nonzero return code.
275         """
276         subtype = check_subtype(tg_node)
277         if subtype == NodeSubTypeTG.TREX:
278             command = f"cat {Constants.TREX_INSTALL_DIR}/VERSION"
279             message = u"Get T-Rex version failed!"
280             stdout, _ = exec_cmd_no_error(tg_node, command, message=message)
281             return stdout.strip()
282         return "none"
283
    def initialize_traffic_generator(self, osi_layer, pfs=2):
        """TG initialization.

        Reads the topology info Robot variable, builds the T-Rex startup
        configuration for each physical interface pair and starts T-Rex.

        :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
        :param pfs: Number of physical interfaces to configure.
        :type osi_layer: str
        :type pfs: int
        :raises ValueError: If OSI layer is unknown.
        """
        if osi_layer not in ("L2", "L3", "L7"):
            raise ValueError("Unknown OSI layer!")

        topology = BuiltIn().get_variable_value("&{topology_info}")
        self._node = topology["TG"]
        subtype = check_subtype(self._node)

        if subtype == NodeSubTypeTG.TREX:
            trex_topology = list()
            # L7 tests need the advanced stateful mode, others use stateless.
            self._mode = TrexMode.ASTF if osi_layer == "L7" else TrexMode.STL

            # Iterate over interface pairs; link is the 1-based index
            # of the first interface in each pair.
            for link in range(1, pfs, 2):
                # Default adjacent addresses: the peer TG port MACs
                # (TG back-to-back setup).
                tg_if1_adj_addr = topology[f"TG_pf{link+1}_mac"][0]
                tg_if2_adj_addr = topology[f"TG_pf{link}_mac"][0]
                if osi_layer in ("L3", "L7") and "DUT1" in topology.keys():
                    # With DUTs present, adjacent addresses are the MACs
                    # of the facing DUT1/DUT{last} interfaces.
                    ifl = BuiltIn().get_variable_value("${int}")
                    last = topology["duts_count"]
                    tg_if1_adj_addr = Topology().get_interface_mac(
                        topology["DUT1"],
                        BuiltIn().get_variable_value(
                            f"${{DUT1_{ifl}{link}}}[0]"
                        )
                    )
                    tg_if2_adj_addr = Topology().get_interface_mac(
                        topology[f"DUT{last}"],
                        BuiltIn().get_variable_value(
                            f"${{DUT{last}_{ifl}{link+1}}}[0]"
                        )
                    )

                # T-Rex enumerates ports in ascending PCI address order,
                # so emit the pair in that order and remember the mapping.
                if1_pci = topology[f"TG_pf{link}_pci"][0]
                if2_pci = topology[f"TG_pf{link+1}_pci"][0]
                if min(if1_pci, if2_pci) != if1_pci:
                    # Pair is swapped relative to topology numbering.
                    # NOTE(review): self._ifaces_reordered is not updated
                    # here - confirm whether downstream port-order logic
                    # relies on it.
                    self._ifaces.append(str(link))
                    self._ifaces.append(str(link-1))
                    trex_topology.append(
                        dict(
                            interface=topology[f"TG_pf{link+1}"][0],
                            dst_mac=tg_if2_adj_addr
                        )
                    )
                    trex_topology.append(
                        dict(
                            interface=topology[f"TG_pf{link}"][0],
                            dst_mac=tg_if1_adj_addr
                        )
                    )
                else:
                    self._ifaces.append(str(link-1))
                    self._ifaces.append(str(link))
                    trex_topology.append(
                        dict(
                            interface=topology[f"TG_pf{link}"][0],
                            dst_mac=tg_if1_adj_addr
                        )
                    )
                    trex_topology.append(
                        dict(
                            interface=topology[f"TG_pf{link+1}"][0],
                            dst_mac=tg_if2_adj_addr
                        )
                    )

            TrexConfig.add_startup_configuration(
                self._node, trex_topology
            )
            TrafficGenerator.startup_trex(
                self._node, osi_layer, subtype=subtype
            )
362
    @staticmethod
    def startup_trex(tg_node, osi_layer, subtype=None):
        """Startup sequence for the TRex traffic generator.

        Up to 3 attempts: kill any running TRex, rebind NIC drivers,
        start TRex in the background and verify its API responds.

        :param tg_node: Traffic generator node.
        :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
        :param subtype: Traffic generator sub-type.
        :type tg_node: dict
        :type osi_layer: str
        :type subtype: NodeSubTypeTG
        :raises RuntimeError: If T-Rex startup failed.
        :raises ValueError: If OSI layer is not supported.
        """
        if not subtype:
            subtype = check_subtype(tg_node)
        if subtype == NodeSubTypeTG.TREX:
            for _ in range(0, 3):
                # Kill TRex only if it is already running.
                cmd = "sh -c \"pgrep t-rex && pkill t-rex && sleep 3 || true\""
                exec_cmd_no_error(
                    tg_node, cmd, sudo=True, message="Kill TRex failed!"
                )

                # Prepare interfaces for TRex.
                tg_port_drv = Constants.TREX_PORT_DRIVER
                mlx_driver = ""
                for port in tg_node["interfaces"].values():
                    if "Mellanox" in port.get("model"):
                        # Mellanox ports must run on their own kernel driver,
                        # not the generic TRex port driver.
                        mlx_driver = port.get("driver")
                        pci_addr = port.get("pci_address")
                        cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
                        if cur_driver == mlx_driver:
                            # Already bound to the correct driver.
                            pass
                        elif not cur_driver:
                            DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
                        else:
                            # Unbind the current driver before rebinding.
                            DS.pci_driver_unbind(tg_node, pci_addr)
                            DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
                    else:
                        # Other NICs are rebound to the configured TRex
                        # port driver.
                        pci_addr = port.get("pci_address")
                        cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
                        if cur_driver:
                            DS.pci_driver_unbind(tg_node, pci_addr)
                        DS.pci_driver_bind(tg_node, pci_addr, tg_port_drv)

                # Start TRex.
                cd_cmd = f"cd '{Constants.TREX_INSTALL_DIR}/scripts/'"
                trex_cmd = OptionString(["nohup", "./t-rex-64"])
                trex_cmd.add("-i")
                trex_cmd.add("--prefix $(hostname)")
                trex_cmd.add("--hdrh")
                trex_cmd.add("--no-scapy-server")
                trex_cmd.add_if("--astf", osi_layer == "L7")
                # OptionString does not create double space if extra is empty.
                trex_cmd.add(f"{Constants.TREX_EXTRA_CMDLINE}")
                inner_command = f"{cd_cmd} && {trex_cmd} > /tmp/trex.log 2>&1 &"
                cmd = f"sh -c \"{inner_command}\" > /dev/null"
                try:
                    exec_cmd_no_error(tg_node, cmd, sudo=True)
                except RuntimeError:
                    # Dump the TRex log to help debugging, then fail hard.
                    cmd = "sh -c \"cat /tmp/trex.log\""
                    exec_cmd_no_error(
                        tg_node, cmd, sudo=True,
                        message="Get TRex logs failed!"
                    )
                    raise RuntimeError("Start TRex failed!")

                # Test T-Rex API responsiveness.
                cmd = f"python3 {Constants.REMOTE_FW_DIR}/GPL/tools/trex/"
                if osi_layer in ("L2", "L3"):
                    cmd += "trex_stl_assert.py"
                elif osi_layer == "L7":
                    cmd += "trex_astf_assert.py"
                else:
                    raise ValueError("Unknown OSI layer!")
                try:
                    exec_cmd_no_error(
                        tg_node, cmd, sudo=True,
                        message="T-Rex API is not responding!", retries=20
                    )
                except RuntimeError:
                    # API not up yet; retry the whole startup sequence.
                    continue
                return
            # After max retries TRex is still not responding to API critical
            # error occurred.
            exec_cmd(tg_node, "cat /tmp/trex.log", sudo=True)
            raise RuntimeError("Start T-Rex failed after multiple retries!")
450
451     @staticmethod
452     def is_trex_running(node):
453         """Check if T-Rex is running using pidof.
454
455         :param node: Traffic generator node.
456         :type node: dict
457         :returns: True if T-Rex is running otherwise False.
458         :rtype: bool
459         """
460         ret, _, _ = exec_cmd(node, "pgrep t-rex", sudo=True)
461         return bool(int(ret) == 0)
462
463     @staticmethod
464     def teardown_traffic_generator(node):
465         """TG teardown.
466
467         :param node: Traffic generator node.
468         :type node: dict
469         :returns: nothing
470         :raises RuntimeError: If node type is not a TG,
471             or if T-Rex teardown fails.
472         """
473         subtype = check_subtype(node)
474         if subtype == NodeSubTypeTG.TREX:
475             exec_cmd_no_error(
476                 node,
477                 u"sh -c "
478                 u"\"if pgrep t-rex; then sudo pkill t-rex && sleep 3; fi\"",
479                 sudo=False,
480                 message=u"T-Rex kill failed!"
481             )
482
483     def trex_astf_stop_remote_exec(self, node):
484         """Execute T-Rex ASTF script on remote node over ssh to stop running
485         traffic.
486
487         Internal state is updated with measurement results.
488
489         :param node: T-Rex generator node.
490         :type node: dict
491         :raises RuntimeError: If stop traffic script fails.
492         """
493         command_line = OptionString().add("python3")
494         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
495         command_line.add(f"'{dirname}/trex_astf_stop.py'")
496         command_line.add("--xstat")
497         for value in self._xstats:
498             if value is not None:
499                 value = value.replace("'", "\"")
500                 command_line.add(f"'{value}'")
501         stdout, _ = exec_cmd_no_error(
502             node, command_line,
503             message="T-Rex ASTF runtime error!"
504         )
505         self._parse_traffic_results(stdout)
506
507     def trex_stl_stop_remote_exec(self, node):
508         """Execute T-Rex STL script on remote node over ssh to stop running
509         traffic.
510
511         Internal state is updated with measurement results.
512
513         :param node: T-Rex generator node.
514         :type node: dict
515         :raises RuntimeError: If stop traffic script fails.
516         """
517         command_line = OptionString().add("python3")
518         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
519         command_line.add(f"'{dirname}/trex_stl_stop.py'")
520         command_line.add("--xstat")
521         for value in self._xstats:
522             if value is not None:
523                 value = value.replace("'", "\"")
524                 command_line.add(f"'{value}'")
525         stdout, _ = exec_cmd_no_error(
526             node, command_line,
527             message="T-Rex STL runtime error!"
528         )
529         self._parse_traffic_results(stdout)
530
531     def stop_traffic_on_tg(self):
532         """Stop all traffic on TG.
533
534         :returns: Structure containing the result of the measurement.
535         :rtype: MeasurementResult
536         :raises ValueError: If TG traffic profile is not supported.
537         """
538         subtype = check_subtype(self._node)
539         if subtype != NodeSubTypeTG.TREX:
540             raise ValueError(f"Unsupported TG subtype: {subtype!r}")
541         if u"trex-astf" in self.traffic_profile:
542             self.trex_astf_stop_remote_exec(self._node)
543         elif u"trex-stl" in self.traffic_profile:
544             self.trex_stl_stop_remote_exec(self._node)
545         else:
546             raise ValueError(u"Unsupported T-Rex traffic profile!")
547         self._stop_time = time.monotonic()
548
549         return self._get_measurement_result()
550
551     def _compute_duration(self, duration, multiplier):
552         """Compute duration for profile driver.
553
554         The final result is influenced by transaction scale and duration limit.
555         It is assumed a higher level function has already set those on self.
556         The duration argument is the target value from search point of view,
557         before the overrides are applied here.
558
559         Minus one (signalling async traffic start) is kept.
560
561         Completeness flag is also included. Duration limited or async trials
562         are not considered complete for ramp-up purposes.
563
564         :param duration: Time expressed in seconds for how long to send traffic.
565         :param multiplier: Traffic rate in transactions per second.
566         :type duration: float
567         :type multiplier: float
568         :returns: New duration and whether it was a complete ramp-up candidate.
569         :rtype: float, bool
570         """
571         if duration < 0.0:
572             # Keep the async -1.
573             return duration, False
574         computed_duration = duration
575         if self.transaction_scale:
576             computed_duration = self.transaction_scale / multiplier
577             # Log the computed duration,
578             # so we can compare with what telemetry suggests
579             # the real duration was.
580             logger.debug(f"Expected duration {computed_duration}")
581         if not self.duration_limit:
582             return computed_duration, True
583         limited_duration = min(computed_duration, self.duration_limit)
584         return limited_duration, (limited_duration == computed_duration)
585
    def trex_astf_start_remote_exec(
            self, duration, multiplier, async_call=False):
        """Execute T-Rex ASTF script on remote node over ssh to start running
        traffic.

        In sync mode, measurement results are stored internally.
        In async mode, initial data including xstats are stored internally.

        This method contains the logic to compute duration as maximum time
        if transaction_scale is nonzero.
        The transaction_scale argument defines (limits) how many transactions
        will be started in total. As that amount of transaction can take
        considerable time (sometimes due to explicit delays in the profile),
        the real time a trial needs to finish is computed here. For now,
        in that case the duration argument is ignored, assuming it comes
        from ASTF-unaware search algorithm. The overall time a single
        transaction needs is given in parameter transaction_duration,
        it includes both explicit delays and implicit time it takes
        to transfer data (or whatever the transaction does).

        Currently it is observed TRex does not start the ASTF traffic
        immediately, an ad-hoc constant is added to the computed duration
        to compensate for that.

        If transaction_scale is zero, duration is not recomputed.
        It is assumed the subsequent result parsing gets the real duration
        if the traffic stops sooner for any reason.

        Currently, it is assumed traffic profile defines a single transaction.
        To avoid heavy logic here, the input rate is expected to be in
        transactions per second, as that directly translates to TRex multiplier,
        (assuming the profile does not override the default cps value of one).

        :param duration: Time expressed in seconds for how long to send traffic.
        :param multiplier: Traffic rate in transactions per second.
        :param async_call: If enabled then don't wait for all incoming traffic.
        :type duration: float
        :type multiplier: int
        :type async_call: bool
        :raises RuntimeError: In case of T-Rex driver issue.
        """
        self.check_mode(TrexMode.ASTF)
        # Swap client/server port numbers if interface order was reordered.
        p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
        if not isinstance(duration, (float, int)):
            duration = float(duration)

        # Real trial time; may differ from the target duration
        # when transaction_scale or duration_limit are set.
        computed_duration, _ = self._compute_duration(duration, multiplier)

        # Build the remote command line for the ASTF profile driver.
        command_line = OptionString().add(u"python3")
        dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
        command_line.add(f"'{dirname}/trex_astf_profile.py'")
        command_line.change_prefix(u"--")
        dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
        command_line.add_with_value(
            u"profile", f"'{dirname}/{self.traffic_profile}.py'"
        )
        command_line.add_with_value(u"duration", f"{computed_duration!r}")
        command_line.add_with_value(u"frame_size", self.frame_size)
        command_line.add_with_value(
            u"n_data_frames", Constants.ASTF_N_DATA_FRAMES
        )
        command_line.add_with_value(u"multiplier", multiplier)
        command_line.add_with_value(u"port_0", p_0)
        command_line.add_with_value(u"port_1", p_1)
        command_line.add_with_value(
            u"traffic_directions", self.traffic_directions
        )
        command_line.add_if(u"async_start", async_call)
        command_line.add_if(u"latency", self.use_latency)
        command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
        command_line.add_with_value(
            u"delay", Constants.PERF_TRIAL_ASTF_DELAY
        )

        # Record start time and rate for async stop result computation.
        self._start_time = time.monotonic()
        self._rate = multiplier
        stdout, _ = exec_cmd_no_error(
            self._node, command_line, timeout=computed_duration + 10.0,
            message=u"T-Rex ASTF runtime error!"
        )

        if async_call:
            # no result
            self._target_duration = None
            self._duration = None
            self._received = None
            self._sent = None
            self._loss = None
            self._latency = None
            xstats = []
            # Pre-populate the L7 data structure with empty (None) values;
            # the async stop parsing fills them in later.
            self._l7_data = dict()
            self._l7_data[u"client"] = dict()
            self._l7_data[u"client"][u"active_flows"] = None
            self._l7_data[u"client"][u"established_flows"] = None
            self._l7_data[u"client"][u"traffic_duration"] = None
            self._l7_data[u"server"] = dict()
            self._l7_data[u"server"][u"active_flows"] = None
            self._l7_data[u"server"][u"established_flows"] = None
            self._l7_data[u"server"][u"traffic_duration"] = None
            if u"udp" in self.traffic_profile:
                self._l7_data[u"client"][u"udp"] = dict()
                self._l7_data[u"client"][u"udp"][u"connects"] = None
                self._l7_data[u"client"][u"udp"][u"closed_flows"] = None
                self._l7_data[u"client"][u"udp"][u"err_cwf"] = None
                self._l7_data[u"server"][u"udp"] = dict()
                self._l7_data[u"server"][u"udp"][u"accepted_flows"] = None
                self._l7_data[u"server"][u"udp"][u"closed_flows"] = None
            elif u"tcp" in self.traffic_profile:
                self._l7_data[u"client"][u"tcp"] = dict()
                self._l7_data[u"client"][u"tcp"][u"initiated_flows"] = None
                self._l7_data[u"client"][u"tcp"][u"connects"] = None
                self._l7_data[u"client"][u"tcp"][u"closed_flows"] = None
                self._l7_data[u"client"][u"tcp"][u"connattempt"] = None
                self._l7_data[u"server"][u"tcp"] = dict()
                self._l7_data[u"server"][u"tcp"][u"accepted_flows"] = None
                self._l7_data[u"server"][u"tcp"][u"connects"] = None
                self._l7_data[u"server"][u"tcp"][u"closed_flows"] = None
            else:
                logger.warn(u"Unsupported T-Rex ASTF traffic profile!")
            index = 0
            # Collect per-port xstats snapshots logged by the driver,
            # needed later by the async stop script.
            # NOTE(review): the [19:] slice matches the length of
            # "Xstats snapshot N: " only for single-digit N - confirm
            # the snapshot count cannot reach 10.
            for line in stdout.splitlines():
                if f"Xstats snapshot {index}: " in line:
                    xstats.append(line[19:])
                    index += 1
            self._xstats = tuple(xstats)
        else:
            self._target_duration = duration
            self._duration = computed_duration
            self._parse_traffic_results(stdout)
715
    def trex_stl_start_remote_exec(self, duration, rate, async_call=False):
        """Execute T-Rex STL script on remote node over ssh to start running
        traffic.

        In sync mode, measurement results are stored internally.
        In async mode, initial data including xstats are stored internally.

        Mode-unaware code (e.g. in search algorithms) works with transactions.
        To keep the logic simple, multiplier is set to that value.
        As bidirectional traffic profiles send packets in both directions,
        they are treated as transactions with two packets (one per direction).

        :param duration: Time expressed in seconds for how long to send traffic.
        :param rate: Traffic rate in transactions per second.
        :param async_call: If enabled then don't wait for all incoming traffic.
        :type duration: float
        :type rate: str
        :type async_call: bool
        :raises RuntimeError: In case of T-Rex driver issue.
        """
        self.check_mode(TrexMode.STL)
        # Callers (e.g. Robot keywords) may pass duration as a string.
        if not isinstance(duration, (float, int)):
            duration = float(duration)

        # Apply transaction-scale and duration-limit logic; the returned
        # "complete" flag is not needed for starting the traffic itself.
        duration, _ = self._compute_duration(duration=duration, multiplier=rate)

        # Build the remote command line option by option; the script path is
        # quoted because REMOTE_FW_DIR may contain shell-sensitive characters.
        command_line = OptionString().add("python3")
        dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
        command_line.add(f"'{dirname}/trex_stl_profile.py'")
        command_line.change_prefix("--")
        dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
        command_line.add_with_value(
            "profile", f"'{dirname}/{self.traffic_profile}.py'"
        )
        command_line.add_with_value("duration", f"{duration!r}")
        command_line.add_with_value("frame_size", self.frame_size)
        command_line.add_with_value("rate", f"{rate!r}")
        command_line.add_with_value("ports", " ".join(self._ifaces))
        command_line.add_with_value(
            "traffic_directions", self.traffic_directions
        )
        command_line.add_if("async_start", async_call)
        command_line.add_if("latency", self.use_latency)
        command_line.add_if("force", Constants.TREX_SEND_FORCE)
        command_line.add_with_value("delay", Constants.PERF_TRIAL_STL_DELAY)

        # Remember wall-clock start and numeric rate for later result parsing;
        # rate may arrive either as a bare number or with a "pps" suffix.
        self._start_time = time.monotonic()
        self._rate = float(rate[:-3]) if "pps" in rate else float(rate)
        stdout, _ = exec_cmd_no_error(
            self._node, command_line, timeout=int(duration) + 60,
            message="T-Rex STL runtime error"
        )

        if async_call:
            # No results are available yet; clear the fields so stale values
            # from a previous trial cannot leak into this one.
            self._target_duration = None
            self._duration = None
            self._received = None
            self._sent = None
            self._loss = None
            self._latency = None

            # Collect per-port extended stats snapshots printed by the script;
            # they are needed to compute deltas when traffic is stopped later.
            xstats = []
            index = 0
            for line in stdout.splitlines():
                if f"Xstats snapshot {index}: " in line:
                    xstats.append(line[19:])
                    index += 1
            self._xstats = tuple(xstats)
        else:
            self._target_duration = duration
            self._duration = duration
            self._parse_traffic_results(stdout)
789
790     def send_traffic_on_tg(
791             self,
792             duration,
793             rate,
794             frame_size,
795             traffic_profile,
796             async_call=False,
797             ppta=1,
798             traffic_directions=2,
799             transaction_duration=0.0,
800             transaction_scale=0,
801             transaction_type=u"packet",
802             duration_limit=0.0,
803             use_latency=False,
804             ramp_up_rate=None,
805             ramp_up_duration=None,
806             state_timeout=240.0,
807             ramp_up_only=False,
808         ):
809         """Send traffic from all configured interfaces on TG.
810
811         In async mode, xstats is stored internally,
812         to enable getting correct result when stopping the traffic.
813         In both modes, stdout is returned,
814         but _parse_traffic_results only works in sync output.
815
816         Note that traffic generator uses DPDK driver which might
817         reorder port numbers based on wiring and PCI numbering.
818         This method handles that, so argument values are invariant,
819         but you can see swapped valued in debug logs.
820
821         When transaction_scale is specified, the duration value is ignored
822         and the needed time is computed. For cases where this results in
823         to too long measurement (e.g. teardown trial with small rate),
824         duration_limit is applied (of non-zero), so the trial is stopped sooner.
825
826         Bidirectional STL profiles are treated as transactions with two packets.
827
828         The return value is None for async.
829
830         :param duration: Duration of test traffic generation in seconds.
831         :param rate: Traffic rate in transactions per second.
832         :param frame_size: Frame size (L2) in Bytes.
833         :param traffic_profile: Module name as a traffic profile identifier.
834             See GPL/traffic_profiles/trex for implemented modules.
835         :param async_call: Async mode.
836         :param ppta: Packets per transaction, aggregated over directions.
837             Needed for udp_pps which does not have a good transaction counter,
838             so we need to compute expected number of packets.
839             Default: 1.
840         :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
841             Default: 2
842         :param transaction_duration: Total expected time to close transaction.
843         :param transaction_scale: Number of transactions to perform.
844             0 (default) means unlimited.
845         :param transaction_type: An identifier specifying which counters
846             and formulas to use when computing attempted and failed
847             transactions. Default: "packet".
848         :param duration_limit: Zero or maximum limit for computed (or given)
849             duration.
850         :param use_latency: Whether to measure latency during the trial.
851             Default: False.
852         :param ramp_up_rate: Rate to use in ramp-up trials [pps].
853         :param ramp_up_duration: Duration of ramp-up trials [s].
854         :param state_timeout: Time of life of DUT state [s].
855         :param ramp_up_only: If true, do not perform main trial measurement.
856         :type duration: float
857         :type rate: float
858         :type frame_size: str
859         :type traffic_profile: str
860         :type async_call: bool
861         :type ppta: int
862         :type traffic_directions: int
863         :type transaction_duration: float
864         :type transaction_scale: int
865         :type transaction_type: str
866         :type duration_limit: float
867         :type use_latency: bool
868         :type ramp_up_rate: float
869         :type ramp_up_duration: float
870         :type state_timeout: float
871         :type ramp_up_only: bool
872         :returns: TG results.
873         :rtype: MeasurementResult or None
874         :raises ValueError: If TG traffic profile is not supported.
875         """
876         self.set_rate_provider_defaults(
877             frame_size=frame_size,
878             traffic_profile=traffic_profile,
879             ppta=ppta,
880             traffic_directions=traffic_directions,
881             transaction_duration=transaction_duration,
882             transaction_scale=transaction_scale,
883             transaction_type=transaction_type,
884             duration_limit=duration_limit,
885             use_latency=use_latency,
886             ramp_up_rate=ramp_up_rate,
887             ramp_up_duration=ramp_up_duration,
888             state_timeout=state_timeout,
889         )
890         return self._send_traffic_on_tg_with_ramp_up(
891             duration=duration,
892             rate=rate,
893             async_call=async_call,
894             ramp_up_only=ramp_up_only,
895         )
896
897     def _send_traffic_on_tg_internal(
898             self, duration, rate, async_call=False):
899         """Send traffic from all configured interfaces on TG.
900
901         This is an internal function, it assumes set_rate_provider_defaults
902         has been called to remember most values.
903         The reason why need to remember various values is that
904         the traffic can be asynchronous, and parsing needs those values.
905         The reason why this is is a separate function from the one
906         which calls set_rate_provider_defaults is that some search algorithms
907         need to specify their own values, and we do not want the measure call
908         to overwrite them with defaults.
909
910         This function is used both for automated ramp-up trials
911         and for explicitly called trials.
912
913         :param duration: Duration of test traffic generation in seconds.
914         :param rate: Traffic rate in transactions per second.
915         :param async_call: Async mode.
916         :type duration: float
917         :type rate: float
918         :type async_call: bool
919         :returns: TG results.
920         :rtype: MeasurementResult or None
921         :raises ValueError: If TG traffic profile is not supported.
922         """
923         subtype = check_subtype(self._node)
924         if subtype == NodeSubTypeTG.TREX:
925             if u"trex-astf" in self.traffic_profile:
926                 self.trex_astf_start_remote_exec(
927                     duration, float(rate), async_call
928                 )
929             elif u"trex-stl" in self.traffic_profile:
930                 unit_rate_str = str(rate) + u"pps"
931                 self.trex_stl_start_remote_exec(
932                     duration, unit_rate_str, async_call
933                 )
934             else:
935                 raise ValueError(u"Unsupported T-Rex traffic profile!")
936
937         return None if async_call else self._get_measurement_result()
938
    def _send_traffic_on_tg_with_ramp_up(
            self, duration, rate, async_call=False, ramp_up_only=False):
        """Send traffic from all interfaces on TG, maybe after ramp-up.

        This is an internal function, it assumes set_rate_provider_defaults
        has been called to remember most values.
        The reason why need to remember various values is that
        the traffic can be asynchronous, and parsing needs those values.
        The reason why this is a separate function from the one
        which calls set_rate_provider_defaults is that some search algorithms
        need to specify their own values, and we do not want the measure call
        to overwrite them with defaults.

        If ramp-up tracking is detected, a computation is performed,
        and if state timeout is near, trial at ramp-up rate and duration
        is inserted before the main trial measurement.

        The ramp_up_only parameter forces a ramp-up without immediate
        trial measurement, which is useful in case self remembers
        a previous ramp-up trial that belongs to a different test (phase).

        Return None if trial is async or ramp-up only.

        :param duration: Duration of test traffic generation in seconds.
        :param rate: Traffic rate in transactions per second.
        :param async_call: Async mode.
        :param ramp_up_only: If true, do not perform main trial measurement.
        :type duration: float
        :type rate: float
        :type async_call: bool
        :type ramp_up_only: bool
        :returns: TG results.
        :rtype: MeasurementResult or None
        :raises ValueError: If TG traffic profile is not supported.
        """
        # "complete" stays False unless _compute_duration says the whole
        # transaction scale fits into the computed duration.
        complete = False
        if self.ramp_up_rate:
            # Figure out whether we need to insert a ramp-up trial.
            if ramp_up_only or self.ramp_up_start is None:
                # We never ramped up yet (at least not in this test case).
                ramp_up_needed = True
            else:
                # We ramped up before, but maybe it was too long ago.
                # Adding a constant overhead to be safe.
                time_now = time.monotonic() + 1.0
                computed_duration, complete = self._compute_duration(
                    duration=duration,
                    multiplier=rate,
                )
                # There are two conditions for inserting ramp-up.
                # If early sessions are expiring already,
                # or if late sessions are to expire before measurement is over.
                ramp_up_start_delay = time_now - self.ramp_up_start
                ramp_up_stop_delay = time_now - self.ramp_up_stop
                ramp_up_stop_delay += computed_duration
                bigger_delay = max(ramp_up_start_delay, ramp_up_stop_delay)
                # Final boolean decision.
                ramp_up_needed = (bigger_delay >= self.state_timeout)
            if ramp_up_needed:
                logger.debug(
                    u"State may time out during next real trial, "
                    u"inserting a ramp-up trial."
                )
                self.ramp_up_start = time.monotonic()
                self._send_traffic_on_tg_internal(
                    duration=self.ramp_up_duration,
                    rate=self.ramp_up_rate,
                    async_call=async_call,
                )
                self.ramp_up_stop = time.monotonic()
                logger.debug(u"Ramp-up done.")
            else:
                logger.debug(
                    u"State will probably not time out during next real trial, "
                    u"no ramp-up trial needed just yet."
                )
        if ramp_up_only:
            return None
        # The main trial; timestamps recorded so a lossless complete trial
        # can double as a ramp-up for the next measurement.
        trial_start = time.monotonic()
        result = self._send_traffic_on_tg_internal(
            duration=duration,
            rate=rate,
            async_call=async_call,
        )
        trial_end = time.monotonic()
        if self.ramp_up_rate:
            # Optimization: No loss acts as a good ramp-up, if it was complete.
            if complete and result is not None and result.loss_ratio == 0.0:
                logger.debug(u"Good trial acts as a ramp-up")
                self.ramp_up_start = trial_start
                self.ramp_up_stop = trial_end
            else:
                logger.debug(u"Loss or incomplete, does not act as a ramp-up.")
        return result
1033
1034     def no_traffic_loss_occurred(self):
1035         """Fail if loss occurred in traffic run.
1036
1037         :returns: nothing
1038         :raises Exception: If loss occured.
1039         """
1040         if self._loss is None:
1041             raise RuntimeError(u"The traffic generation has not been issued")
1042         if self._loss != u"0":
1043             raise RuntimeError(f"Traffic loss occurred: {self._loss}")
1044
1045     def fail_if_no_traffic_forwarded(self):
1046         """Fail if no traffic forwarded.
1047
1048         :returns: nothing
1049         :raises Exception: If no traffic forwarded.
1050         """
1051         if self._received is None:
1052             raise RuntimeError(u"The traffic generation has not been issued")
1053         if self._received == 0:
1054             raise RuntimeError(u"No traffic forwarded")
1055
1056     def partial_traffic_loss_accepted(
1057             self, loss_acceptance, loss_acceptance_type):
1058         """Fail if loss is higher then accepted in traffic run.
1059
1060         :param loss_acceptance: Permitted drop ratio or frames count.
1061         :param loss_acceptance_type: Type of permitted loss.
1062         :type loss_acceptance: float
1063         :type loss_acceptance_type: LossAcceptanceType
1064         :returns: nothing
1065         :raises Exception: If loss is above acceptance criteria.
1066         """
1067         if self._loss is None:
1068             raise Exception(u"The traffic generation has not been issued")
1069
1070         if loss_acceptance_type == u"percentage":
1071             loss = (float(self._loss) / float(self._sent)) * 100
1072         elif loss_acceptance_type == u"frames":
1073             loss = float(self._loss)
1074         else:
1075             raise Exception(u"Loss acceptance type not supported")
1076
1077         if loss > float(loss_acceptance):
1078             raise Exception(
1079                 f"Traffic loss {loss} above loss acceptance: {loss_acceptance}"
1080             )
1081
1082     def _parse_traffic_results(self, stdout):
1083         """Parse stdout of scripts into fields of self.
1084
1085         Block of code to reuse, by sync start, or stop after async.
1086
1087         :param stdout: Text containing the standard output.
1088         :type stdout: str
1089         """
1090         subtype = check_subtype(self._node)
1091         if subtype == NodeSubTypeTG.TREX:
1092             # Last line from console output
1093             line = stdout.splitlines()[-1]
1094             results = line.split(u";")
1095             if results[-1] in (u" ", u""):
1096                 results.pop(-1)
1097             self._result = dict()
1098             for result in results:
1099                 key, value = result.split(u"=", maxsplit=1)
1100                 self._result[key.strip()] = value
1101             logger.info(f"TrafficGen results:\n{self._result}")
1102             self._received = int(self._result.get(u"total_received"), 0)
1103             self._sent = int(self._result.get(u"total_sent", 0))
1104             self._loss = int(self._result.get(u"frame_loss", 0))
1105             self._approximated_duration = \
1106                 self._result.get(u"approximated_duration", 0.0)
1107             if u"manual" not in str(self._approximated_duration):
1108                 self._approximated_duration = float(self._approximated_duration)
1109             self._latency = list()
1110             self._latency.append(self._result.get(u"latency_stream_0(usec)"))
1111             self._latency.append(self._result.get(u"latency_stream_1(usec)"))
1112             if self._mode == TrexMode.ASTF:
1113                 self._l7_data = dict()
1114                 self._l7_data[u"client"] = dict()
1115                 self._l7_data[u"client"][u"sent"] = \
1116                     int(self._result.get(u"client_sent", 0))
1117                 self._l7_data[u"client"][u"received"] = \
1118                     int(self._result.get(u"client_received", 0))
1119                 self._l7_data[u"client"][u"active_flows"] = \
1120                     int(self._result.get(u"client_active_flows", 0))
1121                 self._l7_data[u"client"][u"established_flows"] = \
1122                     int(self._result.get(u"client_established_flows", 0))
1123                 self._l7_data[u"client"][u"traffic_duration"] = \
1124                     float(self._result.get(u"client_traffic_duration", 0.0))
1125                 self._l7_data[u"client"][u"err_rx_throttled"] = \
1126                     int(self._result.get(u"client_err_rx_throttled", 0))
1127                 self._l7_data[u"client"][u"err_c_nf_throttled"] = \
1128                     int(self._result.get(u"client_err_nf_throttled", 0))
1129                 self._l7_data[u"client"][u"err_flow_overflow"] = \
1130                     int(self._result.get(u"client_err_flow_overflow", 0))
1131                 self._l7_data[u"server"] = dict()
1132                 self._l7_data[u"server"][u"active_flows"] = \
1133                     int(self._result.get(u"server_active_flows", 0))
1134                 self._l7_data[u"server"][u"established_flows"] = \
1135                     int(self._result.get(u"server_established_flows", 0))
1136                 self._l7_data[u"server"][u"traffic_duration"] = \
1137                     float(self._result.get(u"server_traffic_duration", 0.0))
1138                 self._l7_data[u"server"][u"err_rx_throttled"] = \
1139                     int(self._result.get(u"client_err_rx_throttled", 0))
1140                 if u"udp" in self.traffic_profile:
1141                     self._l7_data[u"client"][u"udp"] = dict()
1142                     self._l7_data[u"client"][u"udp"][u"connects"] = \
1143                         int(self._result.get(u"client_udp_connects", 0))
1144                     self._l7_data[u"client"][u"udp"][u"closed_flows"] = \
1145                         int(self._result.get(u"client_udp_closed", 0))
1146                     self._l7_data[u"client"][u"udp"][u"tx_bytes"] = \
1147                         int(self._result.get(u"client_udp_tx_bytes", 0))
1148                     self._l7_data[u"client"][u"udp"][u"rx_bytes"] = \
1149                         int(self._result.get(u"client_udp_rx_bytes", 0))
1150                     self._l7_data[u"client"][u"udp"][u"tx_packets"] = \
1151                         int(self._result.get(u"client_udp_tx_packets", 0))
1152                     self._l7_data[u"client"][u"udp"][u"rx_packets"] = \
1153                         int(self._result.get(u"client_udp_rx_packets", 0))
1154                     self._l7_data[u"client"][u"udp"][u"keep_drops"] = \
1155                         int(self._result.get(u"client_udp_keep_drops", 0))
1156                     self._l7_data[u"client"][u"udp"][u"err_cwf"] = \
1157                         int(self._result.get(u"client_err_cwf", 0))
1158                     self._l7_data[u"server"][u"udp"] = dict()
1159                     self._l7_data[u"server"][u"udp"][u"accepted_flows"] = \
1160                         int(self._result.get(u"server_udp_accepts", 0))
1161                     self._l7_data[u"server"][u"udp"][u"closed_flows"] = \
1162                         int(self._result.get(u"server_udp_closed", 0))
1163                     self._l7_data[u"server"][u"udp"][u"tx_bytes"] = \
1164                         int(self._result.get(u"server_udp_tx_bytes", 0))
1165                     self._l7_data[u"server"][u"udp"][u"rx_bytes"] = \
1166                         int(self._result.get(u"server_udp_rx_bytes", 0))
1167                     self._l7_data[u"server"][u"udp"][u"tx_packets"] = \
1168                         int(self._result.get(u"server_udp_tx_packets", 0))
1169                     self._l7_data[u"server"][u"udp"][u"rx_packets"] = \
1170                         int(self._result.get(u"server_udp_rx_packets", 0))
1171                 elif u"tcp" in self.traffic_profile:
1172                     self._l7_data[u"client"][u"tcp"] = dict()
1173                     self._l7_data[u"client"][u"tcp"][u"initiated_flows"] = \
1174                         int(self._result.get(u"client_tcp_connect_inits", 0))
1175                     self._l7_data[u"client"][u"tcp"][u"connects"] = \
1176                         int(self._result.get(u"client_tcp_connects", 0))
1177                     self._l7_data[u"client"][u"tcp"][u"closed_flows"] = \
1178                         int(self._result.get(u"client_tcp_closed", 0))
1179                     self._l7_data[u"client"][u"tcp"][u"connattempt"] = \
1180                         int(self._result.get(u"client_tcp_connattempt", 0))
1181                     self._l7_data[u"client"][u"tcp"][u"tx_bytes"] = \
1182                         int(self._result.get(u"client_tcp_tx_bytes", 0))
1183                     self._l7_data[u"client"][u"tcp"][u"rx_bytes"] = \
1184                         int(self._result.get(u"client_tcp_rx_bytes", 0))
1185                     self._l7_data[u"server"][u"tcp"] = dict()
1186                     self._l7_data[u"server"][u"tcp"][u"accepted_flows"] = \
1187                         int(self._result.get(u"server_tcp_accepts", 0))
1188                     self._l7_data[u"server"][u"tcp"][u"connects"] = \
1189                         int(self._result.get(u"server_tcp_connects", 0))
1190                     self._l7_data[u"server"][u"tcp"][u"closed_flows"] = \
1191                         int(self._result.get(u"server_tcp_closed", 0))
1192                     self._l7_data[u"server"][u"tcp"][u"tx_bytes"] = \
1193                         int(self._result.get(u"server_tcp_tx_bytes", 0))
1194                     self._l7_data[u"server"][u"tcp"][u"rx_bytes"] = \
1195                         int(self._result.get(u"server_tcp_rx_bytes", 0))
1196
    def _get_measurement_result(self):
        """Return the result of last measurement as MeasurementResult.

        Separate function, as measurements can end either by time
        or by explicit call, this is the common block at the end.

        The intended_load field of MeasurementResult is in
        transactions per second. Transmit count and loss count units
        depend on the transaction type. Usually they are in transactions
        per second, or aggregated packets per second.

        :returns: Structure containing the result of the measurement.
        :rtype: MeasurementResult
        :raises RuntimeError: If duration cannot be determined,
            or transaction_type is not recognized.
        """
        duration_with_overheads = time.monotonic() - self._start_time
        # Determine the best estimate of the real traffic duration,
        # trying progressively less precise sources.
        try:
            # Client duration seems to include a setup period
            # where TRex does not send any packets yet.
            # Server duration does not include it.
            server_data = self._l7_data[u"server"]
            approximated_duration = float(server_data[u"traffic_duration"])
        except (KeyError, AttributeError, ValueError, TypeError):
            approximated_duration = None
        try:
            if not approximated_duration:
                approximated_duration = float(self._approximated_duration)
        except ValueError:  # "manual"
            approximated_duration = None
        if not approximated_duration:
            if self._duration and self._duration > 0:
                # Known recomputed or target duration.
                approximated_duration = self._duration
            else:
                # It was an explicit stop.
                if not self._stop_time:
                    raise RuntimeError(u"Unable to determine duration.")
                approximated_duration = self._stop_time - self._start_time
        target_duration = self._target_duration
        if not target_duration:
            target_duration = approximated_duration
        transmit_rate = self._rate
        unsent = 0
        # Each transaction type has its own way of computing
        # attempted, passed and lost counts from the parsed fields.
        if self.transaction_type == u"packet":
            partial_attempt_count = self._sent
            packet_rate = transmit_rate * self.ppta
            # We have a float. TRex way of rounding it is not obvious.
            # The biggest source of mismatch is Inter Stream Gap.
            # So the code tolerates 10 usec of missing packets.
            expected_attempt_count = (target_duration - 1e-5) * packet_rate
            expected_attempt_count = math.ceil(expected_attempt_count)
            # TRex can send more.
            expected_attempt_count = max(expected_attempt_count, self._sent)
            unsent = expected_attempt_count - self._sent
            pass_count = self._received
            loss_count = self._loss
        elif self.transaction_type == u"udp_cps":
            if not self.transaction_scale:
                raise RuntimeError(u"Add support for no-limit udp_cps.")
            partial_attempt_count = self._l7_data[u"client"][u"sent"]
            # We do not care whether TG is slow, it should have attempted all.
            expected_attempt_count = self.transaction_scale
            unsent = expected_attempt_count - partial_attempt_count
            pass_count = self._l7_data[u"client"][u"received"]
            loss_count = partial_attempt_count - pass_count
        elif self.transaction_type == u"tcp_cps":
            if not self.transaction_scale:
                raise RuntimeError(u"Add support for no-limit tcp_cps.")
            ctca = self._l7_data[u"client"][u"tcp"][u"connattempt"]
            partial_attempt_count = ctca
            # We do not care whether TG is slow, it should have attempted all.
            expected_attempt_count = self.transaction_scale
            unsent = expected_attempt_count - partial_attempt_count
            # From TCP point of view, server/connects counts full connections,
            # but we are testing NAT session so client/connects counts that
            # (half connections from TCP point of view).
            pass_count = self._l7_data[u"client"][u"tcp"][u"connects"]
            loss_count = partial_attempt_count - pass_count
        elif self.transaction_type == u"udp_pps":
            if not self.transaction_scale:
                raise RuntimeError(u"Add support for no-limit udp_pps.")
            partial_attempt_count = self._sent
            expected_attempt_count = self.transaction_scale * self.ppta
            unsent = expected_attempt_count - self._sent
            loss_count = self._loss
        elif self.transaction_type == u"tcp_pps":
            if not self.transaction_scale:
                raise RuntimeError(u"Add support for no-limit tcp_pps.")
            partial_attempt_count = self._sent
            expected_attempt_count = self.transaction_scale * self.ppta
            # One loss-like scenario happens when TRex receives all packets
            # on L2 level, but is not fast enough to process them all
            # at L7 level, which leads to retransmissions.
            # Those manifest as opackets larger than expected.
            # A simple workaround is to add absolute difference.
            # Probability of retransmissions exactly cancelling
            # packets unsent due to duration stretching is quite low.
            unsent = abs(expected_attempt_count - self._sent)
            loss_count = self._loss
        else:
            raise RuntimeError(f"Unknown parsing {self.transaction_type!r}")
        if unsent and isinstance(self._approximated_duration, float):
            # Do not report unsent for "manual".
            logger.debug(f"Unsent packets/transactions: {unsent}")
        # Negative loss (more received than sent, e.g. due to loopback)
        # is clamped to zero unless the test explicitly tracks it.
        if loss_count < 0 and not self.negative_loss:
            loss_count = 0
        measurement = MeasurementResult(
            intended_duration=target_duration,
            intended_load=transmit_rate,
            offered_count=partial_attempt_count,
            loss_count=loss_count,
            offered_duration=approximated_duration,
            duration_with_overheads=duration_with_overheads,
            intended_count=expected_attempt_count,
        )
        measurement.latency = self.get_latency_int()
        return measurement
1313
1314     def measure(self, intended_duration, intended_load):
1315         """Run trial measurement, parse and return results.
1316
1317         The intended load is for transactions. Stateles bidirectional traffic
1318         is understood as sequence of (asynchronous) transactions,
1319         two packets each.
1320
1321         The result units depend on test type, generally
1322         the count either transactions or packets (aggregated over directions).
1323
1324         Optionally, this method sleeps if measurement finished before
1325         the time specified as intended_duration (PLRsearch needs time for math).
1326
1327         :param intended_duration: Trial duration [s].
1328         :param intended_load: Target rate in transactions per second.
1329         :type intended_duration: float
1330         :type intended_load: float
1331         :returns: Structure containing the result of the measurement.
1332         :rtype: MeasurementResult
1333         :raises RuntimeError: If TG is not set or if node is not TG
1334             or if subtype is not specified.
1335         :raises NotImplementedError: If TG is not supported.
1336         """
1337         intended_duration = float(intended_duration)
1338         time_start = time.monotonic()
1339         time_stop = time_start + intended_duration
1340         if self.resetter:
1341             self.resetter()
1342         result = self._send_traffic_on_tg_with_ramp_up(
1343             duration=intended_duration,
1344             rate=intended_load,
1345             async_call=False,
1346         )
1347         logger.debug(f"trial measurement result: {result!r}")
1348         # In PLRsearch, computation needs the specified time to complete.
1349         if self.sleep_till_duration:
1350             while (sleeptime := time_stop - time.monotonic()) > 0.0:
1351                 time.sleep(sleeptime)
1352         return result
1353
1354     def set_rate_provider_defaults(
1355             self,
1356             frame_size,
1357             traffic_profile,
1358             ppta=1,
1359             resetter=None,
1360             traffic_directions=2,
1361             transaction_duration=0.0,
1362             transaction_scale=0,
1363             transaction_type=u"packet",
1364             duration_limit=0.0,
1365             negative_loss=True,
1366             sleep_till_duration=False,
1367             use_latency=False,
1368             ramp_up_rate=None,
1369             ramp_up_duration=None,
1370             state_timeout=240.0,
1371         ):
1372         """Store values accessed by measure().
1373
1374         :param frame_size: Frame size identifier or value [B].
1375         :param traffic_profile: Module name as a traffic profile identifier.
1376             See GPL/traffic_profiles/trex for implemented modules.
1377         :param ppta: Packets per transaction, aggregated over directions.
1378             Needed for udp_pps which does not have a good transaction counter,
1379             so we need to compute expected number of packets.
1380             Default: 1.
1381         :param resetter: Callable to reset DUT state for repeated trials.
1382         :param traffic_directions: Traffic from packet counting point of view
1383             is bi- (2) or uni- (1) directional.
1384             Default: 2
1385         :param transaction_duration: Total expected time to close transaction.
1386         :param transaction_scale: Number of transactions to perform.
1387             0 (default) means unlimited.
1388         :param transaction_type: An identifier specifying which counters
1389             and formulas to use when computing attempted and failed
1390             transactions. Default: "packet".
1391         :param duration_limit: Zero or maximum limit for computed (or given)
1392             duration.
1393         :param negative_loss: If false, negative loss is reported as zero loss.
1394         :param sleep_till_duration: If true and measurement returned faster,
1395             sleep until it matches duration. Needed for PLRsearch.
1396         :param use_latency: Whether to measure latency during the trial.
1397             Default: False.
1398         :param ramp_up_rate: Rate to use in ramp-up trials [pps].
1399         :param ramp_up_duration: Duration of ramp-up trials [s].
1400         :param state_timeout: Time of life of DUT state [s].
1401         :type frame_size: str or int
1402         :type traffic_profile: str
1403         :type ppta: int
1404         :type resetter: Optional[Callable[[], None]]
1405         :type traffic_directions: int
1406         :type transaction_duration: float
1407         :type transaction_scale: int
1408         :type transaction_type: str
1409         :type duration_limit: float
1410         :type negative_loss: bool
1411         :type sleep_till_duration: bool
1412         :type use_latency: bool
1413         :type ramp_up_rate: float
1414         :type ramp_up_duration: float
1415         :type state_timeout: float
1416         """
1417         self.frame_size = frame_size
1418         self.traffic_profile = str(traffic_profile)
1419         self.resetter = resetter
1420         self.ppta = int(ppta)
1421         self.traffic_directions = int(traffic_directions)
1422         self.transaction_duration = float(transaction_duration)
1423         self.transaction_scale = int(transaction_scale)
1424         self.transaction_type = str(transaction_type)
1425         self.duration_limit = float(duration_limit)
1426         self.negative_loss = bool(negative_loss)
1427         self.sleep_till_duration = bool(sleep_till_duration)
1428         self.use_latency = bool(use_latency)
1429         self.ramp_up_rate = float(ramp_up_rate)
1430         self.ramp_up_duration = float(ramp_up_duration)
1431         self.state_timeout = float(state_timeout)
1432
1433
1434 class OptimizedSearch:
1435     """Class to be imported as Robot Library, containing search keywords.
1436
1437     Aside of setting up measurer and forwarding arguments,
1438     the main business is to translate min/max rate from unidir to aggregated.
1439     """
1440
1441     @staticmethod
1442     def perform_mlr_search(
1443         frame_size: Union[int, str],
1444         traffic_profile: str,
1445         min_load: float,
1446         max_load: float,
1447         loss_ratio: float = 0.005,
1448         relative_width: float = 0.005,
1449         initial_trial_duration: float = 1.0,
1450         final_trial_duration: float = 1.0,
1451         duration_sum: float = 21.0,
1452         expansion_coefficient: int = 2,
1453         preceding_targets: int = 2,
1454         search_duration_max: float = 1200.0,
1455         ppta: int = 1,
1456         resetter: Optional[Callable[[], None]] = None,
1457         traffic_directions: int = 2,
1458         transaction_duration: float = 0.0,
1459         transaction_scale: int = 0,
1460         transaction_type: str = "packet",
1461         use_latency: bool = False,
1462         ramp_up_rate: float = 0.0,
1463         ramp_up_duration: float = 0.0,
1464         state_timeout: float = 240.0,
1465     ) -> List[GoalResult]:
1466         """Setup initialized TG, perform optimized search, return intervals.
1467
1468         If transaction_scale is nonzero, all init and non-init trial durations
1469         are set to 1.0 (as they do not affect the real trial duration)
1470         and zero intermediate phases are used.
1471         This way no re-measurement happens.
1472         Warmup has to be handled via resetter or ramp-up mechanisms.
1473
1474         :param frame_size: Frame size identifier or value [B].
1475         :param traffic_profile: Module name as a traffic profile identifier.
1476             See GPL/traffic_profiles/trex for implemented modules.
1477         :param min_load: Minimal load in transactions per second.
1478         :param max_load: Maximal load in transactions per second.
1479         :param loss_ratio: Ratio of packets lost, for PDR [1].
1480         :param relative_width: Final lower bound intended load
1481             cannot be more distant that this multiple of upper bound [1].
1482         :param initial_trial_duration: Trial duration for the initial phase
1483             and also for the first intermediate phase [s].
1484         :param final_trial_duration: Trial duration for the final phase [s].
1485         :param duration_sum: Max sum of duration for deciding [s].
1486         :param expansion_coefficient: In external search multiply width by this.
1487         :param preceding_targets: Number of intermediate phases
1488             to perform before the final phase [1].
1489         :param search_duration_max: The search will fail itself
1490             when not finished before this overall time [s].
1491         :param ppta: Packets per transaction, aggregated over directions.
1492             Needed for udp_pps which does not have a good transaction counter,
1493             so we need to compute expected number of packets.
1494             Default: 1.
1495         :param resetter: Callable to reset DUT state for repeated trials.
1496         :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
1497             Default: 2
1498         :param transaction_duration: Total expected time to close transaction.
1499         :param transaction_scale: Number of transactions to perform.
1500             0 (default) means unlimited.
1501         :param transaction_type: An identifier specifying which counters
1502             and formulas to use when computing attempted and failed
1503             transactions. Default: "packet".
1504         :param use_latency: Whether to measure latency during the trial.
1505             Default: False.
1506         :param ramp_up_rate: Rate to use in ramp-up trials [pps].
1507         :param ramp_up_duration: Duration of ramp-up trials [s].
1508         :param state_timeout: Time of life of DUT state [s].
1509         :type frame_size: str or int
1510         :type traffic_profile: str
1511         :type min_load: float
1512         :type max_load: float
1513         :type loss_ratio: float
1514         :type relative_width: float
1515         :type initial_trial_duration: float
1516         :type final_trial_duration: float
1517         :type duration_sum: float
1518         :type expansion_coefficient: int
1519         :type preceding_targets: int
1520         :type search_duration_max: float
1521         :type ppta: int
1522         :type resetter: Optional[Callable[[], None]]
1523         :type traffic_directions: int
1524         :type transaction_duration: float
1525         :type transaction_scale: int
1526         :type transaction_type: str
1527         :type use_latency: bool
1528         :type ramp_up_rate: float
1529         :type ramp_up_duration: float
1530         :type state_timeout: float
1531         :returns: Goal result (based on unidirectional tps) for each goal.
1532             The result contains both the offered load for stat trial,
1533             and the conditional throughput for display.
1534         :rtype: List[GoalResult]
1535         :raises RuntimeError: If search duration exceeds search_duration_max
1536             or if min load becomes an upper bound for any search goal.
1537         """
1538         # we need instance of TrafficGenerator instantiated by Robot Framework
1539         # to be able to use trex_stl-*()
1540         tg_instance = BuiltIn().get_library_instance(
1541             u"resources.libraries.python.TrafficGenerator"
1542         )
1543         # Overrides for fixed transaction amount.
1544         if transaction_scale:
1545             initial_trial_duration = 1.0
1546             final_trial_duration = 1.0
1547             preceding_targets = 1
1548             # TODO: Move the value to Constants.py?
1549             search_duration_max += transaction_scale * 3e-4
1550         tg_instance.set_rate_provider_defaults(
1551             frame_size=frame_size,
1552             traffic_profile=traffic_profile,
1553             sleep_till_duration=False,
1554             ppta=ppta,
1555             resetter=resetter,
1556             traffic_directions=traffic_directions,
1557             transaction_duration=transaction_duration,
1558             transaction_scale=transaction_scale,
1559             transaction_type=transaction_type,
1560             use_latency=use_latency,
1561             ramp_up_rate=ramp_up_rate,
1562             ramp_up_duration=ramp_up_duration,
1563             state_timeout=state_timeout,
1564         )
1565         if loss_ratio:
1566             loss_ratios = [0.0, loss_ratio]
1567             exceed_ratio = 0.5
1568         else:
1569             # Happens in reconf tests.
1570             loss_ratios = [0.0]
1571             exceed_ratio = 0.0
1572         goals = [
1573             SearchGoal(
1574                 loss_ratio=loss_ratio,
1575                 exceed_ratio=exceed_ratio,
1576                 relative_width=relative_width,
1577                 initial_trial_duration=initial_trial_duration,
1578                 final_trial_duration=final_trial_duration,
1579                 duration_sum=duration_sum,
1580                 preceding_targets=preceding_targets,
1581                 expansion_coefficient=expansion_coefficient,
1582                 fail_fast=True,
1583             )
1584             for loss_ratio in loss_ratios
1585         ]
1586         config = Config()
1587         config.goals = goals
1588         config.min_load = min_load
1589         config.max_load = max_load
1590         config.search_duration_max = search_duration_max
1591         config.warmup_duration = 1.0
1592         algorithm = MultipleLossRatioSearch(config)
1593         results = algorithm.search(measurer=tg_instance, debug=logger.debug)
1594         return [results[goal] for goal in goals]
1595
1596     @staticmethod
1597     def perform_soak_search(
1598             frame_size,
1599             traffic_profile,
1600             min_load,
1601             max_load,
1602             plr_target=1e-7,
1603             tdpt=0.1,
1604             initial_count=50,
1605             timeout=7200.0,
1606             ppta=1,
1607             resetter=None,
1608             trace_enabled=False,
1609             traffic_directions=2,
1610             transaction_duration=0.0,
1611             transaction_scale=0,
1612             transaction_type=u"packet",
1613             use_latency=False,
1614             ramp_up_rate=None,
1615             ramp_up_duration=None,
1616             state_timeout=240.0,
1617     ):
1618         """Setup initialized TG, perform soak search, return avg and stdev.
1619
1620         :param frame_size: Frame size identifier or value [B].
1621         :param traffic_profile: Module name as a traffic profile identifier.
1622             See GPL/traffic_profiles/trex for implemented modules.
1623         :param min_load: Minimal load in transactions per second.
1624         :param max_load: Maximal load in transactions per second.
1625         :param plr_target: Ratio of packets lost to achieve [1].
1626         :param tdpt: Trial duration per trial.
1627             The algorithm linearly increases trial duration with trial number,
1628             this is the increment between succesive trials, in seconds.
1629         :param initial_count: Offset to apply before the first trial.
1630             For example initial_count=50 makes first trial to be 51*tdpt long.
1631             This is needed because initial "search" phase of integrator
1632             takes significant time even without any trial results.
1633         :param timeout: The search will stop after this overall time [s].
1634         :param ppta: Packets per transaction, aggregated over directions.
1635             Needed for udp_pps which does not have a good transaction counter,
1636             so we need to compute expected number of packets.
1637             Default: 1.
1638         :param resetter: Callable to reset DUT state for repeated trials.
1639         :param trace_enabled: True if trace enabled else False.
1640             This is very verbose tracing on numeric computations,
1641             do not use in production.
1642             Default: False
1643         :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
1644             Default: 2
1645         :param transaction_duration: Total expected time to close transaction.
1646         :param transaction_scale: Number of transactions to perform.
1647             0 (default) means unlimited.
1648         :param transaction_type: An identifier specifying which counters
1649             and formulas to use when computing attempted and failed
1650             transactions. Default: "packet".
1651         :param use_latency: Whether to measure latency during the trial.
1652             Default: False.
1653         :param ramp_up_rate: Rate to use in ramp-up trials [pps].
1654         :param ramp_up_duration: Duration of ramp-up trials [s].
1655         :param state_timeout: Time of life of DUT state [s].
1656         :type frame_size: str or int
1657         :type traffic_profile: str
1658         :type min_load: float
1659         :type max_load: float
1660         :type plr_target: float
1661         :type initial_count: int
1662         :type timeout: float
1663         :type ppta: int
1664         :type resetter: Optional[Callable[[], None]]
1665         :type trace_enabled: bool
1666         :type traffic_directions: int
1667         :type transaction_duration: float
1668         :type transaction_scale: int
1669         :type transaction_type: str
1670         :type use_latency: bool
1671         :type ramp_up_rate: float
1672         :type ramp_up_duration: float
1673         :type state_timeout: float
1674         :returns: Average and stdev of estimated aggregated rate giving PLR.
1675         :rtype: 2-tuple of float
1676         """
1677         tg_instance = BuiltIn().get_library_instance(
1678             u"resources.libraries.python.TrafficGenerator"
1679         )
1680         # Overrides for fixed transaction amount.
1681         if transaction_scale:
1682             timeout = 7200.0
1683         tg_instance.set_rate_provider_defaults(
1684             frame_size=frame_size,
1685             traffic_profile=traffic_profile,
1686             negative_loss=False,
1687             sleep_till_duration=True,
1688             ppta=ppta,
1689             resetter=resetter,
1690             traffic_directions=traffic_directions,
1691             transaction_duration=transaction_duration,
1692             transaction_scale=transaction_scale,
1693             transaction_type=transaction_type,
1694             use_latency=use_latency,
1695             ramp_up_rate=ramp_up_rate,
1696             ramp_up_duration=ramp_up_duration,
1697             state_timeout=state_timeout,
1698         )
1699         algorithm = PLRsearch(
1700             measurer=tg_instance,
1701             trial_duration_per_trial=tdpt,
1702             packet_loss_ratio_target=plr_target,
1703             trial_number_offset=initial_count,
1704             timeout=timeout,
1705             trace_enabled=trace_enabled,
1706         )
1707         result = algorithm.search(
1708             min_rate=min_load,
1709             max_rate=max_load,
1710         )
1711         return result