feat(MLRsearch): MLRsearch v7
[csit.git] / resources / libraries / python / TrafficGenerator.py
1 # Copyright (c) 2023 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Performance testing traffic generator library."""
15
16 import math
17 import time
18
19 from typing import Callable, List, Optional, Union
20
21 from robot.api import logger
22 from robot.libraries.BuiltIn import BuiltIn
23
24 from .Constants import Constants
25 from .DropRateSearch import DropRateSearch
26 from .MLRsearch import (
27     AbstractMeasurer, Config, MeasurementResult,
28     MultipleLossRatioSearch, SearchGoal, TrimmedStat,
29 )
30 from .PLRsearch.PLRsearch import PLRsearch
31 from .OptionString import OptionString
32 from .ssh import exec_cmd_no_error, exec_cmd
33 from .topology import NodeType
34 from .topology import NodeSubTypeTG
35 from .topology import Topology
36 from .TRexConfigGenerator import TrexConfig
37 from .DUTSetup import DUTSetup as DS
38
39 __all__ = [u"TGDropRateSearchImpl", u"TrafficGenerator", u"OptimizedSearch"]
40
41
def check_subtype(node):
    """Return supported subtype of given node, or raise an exception.

    Currently only one subtype is supported,
    but we want our code to be ready for other ones.

    :param node: Topology node to check. Can be None.
    :type node: dict or NoneType
    :returns: Subtype detected.
    :rtype: NodeSubTypeTG
    :raises RuntimeError: If node is not supported, message explains how.
    """
    # Guard clauses, from the most generic check to the most specific one.
    if node.get(u"type") is None:
        raise RuntimeError(u"Node type is not defined")
    if node[u"type"] != NodeType.TG:
        raise RuntimeError(f"Node type is {node[u'type']!r}, not a TG")
    if node.get(u"subtype") is None:
        raise RuntimeError(u"TG subtype is not defined")
    if node[u"subtype"] != NodeSubTypeTG.TREX:
        raise RuntimeError(f"TG subtype {node[u'subtype']!r} is not supported")
    return NodeSubTypeTG.TREX
65
66
class TGDropRateSearchImpl(DropRateSearch):
    """Drop Rate Search implementation."""

    def measure_loss(
            self, rate, frame_size, loss_acceptance, loss_acceptance_type,
            traffic_profile):
        """Run one trial and tell whether loss is within acceptance.

        :param rate: Offered traffic load.
        :param frame_size: Size of frame.
        :param loss_acceptance: Permitted drop ratio or frames count.
        :param loss_acceptance_type: Type of permitted loss.
        :param traffic_profile: Module name as a traffic profile identifier.
            See GPL/traffic_profiles/trex for implemented modules.
        :type rate: float
        :type frame_size: str
        :type loss_acceptance: float
        :type loss_acceptance_type: LossAcceptanceType
        :type traffic_profile: str
        :returns: Whether the measured loss is within acceptance (False also
            for unsupported TG subtypes).
        :rtype: bool
        :raises NotImplementedError: If TG is not supported.
        :raises RuntimeError: If TG is not specified.
        """
        # We need the TrafficGenerator instance Robot Framework created,
        # so its trex_stl_*() methods can be reused here.
        tg_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.TrafficGenerator"
        )
        if check_subtype(tg_instance.node) != NodeSubTypeTG.TREX:
            return False
        unit_rate = str(rate) + self.get_rate_type_str()
        tg_instance.trex_stl_start_remote_exec(
            self.get_duration(), unit_rate, frame_size, traffic_profile
        )
        loss = tg_instance.get_loss()
        sent = tg_instance.get_sent()
        if self.loss_acceptance_type_is_percentage():
            # Convert absolute loss count to percentage of sent packets.
            loss = (float(loss) / float(sent)) * 100
        logger.trace(
            f"comparing: {loss} < {loss_acceptance} {loss_acceptance_type}"
        )
        return float(loss) <= float(loss_acceptance)

    def get_latency(self):
        """Return min/avg/max latency from the shared TG instance.

        :returns: Latency stats.
        :rtype: list
        """
        tg_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.TrafficGenerator"
        )
        return tg_instance.get_latency_int()
126
class TrexMode:
    """Enumerate the modes the T-Rex traffic generator can run in."""

    # Stateless mode.
    STL = u"STL"
    # Advanced stateful mode.
    ASTF = u"ASTF"
133
134
class TrafficGenerator(AbstractMeasurer):
    """Traffic Generator.

    Wraps remote T-Rex invocations (both STL and ASTF modes) and acts as
    a measurer for search algorithms via the AbstractMeasurer interface.
    """

    # Use one instance of TrafficGenerator for all tests in test suite
    ROBOT_LIBRARY_SCOPE = u"TEST SUITE"
140
    def __init__(self):
        """Initialize all state fields to empty values.

        The fields are populated later, by initialization keywords
        and by the traffic sending / parsing methods.
        """
        # Topology node and detected T-Rex mode (set at initialization).
        self._node = None
        self._mode = None
        # TG interface order mapping
        self._ifaces_reordered = False
        # Result holding fields, to be removed.
        self._result = None
        self._loss = None
        self._sent = None
        self._latency = None
        self._received = None
        self._approximated_rate = None
        self._approximated_duration = None
        self._l7_data = None
        # Measurement input fields, needed for async stop result.
        self._start_time = None
        self._stop_time = None
        self._rate = None
        self._target_duration = None
        self._duration = None
        # Other input parameters, not knowable from measure() signature.
        self.frame_size = None
        self.traffic_profile = None
        self.traffic_directions = None
        self.negative_loss = None
        self.use_latency = None
        self.ppta = None
        self.resetter = None
        self.transaction_scale = None
        self.transaction_duration = None
        self.sleep_till_duration = None
        self.transaction_type = None
        self.duration_limit = None
        self.ramp_up_start = None
        self.ramp_up_stop = None
        self.ramp_up_rate = None
        self.ramp_up_duration = None
        self.state_timeout = None
        # Transient data needed for async measurements.
        self._xstats = ()
181
182     @property
183     def node(self):
184         """Getter.
185
186         :returns: Traffic generator node.
187         :rtype: dict
188         """
189         return self._node
190
191     def get_loss(self):
192         """Return number of lost packets.
193
194         :returns: Number of lost packets.
195         :rtype: str
196         """
197         return self._loss
198
199     def get_sent(self):
200         """Return number of sent packets.
201
202         :returns: Number of sent packets.
203         :rtype: str
204         """
205         return self._sent
206
207     def get_received(self):
208         """Return number of received packets.
209
210         :returns: Number of received packets.
211         :rtype: str
212         """
213         return self._received
214
215     def get_latency_int(self):
216         """Return rounded min/avg/max latency.
217
218         :returns: Latency stats.
219         :rtype: list
220         """
221         return self._latency
222
223     def get_approximated_rate(self):
224         """Return approximated rate computed as ratio of transmitted packets
225         over duration of trial.
226
227         :returns: Approximated rate.
228         :rtype: str
229         """
230         return self._approximated_rate
231
232     def get_l7_data(self):
233         """Return L7 data.
234
235         :returns: Number of received packets.
236         :rtype: dict
237         """
238         return self._l7_data
239
240     def check_mode(self, expected_mode):
241         """Check TG mode.
242
243         :param expected_mode: Expected traffic generator mode.
244         :type expected_mode: object
245         :raises RuntimeError: In case of unexpected TG mode.
246         """
247         if self._mode == expected_mode:
248             return
249         raise RuntimeError(
250             f"{self._node[u'subtype']} not running in {expected_mode} mode!"
251         )
252
253     @staticmethod
254     def get_tg_type(tg_node):
255         """Log and return the installed traffic generator type.
256
257         :param tg_node: Node from topology file.
258         :type tg_node: dict
259         :returns: Traffic generator type string.
260         :rtype: str
261         :raises RuntimeError: If command returns nonzero return code.
262         """
263         return str(check_subtype(tg_node))
264
265     @staticmethod
266     def get_tg_version(tg_node):
267         """Log and return the installed traffic generator version.
268
269         :param tg_node: Node from topology file.
270         :type tg_node: dict
271         :returns: Traffic generator version string.
272         :rtype: str
273         :raises RuntimeError: If command returns nonzero return code.
274         """
275         subtype = check_subtype(tg_node)
276         if subtype == NodeSubTypeTG.TREX:
277             command = f"cat {Constants.TREX_INSTALL_DIR}/VERSION"
278             message = u"Get T-Rex version failed!"
279             stdout, _ = exec_cmd_no_error(tg_node, command, message=message)
280             return stdout.strip()
281         return "none"
282
    def initialize_traffic_generator(self, osi_layer, parallel_links=1):
        """TG initialization.

        Reads the topology info Robot variable, detects the TG subtype,
        builds a per-port T-Rex topology (interface + destination MAC)
        and starts T-Rex in the mode matching the OSI layer.

        :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
        :param parallel_links: Number of parallel links to configure.
        :type osi_layer: str
        :type parallel_links: int
        :raises ValueError: If OSI layer is unknown.
        """
        if osi_layer not in ("L2", "L3", "L7"):
            raise ValueError("Unknown OSI layer!")

        topology = BuiltIn().get_variable_value("&{topology_info}")
        self._node = topology["TG"]
        subtype = check_subtype(self._node)

        if subtype == NodeSubTypeTG.TREX:
            trex_topology = list()
            # L7 testing needs the advanced stateful mode.
            self._mode = TrexMode.ASTF if osi_layer == "L7" else TrexMode.STL

            # Links pair up consecutive TG ports: (1,2), (3,4), ...
            for link in range(1, parallel_links*2, 2):
                # Default destination MAC is the paired TG port's MAC
                # (ports looped back to each other).
                tg_if1_adj_addr = topology[f"TG_pf{link+1}_mac"][0]
                tg_if2_adj_addr = topology[f"TG_pf{link}_mac"][0]
                if osi_layer in ("L3", "L7") and "DUT1" in topology.keys():
                    # With DUTs present, destination MACs are the adjacent
                    # DUT interfaces (first and last DUT in the chain).
                    ifl = BuiltIn().get_variable_value("${int}")
                    last = topology["duts_count"]
                    tg_if1_adj_addr = Topology().get_interface_mac(
                        topology["DUT1"],
                        BuiltIn().get_variable_value(
                            f"${{DUT1_{ifl}{link}}}[0]"
                        )
                    )
                    tg_if2_adj_addr = Topology().get_interface_mac(
                        topology[f"DUT{last}"],
                        BuiltIn().get_variable_value(
                            f"${{DUT{last}_{ifl}{link+1}}}[0]"
                        )
                    )

                trex_topology.append(
                    dict(
                        interface=topology[f"TG_pf{link}"][0],
                        dst_mac=tg_if1_adj_addr
                    )
                )
                trex_topology.append(
                    dict(
                        interface=topology[f"TG_pf{link+1}"][0],
                        dst_mac=tg_if2_adj_addr
                    )
                )
                if1_pci = topology[f"TG_pf{link}_pci"][0]
                if2_pci = topology[f"TG_pf{link+1}_pci"][0]
                # T-Rex orders ports by PCI address; remember when that
                # differs from the topology order.
                # NOTE(review): reverse() flips the whole accumulated list,
                # not just the last pair — looks suspicious for
                # parallel_links > 1, confirm intended behavior.
                if min(if1_pci, if2_pci) != if1_pci:
                    self._ifaces_reordered = True
                    trex_topology.reverse()

            TrexConfig.add_startup_configuration(
                self._node, trex_topology
            )
            TrafficGenerator.startup_trex(
                self._node, osi_layer, subtype=subtype
            )
346
    @staticmethod
    def startup_trex(tg_node, osi_layer, subtype=None):
        """Startup sequence for the TRex traffic generator.

        The kill / bind / start / API-check sequence is attempted
        up to three times before giving up.

        :param tg_node: Traffic generator node.
        :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
        :param subtype: Traffic generator sub-type.
        :type tg_node: dict
        :type osi_layer: str
        :type subtype: NodeSubTypeTG
        :raises RuntimeError: If T-Rex startup failed.
        :raises ValueError: If OSI layer is not supported.
        """
        if not subtype:
            subtype = check_subtype(tg_node)
        if subtype == NodeSubTypeTG.TREX:
            for _ in range(0, 3):
                # Kill TRex only if it is already running.
                cmd = u"sh -c \"pgrep t-rex && pkill t-rex && sleep 3 || true\""
                exec_cmd_no_error(
                    tg_node, cmd, sudo=True, message=u"Kill TRex failed!"
                )

                # Prepare interfaces for TRex.
                tg_port_drv = Constants.TREX_PORT_DRIVER
                mlx_driver = u""
                for port in tg_node[u"interfaces"].values():
                    if u"Mellanox" in port.get(u"model"):
                        # Mellanox ports keep their vendor driver; rebind
                        # only when a different driver is currently active.
                        mlx_driver = port.get(u"driver")
                        pci_addr = port.get(u'pci_address')
                        cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
                        if cur_driver == mlx_driver:
                            pass
                        elif not cur_driver:
                            DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
                        else:
                            DS.pci_driver_unbind(tg_node, pci_addr)
                            DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
                    else:
                        # Other ports are rebound to the configured
                        # TRex port driver.
                        pci_addr = port.get(u'pci_address')
                        cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
                        if cur_driver:
                            DS.pci_driver_unbind(tg_node, pci_addr)
                        DS.pci_driver_bind(tg_node, pci_addr, tg_port_drv)

                # Start TRex.
                cd_cmd = f"cd '{Constants.TREX_INSTALL_DIR}/scripts/'"
                trex_cmd = OptionString([u"nohup", u"./t-rex-64"])
                trex_cmd.add(u"-i")
                trex_cmd.add(u"--prefix $(hostname)")
                trex_cmd.add(u"--hdrh")
                trex_cmd.add(u"--no-scapy-server")
                trex_cmd.add_if(u"--astf", osi_layer == u"L7")
                # OptionString does not create double space if extra is empty.
                trex_cmd.add(f"{Constants.TREX_EXTRA_CMDLINE}")
                inner_command = f"{cd_cmd} && {trex_cmd} > /tmp/trex.log 2>&1 &"
                cmd = f"sh -c \"{inner_command}\" > /dev/null"
                try:
                    exec_cmd_no_error(tg_node, cmd, sudo=True)
                except RuntimeError:
                    # Dump the TRex log into test output before failing.
                    cmd = u"sh -c \"cat /tmp/trex.log\""
                    exec_cmd_no_error(
                        tg_node, cmd, sudo=True,
                        message=u"Get TRex logs failed!"
                    )
                    raise RuntimeError(u"Start TRex failed!")

                # Test T-Rex API responsiveness.
                cmd = f"python3 {Constants.REMOTE_FW_DIR}/GPL/tools/trex/"
                if osi_layer in (u"L2", u"L3"):
                    cmd += u"trex_stl_assert.py"
                elif osi_layer == u"L7":
                    cmd += u"trex_astf_assert.py"
                else:
                    raise ValueError(u"Unknown OSI layer!")
                try:
                    exec_cmd_no_error(
                        tg_node, cmd, sudo=True,
                        message=u"T-Rex API is not responding!", retries=20
                    )
                except RuntimeError:
                    # API still not up; retry the whole startup sequence.
                    continue
                return
            # After max retries TRex is still not responding to API critical
            # error occurred.
            exec_cmd(tg_node, u"cat /tmp/trex.log", sudo=True)
            raise RuntimeError(u"Start T-Rex failed after multiple retries!")
434
435     @staticmethod
436     def is_trex_running(node):
437         """Check if T-Rex is running using pidof.
438
439         :param node: Traffic generator node.
440         :type node: dict
441         :returns: True if T-Rex is running otherwise False.
442         :rtype: bool
443         """
444         ret, _, _ = exec_cmd(node, u"pgrep t-rex", sudo=True)
445         return bool(int(ret) == 0)
446
447     @staticmethod
448     def teardown_traffic_generator(node):
449         """TG teardown.
450
451         :param node: Traffic generator node.
452         :type node: dict
453         :returns: nothing
454         :raises RuntimeError: If node type is not a TG,
455             or if T-Rex teardown fails.
456         """
457         subtype = check_subtype(node)
458         if subtype == NodeSubTypeTG.TREX:
459             exec_cmd_no_error(
460                 node,
461                 u"sh -c "
462                 u"\"if pgrep t-rex; then sudo pkill t-rex && sleep 3; fi\"",
463                 sudo=False,
464                 message=u"T-Rex kill failed!"
465             )
466
467     def trex_astf_stop_remote_exec(self, node):
468         """Execute T-Rex ASTF script on remote node over ssh to stop running
469         traffic.
470
471         Internal state is updated with measurement results.
472
473         :param node: T-Rex generator node.
474         :type node: dict
475         :raises RuntimeError: If stop traffic script fails.
476         """
477         command_line = OptionString().add("python3")
478         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
479         command_line.add(f"'{dirname}/trex_astf_stop.py'")
480         command_line.add("--xstat")
481         for value in self._xstats:
482             if value is not None:
483                 value = value.replace("'", "\"")
484                 command_line.add(f"'{value}'")
485         stdout, _ = exec_cmd_no_error(
486             node, command_line,
487             message="T-Rex ASTF runtime error!"
488         )
489         self._parse_traffic_results(stdout)
490
491     def trex_stl_stop_remote_exec(self, node):
492         """Execute T-Rex STL script on remote node over ssh to stop running
493         traffic.
494
495         Internal state is updated with measurement results.
496
497         :param node: T-Rex generator node.
498         :type node: dict
499         :raises RuntimeError: If stop traffic script fails.
500         """
501         command_line = OptionString().add("python3")
502         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
503         command_line.add(f"'{dirname}/trex_stl_stop.py'")
504         command_line.add("--xstat")
505         for value in self._xstats:
506             if value is not None:
507                 value = value.replace("'", "\"")
508                 command_line.add(f"'{value}'")
509         stdout, _ = exec_cmd_no_error(
510             node, command_line,
511             message="T-Rex STL runtime error!"
512         )
513         self._parse_traffic_results(stdout)
514
515     def stop_traffic_on_tg(self):
516         """Stop all traffic on TG.
517
518         :returns: Structure containing the result of the measurement.
519         :rtype: MeasurementResult
520         :raises ValueError: If TG traffic profile is not supported.
521         """
522         subtype = check_subtype(self._node)
523         if subtype != NodeSubTypeTG.TREX:
524             raise ValueError(f"Unsupported TG subtype: {subtype!r}")
525         if u"trex-astf" in self.traffic_profile:
526             self.trex_astf_stop_remote_exec(self._node)
527         elif u"trex-stl" in self.traffic_profile:
528             self.trex_stl_stop_remote_exec(self._node)
529         else:
530             raise ValueError(u"Unsupported T-Rex traffic profile!")
531         self._stop_time = time.monotonic()
532
533         return self._get_measurement_result()
534
535     def _compute_duration(self, duration, multiplier):
536         """Compute duration for profile driver.
537
538         The final result is influenced by transaction scale and duration limit.
539         It is assumed a higher level function has already set those on self.
540         The duration argument is the target value from search point of view,
541         before the overrides are applied here.
542
543         Minus one (signalling async traffic start) is kept.
544
545         Completeness flag is also included. Duration limited or async trials
546         are not considered complete for ramp-up purposes.
547
548         :param duration: Time expressed in seconds for how long to send traffic.
549         :param multiplier: Traffic rate in transactions per second.
550         :type duration: float
551         :type multiplier: float
552         :returns: New duration and whether it was a complete ramp-up candidate.
553         :rtype: float, bool
554         """
555         if duration < 0.0:
556             # Keep the async -1.
557             return duration, False
558         computed_duration = duration
559         if self.transaction_scale:
560             computed_duration = self.transaction_scale / multiplier
561             # Log the computed duration,
562             # so we can compare with what telemetry suggests
563             # the real duration was.
564             logger.debug(f"Expected duration {computed_duration}")
565         if not self.duration_limit:
566             return computed_duration, True
567         limited_duration = min(computed_duration, self.duration_limit)
568         return limited_duration, (limited_duration == computed_duration)
569
    def trex_astf_start_remote_exec(
            self, duration, multiplier, async_call=False):
        """Execute T-Rex ASTF script on remote node over ssh to start running
        traffic.

        In sync mode, measurement results are stored internally.
        In async mode, initial data including xstats are stored internally.

        This method contains the logic to compute duration as maximum time
        if transaction_scale is nonzero.
        The transaction_scale argument defines (limits) how many transactions
        will be started in total. As that amount of transaction can take
        considerable time (sometimes due to explicit delays in the profile),
        the real time a trial needs to finish is computed here. For now,
        in that case the duration argument is ignored, assuming it comes
        from ASTF-unaware search algorithm. The overall time a single
        transaction needs is given in parameter transaction_duration,
        it includes both explicit delays and implicit time it takes
        to transfer data (or whatever the transaction does).

        Currently it is observed TRex does not start the ASTF traffic
        immediately, an ad-hoc constant is added to the computed duration
        to compensate for that.

        If transaction_scale is zero, duration is not recomputed.
        It is assumed the subsequent result parsing gets the real duration
        if the traffic stops sooner for any reason.

        Currently, it is assumed traffic profile defines a single transaction.
        To avoid heavy logic here, the input rate is expected to be in
        transactions per second, as that directly translates to TRex multiplier,
        (assuming the profile does not override the default cps value of one).

        :param duration: Time expressed in seconds for how long to send traffic.
        :param multiplier: Traffic rate in transactions per second.
        :param async_call: If enabled then don't wait for all incoming traffic.
        :type duration: float
        :type multiplier: int
        :type async_call: bool
        :raises RuntimeError: In case of T-Rex driver issue.
        """
        self.check_mode(TrexMode.ASTF)
        # Swap port numbers when T-Rex PCI ordering differs from topology.
        p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
        if not isinstance(duration, (float, int)):
            duration = float(duration)

        # Apply transaction scale and duration limit overrides.
        computed_duration, _ = self._compute_duration(duration, multiplier)

        command_line = OptionString().add(u"python3")
        dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
        command_line.add(f"'{dirname}/trex_astf_profile.py'")
        command_line.change_prefix(u"--")
        dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
        command_line.add_with_value(
            u"profile", f"'{dirname}/{self.traffic_profile}.py'"
        )
        command_line.add_with_value(u"duration", f"{computed_duration!r}")
        command_line.add_with_value(u"frame_size", self.frame_size)
        command_line.add_with_value(
            u"n_data_frames", Constants.ASTF_N_DATA_FRAMES
        )
        command_line.add_with_value(u"multiplier", multiplier)
        command_line.add_with_value(u"port_0", p_0)
        command_line.add_with_value(u"port_1", p_1)
        command_line.add_with_value(
            u"traffic_directions", self.traffic_directions
        )
        command_line.add_if(u"async_start", async_call)
        command_line.add_if(u"latency", self.use_latency)
        command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
        command_line.add_with_value(
            u"delay", Constants.PERF_TRIAL_ASTF_DELAY
        )

        self._start_time = time.monotonic()
        self._rate = multiplier
        # Allow 10 extra seconds over the trial duration for ssh overhead.
        stdout, _ = exec_cmd_no_error(
            self._node, command_line, timeout=computed_duration + 10.0,
            message=u"T-Rex ASTF runtime error!"
        )

        if async_call:
            # no result yet; reset result fields and pre-build the l7_data
            # skeleton so the later stop/parse can fill it in.
            self._target_duration = None
            self._duration = None
            self._received = None
            self._sent = None
            self._loss = None
            self._latency = None
            xstats = []
            self._l7_data = dict()
            self._l7_data[u"client"] = dict()
            self._l7_data[u"client"][u"active_flows"] = None
            self._l7_data[u"client"][u"established_flows"] = None
            self._l7_data[u"client"][u"traffic_duration"] = None
            self._l7_data[u"server"] = dict()
            self._l7_data[u"server"][u"active_flows"] = None
            self._l7_data[u"server"][u"established_flows"] = None
            self._l7_data[u"server"][u"traffic_duration"] = None
            # Flow counters differ between UDP and TCP profiles.
            if u"udp" in self.traffic_profile:
                self._l7_data[u"client"][u"udp"] = dict()
                self._l7_data[u"client"][u"udp"][u"connects"] = None
                self._l7_data[u"client"][u"udp"][u"closed_flows"] = None
                self._l7_data[u"client"][u"udp"][u"err_cwf"] = None
                self._l7_data[u"server"][u"udp"] = dict()
                self._l7_data[u"server"][u"udp"][u"accepted_flows"] = None
                self._l7_data[u"server"][u"udp"][u"closed_flows"] = None
            elif u"tcp" in self.traffic_profile:
                self._l7_data[u"client"][u"tcp"] = dict()
                self._l7_data[u"client"][u"tcp"][u"initiated_flows"] = None
                self._l7_data[u"client"][u"tcp"][u"connects"] = None
                self._l7_data[u"client"][u"tcp"][u"closed_flows"] = None
                self._l7_data[u"client"][u"tcp"][u"connattempt"] = None
                self._l7_data[u"server"][u"tcp"] = dict()
                self._l7_data[u"server"][u"tcp"][u"accepted_flows"] = None
                self._l7_data[u"server"][u"tcp"][u"connects"] = None
                self._l7_data[u"server"][u"tcp"][u"closed_flows"] = None
            else:
                logger.warn(u"Unsupported T-Rex ASTF traffic profile!")
            index = 0
            for line in stdout.splitlines():
                if f"Xstats snapshot {index}: " in line:
                    # Strip the 19-char "Xstats snapshot N: " prefix.
                    # NOTE(review): slice assumes the prefix is at column 0
                    # and N is a single digit — confirm for index >= 10.
                    xstats.append(line[19:])
                    index += 1
            self._xstats = tuple(xstats)
        else:
            self._target_duration = duration
            self._duration = computed_duration
            self._parse_traffic_results(stdout)
699
    def trex_stl_start_remote_exec(self, duration, rate, async_call=False):
        """Execute T-Rex STL script on remote node over ssh to start running
        traffic.

        In sync mode, measurement results are stored internally.
        In async mode, initial data including xstats are stored internally.

        Mode-unaware code (e.g. in search algorithms) works with transactions.
        To keep the logic simple, multiplier is set to that value.
        As bidirectional traffic profiles send packets in both directions,
        they are treated as transactions with two packets (one per direction).

        :param duration: Time expressed in seconds for how long to send traffic.
        :param rate: Traffic rate in transactions per second.
        :param async_call: If enabled then don't wait for all incoming traffic.
        :type duration: float
        :type rate: str
        :type async_call: bool
        :raises RuntimeError: In case of T-Rex driver issue.
        """
        self.check_mode(TrexMode.STL)
        # Swap port numbers when T-Rex PCI ordering differs from topology.
        p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
        if not isinstance(duration, (float, int)):
            duration = float(duration)

        # Apply transaction scale and duration limit overrides.
        duration, _ = self._compute_duration(duration=duration, multiplier=rate)

        command_line = OptionString().add(u"python3")
        dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
        command_line.add(f"'{dirname}/trex_stl_profile.py'")
        command_line.change_prefix(u"--")
        dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
        command_line.add_with_value(
            u"profile", f"'{dirname}/{self.traffic_profile}.py'"
        )
        command_line.add_with_value(u"duration", f"{duration!r}")
        command_line.add_with_value(u"frame_size", self.frame_size)
        command_line.add_with_value(u"rate", f"{rate!r}")
        command_line.add_with_value(u"port_0", p_0)
        command_line.add_with_value(u"port_1", p_1)
        command_line.add_with_value(
            u"traffic_directions", self.traffic_directions
        )
        command_line.add_if(u"async_start", async_call)
        command_line.add_if(u"latency", self.use_latency)
        command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
        command_line.add_with_value(u"delay", Constants.PERF_TRIAL_STL_DELAY)

        self._start_time = time.monotonic()
        # NOTE(review): assumes a rate like "12345pps" (3-char unit suffix)
        # or a plain number — confirm no other unit strings reach here.
        self._rate = float(rate[:-3]) if u"pps" in rate else float(rate)
        # Allow 60 extra seconds over the trial duration for ssh overhead.
        stdout, _ = exec_cmd_no_error(
            self._node, command_line, timeout=int(duration) + 60,
            message=u"T-Rex STL runtime error"
        )

        if async_call:
            # no result yet; reset result fields and remember xstats
            # so the later stop call can compute the final result.
            self._target_duration = None
            self._duration = None
            self._received = None
            self._sent = None
            self._loss = None
            self._latency = None

            xstats = []
            index = 0
            for line in stdout.splitlines():
                if f"Xstats snapshot {index}: " in line:
                    # Strip the 19-char "Xstats snapshot N: " prefix.
                    # NOTE(review): slice assumes the prefix is at column 0
                    # and N is a single digit — confirm for index >= 10.
                    xstats.append(line[19:])
                    index += 1
            self._xstats = tuple(xstats)
        else:
            self._target_duration = duration
            self._duration = duration
            self._parse_traffic_results(stdout)
775
776     def send_traffic_on_tg(
777             self,
778             duration,
779             rate,
780             frame_size,
781             traffic_profile,
782             async_call=False,
783             ppta=1,
784             traffic_directions=2,
785             transaction_duration=0.0,
786             transaction_scale=0,
787             transaction_type=u"packet",
788             duration_limit=0.0,
789             use_latency=False,
790             ramp_up_rate=None,
791             ramp_up_duration=None,
792             state_timeout=240.0,
793             ramp_up_only=False,
794         ):
795         """Send traffic from all configured interfaces on TG.
796
797         In async mode, xstats is stored internally,
798         to enable getting correct result when stopping the traffic.
799         In both modes, stdout is returned,
800         but _parse_traffic_results only works in sync output.
801
802         Note that traffic generator uses DPDK driver which might
803         reorder port numbers based on wiring and PCI numbering.
804         This method handles that, so argument values are invariant,
805         but you can see swapped valued in debug logs.
806
807         When transaction_scale is specified, the duration value is ignored
808         and the needed time is computed. For cases where this results in
809         to too long measurement (e.g. teardown trial with small rate),
810         duration_limit is applied (of non-zero), so the trial is stopped sooner.
811
812         Bidirectional STL profiles are treated as transactions with two packets.
813
814         The return value is None for async.
815
816         :param duration: Duration of test traffic generation in seconds.
817         :param rate: Traffic rate in transactions per second.
818         :param frame_size: Frame size (L2) in Bytes.
819         :param traffic_profile: Module name as a traffic profile identifier.
820             See GPL/traffic_profiles/trex for implemented modules.
821         :param async_call: Async mode.
822         :param ppta: Packets per transaction, aggregated over directions.
823             Needed for udp_pps which does not have a good transaction counter,
824             so we need to compute expected number of packets.
825             Default: 1.
826         :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
827             Default: 2
828         :param transaction_duration: Total expected time to close transaction.
829         :param transaction_scale: Number of transactions to perform.
830             0 (default) means unlimited.
831         :param transaction_type: An identifier specifying which counters
832             and formulas to use when computing attempted and failed
833             transactions. Default: "packet".
834         :param duration_limit: Zero or maximum limit for computed (or given)
835             duration.
836         :param use_latency: Whether to measure latency during the trial.
837             Default: False.
838         :param ramp_up_rate: Rate to use in ramp-up trials [pps].
839         :param ramp_up_duration: Duration of ramp-up trials [s].
840         :param state_timeout: Time of life of DUT state [s].
841         :param ramp_up_only: If true, do not perform main trial measurement.
842         :type duration: float
843         :type rate: float
844         :type frame_size: str
845         :type traffic_profile: str
846         :type async_call: bool
847         :type ppta: int
848         :type traffic_directions: int
849         :type transaction_duration: float
850         :type transaction_scale: int
851         :type transaction_type: str
852         :type duration_limit: float
853         :type use_latency: bool
854         :type ramp_up_rate: float
855         :type ramp_up_duration: float
856         :type state_timeout: float
857         :type ramp_up_only: bool
858         :returns: TG results.
859         :rtype: MeasurementResult or None
860         :raises ValueError: If TG traffic profile is not supported.
861         """
862         self.set_rate_provider_defaults(
863             frame_size=frame_size,
864             traffic_profile=traffic_profile,
865             ppta=ppta,
866             traffic_directions=traffic_directions,
867             transaction_duration=transaction_duration,
868             transaction_scale=transaction_scale,
869             transaction_type=transaction_type,
870             duration_limit=duration_limit,
871             use_latency=use_latency,
872             ramp_up_rate=ramp_up_rate,
873             ramp_up_duration=ramp_up_duration,
874             state_timeout=state_timeout,
875         )
876         return self._send_traffic_on_tg_with_ramp_up(
877             duration=duration,
878             rate=rate,
879             async_call=async_call,
880             ramp_up_only=ramp_up_only,
881         )
882
883     def _send_traffic_on_tg_internal(
884             self, duration, rate, async_call=False):
885         """Send traffic from all configured interfaces on TG.
886
887         This is an internal function, it assumes set_rate_provider_defaults
888         has been called to remember most values.
889         The reason why need to remember various values is that
890         the traffic can be asynchronous, and parsing needs those values.
891         The reason why this is is a separate function from the one
892         which calls set_rate_provider_defaults is that some search algorithms
893         need to specify their own values, and we do not want the measure call
894         to overwrite them with defaults.
895
896         This function is used both for automated ramp-up trials
897         and for explicitly called trials.
898
899         :param duration: Duration of test traffic generation in seconds.
900         :param rate: Traffic rate in transactions per second.
901         :param async_call: Async mode.
902         :type duration: float
903         :type rate: float
904         :type async_call: bool
905         :returns: TG results.
906         :rtype: MeasurementResult or None
907         :raises ValueError: If TG traffic profile is not supported.
908         """
909         subtype = check_subtype(self._node)
910         if subtype == NodeSubTypeTG.TREX:
911             if u"trex-astf" in self.traffic_profile:
912                 self.trex_astf_start_remote_exec(
913                     duration, float(rate), async_call
914                 )
915             elif u"trex-stl" in self.traffic_profile:
916                 unit_rate_str = str(rate) + u"pps"
917                 self.trex_stl_start_remote_exec(
918                     duration, unit_rate_str, async_call
919                 )
920             else:
921                 raise ValueError(u"Unsupported T-Rex traffic profile!")
922
923         return None if async_call else self._get_measurement_result()
924
    def _send_traffic_on_tg_with_ramp_up(
            self, duration, rate, async_call=False, ramp_up_only=False):
        """Send traffic from all interfaces on TG, maybe after ramp-up.

        This is an internal function, it assumes set_rate_provider_defaults
        has been called to remember most values.
        The reason why we need to remember various values is that
        the traffic can be asynchronous, and parsing needs those values.
        The reason why this is a separate function from the one
        which calls set_rate_provider_defaults is that some search algorithms
        need to specify their own values, and we do not want the measure call
        to overwrite them with defaults.

        If ramp-up tracking is detected, a computation is performed,
        and if state timeout is near, trial at ramp-up rate and duration
        is inserted before the main trial measurement.

        The ramp_up_only parameter forces a ramp-up without immediate
        trial measurement, which is useful in case self remembers
        a previous ramp-up trial that belongs to a different test (phase).

        Return None if trial is async or ramp-up only.

        :param duration: Duration of test traffic generation in seconds.
        :param rate: Traffic rate in transactions per second.
        :param async_call: Async mode.
        :param ramp_up_only: If true, do not perform main trial measurement.
        :type duration: float
        :type rate: float
        :type async_call: bool
        :type ramp_up_only: bool
        :returns: TG results.
        :rtype: MeasurementResult or None
        :raises ValueError: If TG traffic profile is not supported.
        """
        # Whether the computed duration was not cut by duration_limit;
        # only a complete trial may later be reused as an implicit ramp-up.
        complete = False
        if self.ramp_up_rate:
            # Figure out whether we need to insert a ramp-up trial.
            if ramp_up_only or self.ramp_up_start is None:
                # We never ramped up yet (at least not in this test case).
                ramp_up_needed = True
            else:
                # We ramped up before, but maybe it was too long ago.
                # Adding a constant overhead to be safe.
                time_now = time.monotonic() + 1.0
                computed_duration, complete = self._compute_duration(
                    duration=duration,
                    multiplier=rate,
                )
                # There are two conditions for inserting ramp-up.
                # If early sessions are expiring already,
                # or if late sessions are to expire before measurement is over.
                ramp_up_start_delay = time_now - self.ramp_up_start
                ramp_up_stop_delay = time_now - self.ramp_up_stop
                ramp_up_stop_delay += computed_duration
                bigger_delay = max(ramp_up_start_delay, ramp_up_stop_delay)
                # Final boolean decision.
                ramp_up_needed = (bigger_delay >= self.state_timeout)
            if ramp_up_needed:
                logger.debug(
                    u"State may time out during next real trial, "
                    u"inserting a ramp-up trial."
                )
                # Record start before sending, so delay computation above
                # is conservative (includes the trial itself).
                self.ramp_up_start = time.monotonic()
                self._send_traffic_on_tg_internal(
                    duration=self.ramp_up_duration,
                    rate=self.ramp_up_rate,
                    async_call=async_call,
                )
                self.ramp_up_stop = time.monotonic()
                logger.debug(u"Ramp-up done.")
            else:
                logger.debug(
                    u"State will probably not time out during next real trial, "
                    u"no ramp-up trial needed just yet."
                )
        if ramp_up_only:
            return None
        # Bracket the main trial with timestamps, in case it can serve
        # as an implicit ramp-up (see below).
        trial_start = time.monotonic()
        result = self._send_traffic_on_tg_internal(
            duration=duration,
            rate=rate,
            async_call=async_call,
        )
        trial_end = time.monotonic()
        if self.ramp_up_rate:
            # Optimization: No loss acts as a good ramp-up, if it was complete.
            if complete and result is not None and result.loss_ratio == 0.0:
                logger.debug(u"Good trial acts as a ramp-up")
                self.ramp_up_start = trial_start
                self.ramp_up_stop = trial_end
            else:
                logger.debug(u"Loss or incomplete, does not act as a ramp-up.")
        return result
1019
1020     def no_traffic_loss_occurred(self):
1021         """Fail if loss occurred in traffic run.
1022
1023         :returns: nothing
1024         :raises Exception: If loss occured.
1025         """
1026         if self._loss is None:
1027             raise RuntimeError(u"The traffic generation has not been issued")
1028         if self._loss != u"0":
1029             raise RuntimeError(f"Traffic loss occurred: {self._loss}")
1030
1031     def fail_if_no_traffic_forwarded(self):
1032         """Fail if no traffic forwarded.
1033
1034         :returns: nothing
1035         :raises Exception: If no traffic forwarded.
1036         """
1037         if self._received is None:
1038             raise RuntimeError(u"The traffic generation has not been issued")
1039         if self._received == 0:
1040             raise RuntimeError(u"No traffic forwarded")
1041
1042     def partial_traffic_loss_accepted(
1043             self, loss_acceptance, loss_acceptance_type):
1044         """Fail if loss is higher then accepted in traffic run.
1045
1046         :param loss_acceptance: Permitted drop ratio or frames count.
1047         :param loss_acceptance_type: Type of permitted loss.
1048         :type loss_acceptance: float
1049         :type loss_acceptance_type: LossAcceptanceType
1050         :returns: nothing
1051         :raises Exception: If loss is above acceptance criteria.
1052         """
1053         if self._loss is None:
1054             raise Exception(u"The traffic generation has not been issued")
1055
1056         if loss_acceptance_type == u"percentage":
1057             loss = (float(self._loss) / float(self._sent)) * 100
1058         elif loss_acceptance_type == u"frames":
1059             loss = float(self._loss)
1060         else:
1061             raise Exception(u"Loss acceptance type not supported")
1062
1063         if loss > float(loss_acceptance):
1064             raise Exception(
1065                 f"Traffic loss {loss} above loss acceptance: {loss_acceptance}"
1066             )
1067
1068     def _parse_traffic_results(self, stdout):
1069         """Parse stdout of scripts into fields of self.
1070
1071         Block of code to reuse, by sync start, or stop after async.
1072
1073         :param stdout: Text containing the standard output.
1074         :type stdout: str
1075         """
1076         subtype = check_subtype(self._node)
1077         if subtype == NodeSubTypeTG.TREX:
1078             # Last line from console output
1079             line = stdout.splitlines()[-1]
1080             results = line.split(u";")
1081             if results[-1] in (u" ", u""):
1082                 results.pop(-1)
1083             self._result = dict()
1084             for result in results:
1085                 key, value = result.split(u"=", maxsplit=1)
1086                 self._result[key.strip()] = value
1087             logger.info(f"TrafficGen results:\n{self._result}")
1088             self._received = int(self._result.get(u"total_received"), 0)
1089             self._sent = int(self._result.get(u"total_sent", 0))
1090             self._loss = int(self._result.get(u"frame_loss", 0))
1091             self._approximated_duration = \
1092                 self._result.get(u"approximated_duration", 0.0)
1093             if u"manual" not in str(self._approximated_duration):
1094                 self._approximated_duration = float(self._approximated_duration)
1095             self._latency = list()
1096             self._latency.append(self._result.get(u"latency_stream_0(usec)"))
1097             self._latency.append(self._result.get(u"latency_stream_1(usec)"))
1098             if self._mode == TrexMode.ASTF:
1099                 self._l7_data = dict()
1100                 self._l7_data[u"client"] = dict()
1101                 self._l7_data[u"client"][u"sent"] = \
1102                     int(self._result.get(u"client_sent", 0))
1103                 self._l7_data[u"client"][u"received"] = \
1104                     int(self._result.get(u"client_received", 0))
1105                 self._l7_data[u"client"][u"active_flows"] = \
1106                     int(self._result.get(u"client_active_flows", 0))
1107                 self._l7_data[u"client"][u"established_flows"] = \
1108                     int(self._result.get(u"client_established_flows", 0))
1109                 self._l7_data[u"client"][u"traffic_duration"] = \
1110                     float(self._result.get(u"client_traffic_duration", 0.0))
1111                 self._l7_data[u"client"][u"err_rx_throttled"] = \
1112                     int(self._result.get(u"client_err_rx_throttled", 0))
1113                 self._l7_data[u"client"][u"err_c_nf_throttled"] = \
1114                     int(self._result.get(u"client_err_nf_throttled", 0))
1115                 self._l7_data[u"client"][u"err_flow_overflow"] = \
1116                     int(self._result.get(u"client_err_flow_overflow", 0))
1117                 self._l7_data[u"server"] = dict()
1118                 self._l7_data[u"server"][u"active_flows"] = \
1119                     int(self._result.get(u"server_active_flows", 0))
1120                 self._l7_data[u"server"][u"established_flows"] = \
1121                     int(self._result.get(u"server_established_flows", 0))
1122                 self._l7_data[u"server"][u"traffic_duration"] = \
1123                     float(self._result.get(u"server_traffic_duration", 0.0))
1124                 self._l7_data[u"server"][u"err_rx_throttled"] = \
1125                     int(self._result.get(u"client_err_rx_throttled", 0))
1126                 if u"udp" in self.traffic_profile:
1127                     self._l7_data[u"client"][u"udp"] = dict()
1128                     self._l7_data[u"client"][u"udp"][u"connects"] = \
1129                         int(self._result.get(u"client_udp_connects", 0))
1130                     self._l7_data[u"client"][u"udp"][u"closed_flows"] = \
1131                         int(self._result.get(u"client_udp_closed", 0))
1132                     self._l7_data[u"client"][u"udp"][u"tx_bytes"] = \
1133                         int(self._result.get(u"client_udp_tx_bytes", 0))
1134                     self._l7_data[u"client"][u"udp"][u"rx_bytes"] = \
1135                         int(self._result.get(u"client_udp_rx_bytes", 0))
1136                     self._l7_data[u"client"][u"udp"][u"tx_packets"] = \
1137                         int(self._result.get(u"client_udp_tx_packets", 0))
1138                     self._l7_data[u"client"][u"udp"][u"rx_packets"] = \
1139                         int(self._result.get(u"client_udp_rx_packets", 0))
1140                     self._l7_data[u"client"][u"udp"][u"keep_drops"] = \
1141                         int(self._result.get(u"client_udp_keep_drops", 0))
1142                     self._l7_data[u"client"][u"udp"][u"err_cwf"] = \
1143                         int(self._result.get(u"client_err_cwf", 0))
1144                     self._l7_data[u"server"][u"udp"] = dict()
1145                     self._l7_data[u"server"][u"udp"][u"accepted_flows"] = \
1146                         int(self._result.get(u"server_udp_accepts", 0))
1147                     self._l7_data[u"server"][u"udp"][u"closed_flows"] = \
1148                         int(self._result.get(u"server_udp_closed", 0))
1149                     self._l7_data[u"server"][u"udp"][u"tx_bytes"] = \
1150                         int(self._result.get(u"server_udp_tx_bytes", 0))
1151                     self._l7_data[u"server"][u"udp"][u"rx_bytes"] = \
1152                         int(self._result.get(u"server_udp_rx_bytes", 0))
1153                     self._l7_data[u"server"][u"udp"][u"tx_packets"] = \
1154                         int(self._result.get(u"server_udp_tx_packets", 0))
1155                     self._l7_data[u"server"][u"udp"][u"rx_packets"] = \
1156                         int(self._result.get(u"server_udp_rx_packets", 0))
1157                 elif u"tcp" in self.traffic_profile:
1158                     self._l7_data[u"client"][u"tcp"] = dict()
1159                     self._l7_data[u"client"][u"tcp"][u"initiated_flows"] = \
1160                         int(self._result.get(u"client_tcp_connect_inits", 0))
1161                     self._l7_data[u"client"][u"tcp"][u"connects"] = \
1162                         int(self._result.get(u"client_tcp_connects", 0))
1163                     self._l7_data[u"client"][u"tcp"][u"closed_flows"] = \
1164                         int(self._result.get(u"client_tcp_closed", 0))
1165                     self._l7_data[u"client"][u"tcp"][u"connattempt"] = \
1166                         int(self._result.get(u"client_tcp_connattempt", 0))
1167                     self._l7_data[u"client"][u"tcp"][u"tx_bytes"] = \
1168                         int(self._result.get(u"client_tcp_tx_bytes", 0))
1169                     self._l7_data[u"client"][u"tcp"][u"rx_bytes"] = \
1170                         int(self._result.get(u"client_tcp_rx_bytes", 0))
1171                     self._l7_data[u"server"][u"tcp"] = dict()
1172                     self._l7_data[u"server"][u"tcp"][u"accepted_flows"] = \
1173                         int(self._result.get(u"server_tcp_accepts", 0))
1174                     self._l7_data[u"server"][u"tcp"][u"connects"] = \
1175                         int(self._result.get(u"server_tcp_connects", 0))
1176                     self._l7_data[u"server"][u"tcp"][u"closed_flows"] = \
1177                         int(self._result.get(u"server_tcp_closed", 0))
1178                     self._l7_data[u"server"][u"tcp"][u"tx_bytes"] = \
1179                         int(self._result.get(u"server_tcp_tx_bytes", 0))
1180                     self._l7_data[u"server"][u"tcp"][u"rx_bytes"] = \
1181                         int(self._result.get(u"server_tcp_rx_bytes", 0))
1182
    def _get_measurement_result(self):
        """Return the result of last measurement as MeasurementResult.

        Separate function, as measurements can end either by time
        or by explicit call, this is the common block at the end.

        The intended_load field of MeasurementResult is in
        transactions per second. Transmit count and loss count units
        depend on the transaction type. Usually they are in transactions
        per second, or aggregated packets per second.

        :returns: Structure containing the result of the measurement.
        :rtype: MeasurementResult
        """
        # Wall-clock duration including ssh/setup overheads.
        duration_with_overheads = time.monotonic() - self._start_time
        try:
            # Client duration seems to include a setup period
            # where TRex does not send any packets yet.
            # Server duration does not include it.
            server_data = self._l7_data[u"server"]
            approximated_duration = float(server_data[u"traffic_duration"])
        except (KeyError, AttributeError, ValueError, TypeError):
            # STL runs have no l7 data; fall through to other estimates.
            approximated_duration = None
        try:
            if not approximated_duration:
                approximated_duration = float(self._approximated_duration)
        except ValueError:  # "manual"
            approximated_duration = None
        if not approximated_duration:
            if self._duration and self._duration > 0:
                # Known recomputed or target duration.
                approximated_duration = self._duration
            else:
                # It was an explicit stop.
                if not self._stop_time:
                    raise RuntimeError(u"Unable to determine duration.")
                approximated_duration = self._stop_time - self._start_time
        target_duration = self._target_duration
        if not target_duration:
            target_duration = approximated_duration
        transmit_rate = self._rate
        unsent = 0
        if self.transaction_type == u"packet":
            partial_attempt_count = self._sent
            packet_rate = transmit_rate * self.ppta
            # We have a float. TRex way of rounding it is not obvious.
            # The biggest source of mismatch is Inter Stream Gap.
            # So the code tolerates 10 usec of missing packets.
            expected_attempt_count = (target_duration - 1e-5) * packet_rate
            expected_attempt_count = math.ceil(expected_attempt_count)
            # TRex can send more.
            expected_attempt_count = max(expected_attempt_count, self._sent)
            unsent = expected_attempt_count - self._sent
            pass_count = self._received
            loss_count = self._loss
        elif self.transaction_type == u"udp_cps":
            if not self.transaction_scale:
                raise RuntimeError(u"Add support for no-limit udp_cps.")
            partial_attempt_count = self._l7_data[u"client"][u"sent"]
            # We do not care whether TG is slow, it should have attempted all.
            expected_attempt_count = self.transaction_scale
            unsent = expected_attempt_count - partial_attempt_count
            pass_count = self._l7_data[u"client"][u"received"]
            loss_count = partial_attempt_count - pass_count
        elif self.transaction_type == u"tcp_cps":
            if not self.transaction_scale:
                raise RuntimeError(u"Add support for no-limit tcp_cps.")
            ctca = self._l7_data[u"client"][u"tcp"][u"connattempt"]
            partial_attempt_count = ctca
            # We do not care whether TG is slow, it should have attempted all.
            expected_attempt_count = self.transaction_scale
            unsent = expected_attempt_count - partial_attempt_count
            # From TCP point of view, server/connects counts full connections,
            # but we are testing NAT session so client/connects counts that
            # (half connections from TCP point of view).
            pass_count = self._l7_data[u"client"][u"tcp"][u"connects"]
            loss_count = partial_attempt_count - pass_count
        elif self.transaction_type == u"udp_pps":
            # NOTE(review): pass_count is not set in the *_pps branches;
            # it is also unused below, so this looks intentional — confirm.
            if not self.transaction_scale:
                raise RuntimeError(u"Add support for no-limit udp_pps.")
            partial_attempt_count = self._sent
            expected_attempt_count = self.transaction_scale * self.ppta
            unsent = expected_attempt_count - self._sent
            loss_count = self._loss
        elif self.transaction_type == u"tcp_pps":
            if not self.transaction_scale:
                raise RuntimeError(u"Add support for no-limit tcp_pps.")
            partial_attempt_count = self._sent
            expected_attempt_count = self.transaction_scale * self.ppta
            # One loss-like scenario happens when TRex receives all packets
            # on L2 level, but is not fast enough to process them all
            # at L7 level, which leads to retransmissions.
            # Those manifest as opackets larger than expected.
            # A simple workaround is to add absolute difference.
            # Probability of retransmissions exactly cancelling
            # packets unsent due to duration stretching is quite low.
            unsent = abs(expected_attempt_count - self._sent)
            loss_count = self._loss
        else:
            raise RuntimeError(f"Unknown parsing {self.transaction_type!r}")
        if unsent and isinstance(self._approximated_duration, float):
            # Do not report unsent for "manual".
            logger.debug(f"Unsent packets/transactions: {unsent}")
        if loss_count < 0 and not self.negative_loss:
            loss_count = 0
        measurement = MeasurementResult(
            intended_duration=target_duration,
            intended_load=transmit_rate,
            offered_count=partial_attempt_count,
            loss_count=loss_count,
            offered_duration=approximated_duration,
            duration_with_overheads=duration_with_overheads,
            intended_count=expected_attempt_count,
        )
        measurement.latency = self.get_latency_int()
        return measurement
1299
1300     def measure(self, intended_duration, intended_load):
1301         """Run trial measurement, parse and return results.
1302
1303         The intended load is for transactions. Stateles bidirectional traffic
1304         is understood as sequence of (asynchronous) transactions,
1305         two packets each.
1306
1307         The result units depend on test type, generally
1308         the count either transactions or packets (aggregated over directions).
1309
1310         Optionally, this method sleeps if measurement finished before
1311         the time specified as intended_duration (PLRsearch needs time for math).
1312
1313         :param intended_duration: Trial duration [s].
1314         :param intended_load: Target rate in transactions per second.
1315         :type intended_duration: float
1316         :type intended_load: float
1317         :returns: Structure containing the result of the measurement.
1318         :rtype: MeasurementResult
1319         :raises RuntimeError: If TG is not set or if node is not TG
1320             or if subtype is not specified.
1321         :raises NotImplementedError: If TG is not supported.
1322         """
1323         intended_duration = float(intended_duration)
1324         time_start = time.monotonic()
1325         time_stop = time_start + intended_duration
1326         if self.resetter:
1327             self.resetter()
1328         result = self._send_traffic_on_tg_with_ramp_up(
1329             duration=intended_duration,
1330             rate=intended_load,
1331             async_call=False,
1332         )
1333         logger.debug(f"trial measurement result: {result!r}")
1334         # In PLRsearch, computation needs the specified time to complete.
1335         if self.sleep_till_duration:
1336             while (sleeptime := time_stop - time.monotonic()) > 0.0:
1337                 time.sleep(sleeptime)
1338         return result
1339
1340     def set_rate_provider_defaults(
1341             self,
1342             frame_size,
1343             traffic_profile,
1344             ppta=1,
1345             resetter=None,
1346             traffic_directions=2,
1347             transaction_duration=0.0,
1348             transaction_scale=0,
1349             transaction_type=u"packet",
1350             duration_limit=0.0,
1351             negative_loss=True,
1352             sleep_till_duration=False,
1353             use_latency=False,
1354             ramp_up_rate=None,
1355             ramp_up_duration=None,
1356             state_timeout=240.0,
1357         ):
1358         """Store values accessed by measure().
1359
1360         :param frame_size: Frame size identifier or value [B].
1361         :param traffic_profile: Module name as a traffic profile identifier.
1362             See GPL/traffic_profiles/trex for implemented modules.
1363         :param ppta: Packets per transaction, aggregated over directions.
1364             Needed for udp_pps which does not have a good transaction counter,
1365             so we need to compute expected number of packets.
1366             Default: 1.
1367         :param resetter: Callable to reset DUT state for repeated trials.
1368         :param traffic_directions: Traffic from packet counting point of view
1369             is bi- (2) or uni- (1) directional.
1370             Default: 2
1371         :param transaction_duration: Total expected time to close transaction.
1372         :param transaction_scale: Number of transactions to perform.
1373             0 (default) means unlimited.
1374         :param transaction_type: An identifier specifying which counters
1375             and formulas to use when computing attempted and failed
1376             transactions. Default: "packet".
1377         :param duration_limit: Zero or maximum limit for computed (or given)
1378             duration.
1379         :param negative_loss: If false, negative loss is reported as zero loss.
1380         :param sleep_till_duration: If true and measurement returned faster,
1381             sleep until it matches duration. Needed for PLRsearch.
1382         :param use_latency: Whether to measure latency during the trial.
1383             Default: False.
1384         :param ramp_up_rate: Rate to use in ramp-up trials [pps].
1385         :param ramp_up_duration: Duration of ramp-up trials [s].
1386         :param state_timeout: Time of life of DUT state [s].
1387         :type frame_size: str or int
1388         :type traffic_profile: str
1389         :type ppta: int
1390         :type resetter: Optional[Callable[[], None]]
1391         :type traffic_directions: int
1392         :type transaction_duration: float
1393         :type transaction_scale: int
1394         :type transaction_type: str
1395         :type duration_limit: float
1396         :type negative_loss: bool
1397         :type sleep_till_duration: bool
1398         :type use_latency: bool
1399         :type ramp_up_rate: float
1400         :type ramp_up_duration: float
1401         :type state_timeout: float
1402         """
1403         self.frame_size = frame_size
1404         self.traffic_profile = str(traffic_profile)
1405         self.resetter = resetter
1406         self.ppta = int(ppta)
1407         self.traffic_directions = int(traffic_directions)
1408         self.transaction_duration = float(transaction_duration)
1409         self.transaction_scale = int(transaction_scale)
1410         self.transaction_type = str(transaction_type)
1411         self.duration_limit = float(duration_limit)
1412         self.negative_loss = bool(negative_loss)
1413         self.sleep_till_duration = bool(sleep_till_duration)
1414         self.use_latency = bool(use_latency)
1415         self.ramp_up_rate = float(ramp_up_rate)
1416         self.ramp_up_duration = float(ramp_up_duration)
1417         self.state_timeout = float(state_timeout)
1418
1419
class OptimizedSearch:
    """Robot Library class exposing the optimized search keywords.

    Besides setting up the measurer and forwarding arguments,
    the main business is to translate min/max rate from unidir to aggregated.
    """

    @staticmethod
    def perform_mlr_search(
        frame_size: Union[int, str],
        traffic_profile: str,
        min_load: float,
        max_load: float,
        loss_ratio: float = 0.005,
        relative_width: float = 0.005,
        initial_trial_duration: float = 1.0,
        final_trial_duration: float = 1.0,
        duration_sum: float = 20.0,
        expansion_coefficient: int = 2,
        preceding_targets: int = 2,
        search_duration_max: float = 1200.0,
        ppta: int = 1,
        resetter: Optional[Callable[[], None]] = None,
        traffic_directions: int = 2,
        transaction_duration: float = 0.0,
        transaction_scale: int = 0,
        transaction_type: str = "packet",
        use_latency: bool = False,
        ramp_up_rate: float = 0.0,
        ramp_up_duration: float = 0.0,
        state_timeout: float = 240.0,
    ) -> List[TrimmedStat]:
        """Setup initialized TG, perform optimized search, return intervals.

        When transaction_scale is nonzero, the initial and final trial
        durations are forced to 1.0 (in that mode they do not affect
        the real trial duration) and a single preceding target is used.
        This way no re-measurement happens.
        Warmup has to be handled via resetter or ramp-up mechanisms.

        :param frame_size: Frame size identifier or value [B].
        :param traffic_profile: Module name as a traffic profile identifier.
            See GPL/traffic_profiles/trex for implemented modules.
        :param min_load: Minimal load in transactions per second.
        :param max_load: Maximal load in transactions per second.
        :param loss_ratio: Ratio of packets lost, for PDR [1].
        :param relative_width: Final lower bound intended load
            cannot be more distant than this multiple of upper bound [1].
        :param initial_trial_duration: Trial duration for the initial phase
            and also for the first intermediate phase [s].
        :param final_trial_duration: Trial duration for the final phase [s].
        :param duration_sum: Max sum of duration for deciding [s].
        :param expansion_coefficient: In external search multiply width
            by this coefficient.
        :param preceding_targets: Number of intermediate phases
            to perform before the final phase [1].
        :param search_duration_max: The search will fail itself
            when not finished before this overall time [s].
        :param ppta: Packets per transaction, aggregated over directions.
            Needed for udp_pps which does not have a good transaction counter,
            so we need to compute expected number of packets.
            Default: 1.
        :param resetter: Callable to reset DUT state for repeated trials.
        :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
            Default: 2
        :param transaction_duration: Total expected time to close transaction.
        :param transaction_scale: Number of transactions to perform.
            0 (default) means unlimited.
        :param transaction_type: An identifier specifying which counters
            and formulas to use when computing attempted and failed
            transactions. Default: "packet".
        :param use_latency: Whether to measure latency during the trial.
            Default: False.
        :param ramp_up_rate: Rate to use in ramp-up trials [pps].
        :param ramp_up_duration: Duration of ramp-up trials [s].
        :param state_timeout: Time of life of DUT state [s].
        :type frame_size: str or int
        :type traffic_profile: str
        :type min_load: float
        :type max_load: float
        :type loss_ratio: float
        :type relative_width: float
        :type initial_trial_duration: float
        :type final_trial_duration: float
        :type duration_sum: float
        :type expansion_coefficient: int
        :type preceding_targets: int
        :type search_duration_max: float
        :type ppta: int
        :type resetter: Optional[Callable[[], None]]
        :type traffic_directions: int
        :type transaction_duration: float
        :type transaction_scale: int
        :type transaction_type: str
        :type use_latency: bool
        :type ramp_up_rate: float
        :type ramp_up_duration: float
        :type state_timeout: float
        :returns: Structure containing narrowed down NDR and PDR intervals
            and their measurements.
        :rtype: List[TrimmedStat]
        :raises RuntimeError: If search duration exceeds search_duration_max
            or if min load becomes an upper bound for any search goal.
        """
        # The TrafficGenerator instance instantiated by Robot Framework
        # is needed so trex_stl-*() state is shared with other keywords.
        tg = BuiltIn().get_library_instance(
            u"resources.libraries.python.TrafficGenerator"
        )
        if transaction_scale:
            # Overrides for fixed transaction amount.
            initial_trial_duration = 1.0
            final_trial_duration = 1.0
            preceding_targets = 1
            # Extra overall budget per transaction.
            # TODO: Move the value to Constants.py?
            search_duration_max += transaction_scale * 3e-4
        tg.set_rate_provider_defaults(
            frame_size=frame_size,
            traffic_profile=traffic_profile,
            sleep_till_duration=False,
            ppta=ppta,
            resetter=resetter,
            traffic_directions=traffic_directions,
            transaction_duration=transaction_duration,
            transaction_scale=transaction_scale,
            transaction_type=transaction_type,
            use_latency=use_latency,
            ramp_up_rate=ramp_up_rate,
            ramp_up_duration=ramp_up_duration,
            state_timeout=state_timeout,
        )
        # Zero loss_ratio happens in reconf tests: only the zero-loss goal
        # (with zero exceed ratio) is searched for then.
        ratios = [0.0, loss_ratio] if loss_ratio else [0.0]
        exceed_ratio = 0.5 if loss_ratio else 0.0
        goals = [
            SearchGoal(
                loss_ratio=ratio,
                exceed_ratio=exceed_ratio,
                relative_width=relative_width,
                initial_trial_duration=initial_trial_duration,
                final_trial_duration=final_trial_duration,
                duration_sum=duration_sum,
                preceding_targets=preceding_targets,
                expansion_coefficient=expansion_coefficient,
                fail_fast=True,
            )
            for ratio in ratios
        ]
        config = Config()
        config.goals = goals
        config.min_load = min_load
        config.max_load = max_load
        config.search_duration_max = search_duration_max
        config.warmup_duration = 1.0
        found = MultipleLossRatioSearch(config).search(
            measurer=tg, debug=logger.debug
        )
        # Preserve goal order in the returned list.
        return [found[goal] for goal in goals]

    @staticmethod
    def perform_soak_search(
            frame_size,
            traffic_profile,
            min_load,
            max_load,
            plr_target=1e-7,
            tdpt=0.1,
            initial_count=50,
            timeout=7200.0,
            ppta=1,
            resetter=None,
            trace_enabled=False,
            traffic_directions=2,
            transaction_duration=0.0,
            transaction_scale=0,
            transaction_type=u"packet",
            use_latency=False,
            ramp_up_rate=None,
            ramp_up_duration=None,
            state_timeout=240.0,
    ):
        """Setup initialized TG, perform soak search, return avg and stdev.

        :param frame_size: Frame size identifier or value [B].
        :param traffic_profile: Module name as a traffic profile identifier.
            See GPL/traffic_profiles/trex for implemented modules.
        :param min_load: Minimal load in transactions per second.
        :param max_load: Maximal load in transactions per second.
        :param plr_target: Ratio of packets lost to achieve [1].
        :param tdpt: Trial duration per trial.
            The algorithm linearly increases trial duration with trial number,
            this is the increment between successive trials, in seconds.
        :param initial_count: Offset to apply before the first trial.
            For example initial_count=50 makes first trial to be 51*tdpt long.
            This is needed because the initial "search" phase of integrator
            takes significant time even without any trial results.
        :param timeout: The search will stop after this overall time [s].
        :param ppta: Packets per transaction, aggregated over directions.
            Needed for udp_pps which does not have a good transaction counter,
            so we need to compute expected number of packets.
            Default: 1.
        :param resetter: Callable to reset DUT state for repeated trials.
        :param trace_enabled: True if trace enabled else False.
            This is very verbose tracing on numeric computations,
            do not use in production.
            Default: False
        :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
            Default: 2
        :param transaction_duration: Total expected time to close transaction.
        :param transaction_scale: Number of transactions to perform.
            0 (default) means unlimited.
        :param transaction_type: An identifier specifying which counters
            and formulas to use when computing attempted and failed
            transactions. Default: "packet".
        :param use_latency: Whether to measure latency during the trial.
            Default: False.
        :param ramp_up_rate: Rate to use in ramp-up trials [pps].
        :param ramp_up_duration: Duration of ramp-up trials [s].
        :param state_timeout: Time of life of DUT state [s].
        :type frame_size: str or int
        :type traffic_profile: str
        :type min_load: float
        :type max_load: float
        :type plr_target: float
        :type tdpt: float
        :type initial_count: int
        :type timeout: float
        :type ppta: int
        :type resetter: Optional[Callable[[], None]]
        :type trace_enabled: bool
        :type traffic_directions: int
        :type transaction_duration: float
        :type transaction_scale: int
        :type transaction_type: str
        :type use_latency: bool
        :type ramp_up_rate: float
        :type ramp_up_duration: float
        :type state_timeout: float
        :returns: Average and stdev of estimated aggregated rate giving PLR.
        :rtype: 2-tuple of float
        """
        tg = BuiltIn().get_library_instance(
            u"resources.libraries.python.TrafficGenerator"
        )
        if transaction_scale:
            # Override for fixed transaction amount:
            # trial length is dictated by the traffic itself,
            # so allow the full two hours regardless of the caller's value.
            timeout = 7200.0
        tg.set_rate_provider_defaults(
            frame_size=frame_size,
            traffic_profile=traffic_profile,
            negative_loss=False,
            sleep_till_duration=True,
            ppta=ppta,
            resetter=resetter,
            traffic_directions=traffic_directions,
            transaction_duration=transaction_duration,
            transaction_scale=transaction_scale,
            transaction_type=transaction_type,
            use_latency=use_latency,
            ramp_up_rate=ramp_up_rate,
            ramp_up_duration=ramp_up_duration,
            state_timeout=state_timeout,
        )
        searcher = PLRsearch(
            measurer=tg,
            trial_duration_per_trial=tdpt,
            packet_loss_ratio_target=plr_target,
            trial_number_offset=initial_count,
            timeout=timeout,
            trace_enabled=trace_enabled,
        )
        return searcher.search(min_rate=min_load, max_rate=max_load)