fix(core): TRex self loop tests
[csit.git] / resources / libraries / python / TrafficGenerator.py
1 # Copyright (c) 2023 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Performance testing traffic generator library."""
15
16 import math
17 import time
18
19 from robot.api import logger
20 from robot.libraries.BuiltIn import BuiltIn
21
22 from .Constants import Constants
23 from .DropRateSearch import DropRateSearch
24 from .MLRsearch.AbstractMeasurer import AbstractMeasurer
25 from .MLRsearch.MultipleLossRatioSearch import MultipleLossRatioSearch
26 from .MLRsearch.ReceiveRateMeasurement import ReceiveRateMeasurement
27 from .PLRsearch.PLRsearch import PLRsearch
28 from .OptionString import OptionString
29 from .ssh import exec_cmd_no_error, exec_cmd
30 from .topology import NodeType
31 from .topology import NodeSubTypeTG
32 from .topology import Topology
33 from .TRexConfigGenerator import TrexInitConfig
34 from .DUTSetup import DUTSetup as DS
35
36 __all__ = [u"TGDropRateSearchImpl", u"TrafficGenerator", u"OptimizedSearch"]
37
38
39 def check_subtype(node):
40     """Return supported subtype of given node, or raise an exception.
41
42     Currently only one subtype is supported,
43     but we want our code to be ready for other ones.
44
45     :param node: Topology node to check. Can be None.
46     :type node: dict or NoneType
47     :returns: Subtype detected.
48     :rtype: NodeSubTypeTG
49     :raises RuntimeError: If node is not supported, message explains how.
50     """
51     if node.get(u"type") is None:
52         msg = u"Node type is not defined"
53     elif node[u"type"] != NodeType.TG:
54         msg = f"Node type is {node[u'type']!r}, not a TG"
55     elif node.get(u"subtype") is None:
56         msg = u"TG subtype is not defined"
57     elif node[u"subtype"] != NodeSubTypeTG.TREX:
58         msg = f"TG subtype {node[u'subtype']!r} is not supported"
59     else:
60         return NodeSubTypeTG.TREX
61     raise RuntimeError(msg)
62
63
64 class TGDropRateSearchImpl(DropRateSearch):
65     """Drop Rate Search implementation."""
66
67     # def __init__(self):
68     #     super(TGDropRateSearchImpl, self).__init__()
69
70     def measure_loss(
71             self, rate, frame_size, loss_acceptance, loss_acceptance_type,
72             traffic_profile):
73         """Runs the traffic and evaluate the measured results.
74
75         :param rate: Offered traffic load.
76         :param frame_size: Size of frame.
77         :param loss_acceptance: Permitted drop ratio or frames count.
78         :param loss_acceptance_type: Type of permitted loss.
79         :param traffic_profile: Module name as a traffic profile identifier.
80             See GPL/traffic_profiles/trex for implemented modules.
81         :type rate: float
82         :type frame_size: str
83         :type loss_acceptance: float
84         :type loss_acceptance_type: LossAcceptanceType
85         :type traffic_profile: str
86         :returns: Drop threshold exceeded? (True/False)
87         :rtype: bool
88         :raises NotImplementedError: If TG is not supported.
89         :raises RuntimeError: If TG is not specified.
90         """
91         # we need instance of TrafficGenerator instantiated by Robot Framework
92         # to be able to use trex_stl-*()
93         tg_instance = BuiltIn().get_library_instance(
94             u"resources.libraries.python.TrafficGenerator"
95         )
96         subtype = check_subtype(tg_instance.node)
97         if subtype == NodeSubTypeTG.TREX:
98             unit_rate = str(rate) + self.get_rate_type_str()
99             tg_instance.trex_stl_start_remote_exec(
100                 self.get_duration(), unit_rate, frame_size, traffic_profile
101             )
102             loss = tg_instance.get_loss()
103             sent = tg_instance.get_sent()
104             if self.loss_acceptance_type_is_percentage():
105                 loss = (float(loss) / float(sent)) * 100
106             logger.trace(
107                 f"comparing: {loss} < {loss_acceptance} {loss_acceptance_type}"
108             )
109             return float(loss) <= float(loss_acceptance)
110         return False
111
112     def get_latency(self):
113         """Returns min/avg/max latency.
114
115         :returns: Latency stats.
116         :rtype: list
117         """
118         tg_instance = BuiltIn().get_library_instance(
119             u"resources.libraries.python.TrafficGenerator"
120         )
121         return tg_instance.get_latency_int()
122
123
124 class TrexMode:
125     """Defines mode of T-Rex traffic generator."""
126     # Advanced stateful mode
127     ASTF = u"ASTF"
128     # Stateless mode
129     STL = u"STL"
130
131
132 class TrafficGenerator(AbstractMeasurer):
133     """Traffic Generator."""
134
135     # Use one instance of TrafficGenerator for all tests in test suite
136     ROBOT_LIBRARY_SCOPE = u"TEST SUITE"
137
138     def __init__(self):
139         self._node = None
140         self._mode = None
141         # TG interface order mapping
142         self._ifaces_reordered = False
143         # Result holding fields, to be removed.
144         self._result = None
145         self._loss = None
146         self._sent = None
147         self._latency = None
148         self._received = None
149         self._approximated_rate = None
150         self._approximated_duration = None
151         self._l7_data = None
152         # Measurement input fields, needed for async stop result.
153         self._start_time = None
154         self._stop_time = None
155         self._rate = None
156         self._target_duration = None
157         self._duration = None
158         # Other input parameters, not knowable from measure() signature.
159         self.frame_size = None
160         self.traffic_profile = None
161         self.traffic_directions = None
162         self.negative_loss = None
163         self.use_latency = None
164         self.ppta = None
165         self.resetter = None
166         self.transaction_scale = None
167         self.transaction_duration = None
168         self.sleep_till_duration = None
169         self.transaction_type = None
170         self.duration_limit = None
171         self.ramp_up_start = None
172         self.ramp_up_stop = None
173         self.ramp_up_rate = None
174         self.ramp_up_duration = None
175         self.state_timeout = None
176         # Transient data needed for async measurements.
177         self._xstats = (None, None)
178
179     @property
180     def node(self):
181         """Getter.
182
183         :returns: Traffic generator node.
184         :rtype: dict
185         """
186         return self._node
187
188     def get_loss(self):
189         """Return number of lost packets.
190
191         :returns: Number of lost packets.
192         :rtype: str
193         """
194         return self._loss
195
196     def get_sent(self):
197         """Return number of sent packets.
198
199         :returns: Number of sent packets.
200         :rtype: str
201         """
202         return self._sent
203
204     def get_received(self):
205         """Return number of received packets.
206
207         :returns: Number of received packets.
208         :rtype: str
209         """
210         return self._received
211
212     def get_latency_int(self):
213         """Return rounded min/avg/max latency.
214
215         :returns: Latency stats.
216         :rtype: list
217         """
218         return self._latency
219
220     def get_approximated_rate(self):
221         """Return approximated rate computed as ratio of transmitted packets
222         over duration of trial.
223
224         :returns: Approximated rate.
225         :rtype: str
226         """
227         return self._approximated_rate
228
229     def get_l7_data(self):
230         """Return L7 data.
231
232         :returns: Number of received packets.
233         :rtype: dict
234         """
235         return self._l7_data
236
237     def check_mode(self, expected_mode):
238         """Check TG mode.
239
240         :param expected_mode: Expected traffic generator mode.
241         :type expected_mode: object
242         :raises RuntimeError: In case of unexpected TG mode.
243         """
244         if self._mode == expected_mode:
245             return
246         raise RuntimeError(
247             f"{self._node[u'subtype']} not running in {expected_mode} mode!"
248         )
249
250     @staticmethod
251     def get_tg_type(tg_node):
252         """Log and return the installed traffic generator type.
253
254         :param tg_node: Node from topology file.
255         :type tg_node: dict
256         :returns: Traffic generator type string.
257         :rtype: str
258         :raises RuntimeError: If command returns nonzero return code.
259         """
260         return str(check_subtype(tg_node))
261
262     @staticmethod
263     def get_tg_version(tg_node):
264         """Log and return the installed traffic generator version.
265
266         :param tg_node: Node from topology file.
267         :type tg_node: dict
268         :returns: Traffic generator version string.
269         :rtype: str
270         :raises RuntimeError: If command returns nonzero return code.
271         """
272         subtype = check_subtype(tg_node)
273         if subtype == NodeSubTypeTG.TREX:
274             command = f"cat {Constants.TREX_INSTALL_DIR}/VERSION"
275             message = u"Get T-Rex version failed!"
276             stdout, _ = exec_cmd_no_error(tg_node, command, message=message)
277             return stdout.strip()
278         else:
279             return "none"
280
281     def initialize_traffic_generator(self, osi_layer, parallel_links=1):
282         """TG initialization.
283
284         :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
285         :param parallel_links: Number of parallel links to configure.
286         :type osi_layer: str
287         :type parallel_links: int
288         :raises ValueError: If OSI layer is unknown.
289         """
290         if osi_layer not in ("L2", "L3", "L7"):
291             raise ValueError("Unknown OSI layer!")
292
293         topology = BuiltIn().get_variable_value("&{topology_info}")
294         self._node = topology["TG"]
295         subtype = check_subtype(self._node)
296
297         if subtype == NodeSubTypeTG.TREX:
298             trex_topology = list()
299             self._mode = TrexMode.ASTF if osi_layer == "L7" else TrexMode.STL
300
301             for l in range(1, parallel_links*2, 2):
302                 tg_if1_adj_addr = topology[f"TG_pf{l+1}_mac"][0]
303                 tg_if2_adj_addr = topology[f"TG_pf{l}_mac"][0]
304                 if osi_layer in ("L3", "L7") and "DUT1" in topology.keys():
305                     ifl = BuiltIn().get_variable_value("${int}")
306                     last = topology["duts_count"]
307                     tg_if1_adj_addr = Topology().get_interface_mac(
308                         topology["DUT1"], 
309                         BuiltIn().get_variable_value(
310                             f"${{DUT1_{ifl}{l}}}[0]"
311                         )
312                     )
313                     tg_if2_adj_addr = Topology().get_interface_mac(
314                         topology[f"DUT{last}"], 
315                         BuiltIn().get_variable_value(
316                             f"${{DUT{last}_{ifl}{l+1}}}[0]"
317                         )
318                     )
319
320                 trex_topology.append(
321                     dict(
322                         interface=topology[f"TG_pf{l}"][0],
323                         dst_mac=tg_if1_adj_addr
324                     )
325                 )
326                 trex_topology.append(
327                     dict(
328                         interface=topology[f"TG_pf{l+1}"][0],
329                         dst_mac=tg_if2_adj_addr
330                     )
331                 )
332                 if1_pci = topology[f"TG_pf{l}_pci"][0]
333                 if2_pci = topology[f"TG_pf{l+1}_pci"][0]
334                 if min(if1_pci, if2_pci) != if1_pci:
335                     self._ifaces_reordered = True
336                     trex_topology.reverse()
337
338             TrexInitConfig.init_trex_startup_configuration(
339                 self._node, trex_topology
340             )
341             TrafficGenerator.startup_trex(
342                 self._node, osi_layer, subtype=subtype
343             )
344
345     @staticmethod
346     def startup_trex(tg_node, osi_layer, subtype=None):
347         """Startup sequence for the TRex traffic generator.
348
349         :param tg_node: Traffic generator node.
350         :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
351         :param subtype: Traffic generator sub-type.
352         :type tg_node: dict
353         :type osi_layer: str
354         :type subtype: NodeSubTypeTG
355         :raises RuntimeError: If T-Rex startup failed.
356         :raises ValueError: If OSI layer is not supported.
357         """
358         if not subtype:
359             subtype = check_subtype(tg_node)
360         if subtype == NodeSubTypeTG.TREX:
361             for _ in range(0, 3):
362                 # Kill TRex only if it is already running.
363                 cmd = u"sh -c \"pgrep t-rex && pkill t-rex && sleep 3 || true\""
364                 exec_cmd_no_error(
365                     tg_node, cmd, sudo=True, message=u"Kill TRex failed!"
366                 )
367
368                 # Prepare interfaces for TRex.
369                 tg_port_drv = Constants.TREX_PORT_DRIVER
370                 mlx_driver = u""
371                 for port in tg_node[u"interfaces"].values():
372                     if u"Mellanox" in port.get(u"model"):
373                         mlx_driver = port.get(u"driver")
374                         pci_addr = port.get(u'pci_address')
375                         cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
376                         if cur_driver == mlx_driver:
377                             pass
378                         elif not cur_driver:
379                             DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
380                         else:
381                             DS.pci_driver_unbind(tg_node, pci_addr)
382                             DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
383                     else:
384                         pci_addr = port.get(u'pci_address')
385                         cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
386                         if cur_driver:
387                             DS.pci_driver_unbind(tg_node, pci_addr)
388                         DS.pci_driver_bind(tg_node, pci_addr, tg_port_drv)
389
390                 # Start TRex.
391                 cd_cmd = f"cd '{Constants.TREX_INSTALL_DIR}/scripts/'"
392                 trex_cmd = OptionString([u"nohup", u"./t-rex-64"])
393                 trex_cmd.add(u"-i")
394                 trex_cmd.add(u"--prefix $(hostname)")
395                 trex_cmd.add(u"--hdrh")
396                 trex_cmd.add(u"--no-scapy-server")
397                 trex_cmd.add_if(u"--astf", osi_layer == u"L7")
398                 # OptionString does not create double space if extra is empty.
399                 trex_cmd.add(f"{Constants.TREX_EXTRA_CMDLINE}")
400                 inner_command = f"{cd_cmd} && {trex_cmd} > /tmp/trex.log 2>&1 &"
401                 cmd = f"sh -c \"{inner_command}\" > /dev/null"
402                 try:
403                     exec_cmd_no_error(tg_node, cmd, sudo=True)
404                 except RuntimeError:
405                     cmd = u"sh -c \"cat /tmp/trex.log\""
406                     exec_cmd_no_error(
407                         tg_node, cmd, sudo=True,
408                         message=u"Get TRex logs failed!"
409                     )
410                     raise RuntimeError(u"Start TRex failed!")
411
412                 # Test T-Rex API responsiveness.
413                 cmd = f"python3 {Constants.REMOTE_FW_DIR}/GPL/tools/trex/"
414                 if osi_layer in (u"L2", u"L3"):
415                     cmd += u"trex_stl_assert.py"
416                 elif osi_layer == u"L7":
417                     cmd += u"trex_astf_assert.py"
418                 else:
419                     raise ValueError(u"Unknown OSI layer!")
420                 try:
421                     exec_cmd_no_error(
422                         tg_node, cmd, sudo=True,
423                         message=u"T-Rex API is not responding!", retries=20
424                     )
425                 except RuntimeError:
426                     continue
427                 return
428             # After max retries TRex is still not responding to API critical
429             # error occurred.
430             exec_cmd(tg_node, u"cat /tmp/trex.log", sudo=True)
431             raise RuntimeError(u"Start T-Rex failed after multiple retries!")
432
433     @staticmethod
434     def is_trex_running(node):
435         """Check if T-Rex is running using pidof.
436
437         :param node: Traffic generator node.
438         :type node: dict
439         :returns: True if T-Rex is running otherwise False.
440         :rtype: bool
441         """
442         ret, _, _ = exec_cmd(node, u"pgrep t-rex", sudo=True)
443         return bool(int(ret) == 0)
444
445     @staticmethod
446     def teardown_traffic_generator(node):
447         """TG teardown.
448
449         :param node: Traffic generator node.
450         :type node: dict
451         :returns: nothing
452         :raises RuntimeError: If node type is not a TG,
453             or if T-Rex teardown fails.
454         """
455         subtype = check_subtype(node)
456         if subtype == NodeSubTypeTG.TREX:
457             exec_cmd_no_error(
458                 node,
459                 u"sh -c "
460                 u"\"if pgrep t-rex; then sudo pkill t-rex && sleep 3; fi\"",
461                 sudo=False,
462                 message=u"T-Rex kill failed!"
463             )
464
465     def trex_astf_stop_remote_exec(self, node):
466         """Execute T-Rex ASTF script on remote node over ssh to stop running
467         traffic.
468
469         Internal state is updated with measurement results.
470
471         :param node: T-Rex generator node.
472         :type node: dict
473         :raises RuntimeError: If stop traffic script fails.
474         """
475         command_line = OptionString().add(u"python3")
476         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
477         command_line.add(f"'{dirname}/trex_astf_stop.py'")
478         command_line.change_prefix(u"--")
479         for index, value in enumerate(self._xstats):
480             if value is not None:
481                 value = value.replace(u"'", u"\"")
482                 command_line.add_equals(f"xstat{index}", f"'{value}'")
483         stdout, _ = exec_cmd_no_error(
484             node, command_line,
485             message=u"T-Rex ASTF runtime error!"
486         )
487         self._parse_traffic_results(stdout)
488
489     def trex_stl_stop_remote_exec(self, node):
490         """Execute T-Rex STL script on remote node over ssh to stop running
491         traffic.
492
493         Internal state is updated with measurement results.
494
495         :param node: T-Rex generator node.
496         :type node: dict
497         :raises RuntimeError: If stop traffic script fails.
498         """
499         command_line = OptionString().add(u"python3")
500         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
501         command_line.add(f"'{dirname}/trex_stl_stop.py'")
502         command_line.change_prefix(u"--")
503         for index, value in enumerate(self._xstats):
504             if value is not None:
505                 value = value.replace(u"'", u"\"")
506                 command_line.add_equals(f"xstat{index}", f"'{value}'")
507         stdout, _ = exec_cmd_no_error(
508             node, command_line,
509             message=u"T-Rex STL runtime error!"
510         )
511         self._parse_traffic_results(stdout)
512
513     def stop_traffic_on_tg(self):
514         """Stop all traffic on TG.
515
516         :returns: Structure containing the result of the measurement.
517         :rtype: ReceiveRateMeasurement
518         :raises ValueError: If TG traffic profile is not supported.
519         """
520         subtype = check_subtype(self._node)
521         if subtype != NodeSubTypeTG.TREX:
522             raise ValueError(f"Unsupported TG subtype: {subtype!r}")
523         if u"trex-astf" in self.traffic_profile:
524             self.trex_astf_stop_remote_exec(self._node)
525         elif u"trex-stl" in self.traffic_profile:
526             self.trex_stl_stop_remote_exec(self._node)
527         else:
528             raise ValueError(u"Unsupported T-Rex traffic profile!")
529         self._stop_time = time.monotonic()
530
531         return self._get_measurement_result()
532
533     def _compute_duration(self, duration, multiplier):
534         """Compute duration for profile driver.
535
536         The final result is influenced by transaction scale and duration limit.
537         It is assumed a higher level function has already set those to self.
538         The duration argument is the target value from search point of view,
539         before the overrides are applied here.
540
541         Minus one (signalling async traffic start) is kept.
542
543         Completeness flag is also included. Duration limited or async trials
544         are not considered complete for ramp-up purposes.
545
546         :param duration: Time expressed in seconds for how long to send traffic.
547         :param multiplier: Traffic rate in transactions per second.
548         :type duration: float
549         :type multiplier: float
550         :returns: New duration and whether it was a complete ramp-up candidate.
551         :rtype: float, bool
552         """
553         if duration < 0.0:
554             # Keep the async -1.
555             return duration, False
556         computed_duration = duration
557         if self.transaction_scale:
558             computed_duration = self.transaction_scale / multiplier
559             # Log the computed duration,
560             # so we can compare with what telemetry suggests
561             # the real duration was.
562             logger.debug(f"Expected duration {computed_duration}")
563         if not self.duration_limit:
564             return computed_duration, True
565         limited_duration = min(computed_duration, self.duration_limit)
566         return limited_duration, (limited_duration == computed_duration)
567
568     def trex_astf_start_remote_exec(
569             self, duration, multiplier, async_call=False):
570         """Execute T-Rex ASTF script on remote node over ssh to start running
571         traffic.
572
573         In sync mode, measurement results are stored internally.
574         In async mode, initial data including xstats are stored internally.
575
576         This method contains the logic to compute duration as maximum time
577         if transaction_scale is nonzero.
578         The transaction_scale argument defines (limits) how many transactions
579         will be started in total. As that amount of transaction can take
580         considerable time (sometimes due to explicit delays in the profile),
581         the real time a trial needs to finish is computed here. For now,
582         in that case the duration argument is ignored, assuming it comes
583         from ASTF-unaware search algorithm. The overall time a single
584         transaction needs is given in parameter transaction_duration,
585         it includes both explicit delays and implicit time it takes
586         to transfer data (or whatever the transaction does).
587
588         Currently it is observed TRex does not start the ASTF traffic
589         immediately, an ad-hoc constant is added to the computed duration
590         to compensate for that.
591
592         If transaction_scale is zero, duration is not recomputed.
593         It is assumed the subsequent result parsing gets the real duration
594         if the traffic stops sooner for any reason.
595
596         Currently, it is assumed traffic profile defines a single transaction.
597         To avoid heavy logic here, the input rate is expected to be in
598         transactions per second, as that directly translates to TRex multiplier,
599         (assuming the profile does not override the default cps value of one).
600
601         :param duration: Time expressed in seconds for how long to send traffic.
602         :param multiplier: Traffic rate in transactions per second.
603         :param async_call: If enabled then don't wait for all incoming traffic.
604         :type duration: float
605         :type multiplier: int
606         :type async_call: bool
607         :raises RuntimeError: In case of T-Rex driver issue.
608         """
609         self.check_mode(TrexMode.ASTF)
610         p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
611         if not isinstance(duration, (float, int)):
612             duration = float(duration)
613
614         computed_duration, _ = self._compute_duration(duration, multiplier)
615
616         command_line = OptionString().add(u"python3")
617         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
618         command_line.add(f"'{dirname}/trex_astf_profile.py'")
619         command_line.change_prefix(u"--")
620         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
621         command_line.add_with_value(
622             u"profile", f"'{dirname}/{self.traffic_profile}.py'"
623         )
624         command_line.add_with_value(u"duration", f"{computed_duration!r}")
625         command_line.add_with_value(u"frame_size", self.frame_size)
626         command_line.add_with_value(
627             u"n_data_frames", Constants.ASTF_N_DATA_FRAMES
628         )
629         command_line.add_with_value(u"multiplier", multiplier)
630         command_line.add_with_value(u"port_0", p_0)
631         command_line.add_with_value(u"port_1", p_1)
632         command_line.add_with_value(
633             u"traffic_directions", self.traffic_directions
634         )
635         command_line.add_if(u"async_start", async_call)
636         command_line.add_if(u"latency", self.use_latency)
637         command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
638         command_line.add_with_value(
639             u"delay", Constants.PERF_TRIAL_ASTF_DELAY
640         )
641
642         self._start_time = time.monotonic()
643         self._rate = multiplier
644         stdout, _ = exec_cmd_no_error(
645             self._node, command_line, timeout=computed_duration + 10.0,
646             message=u"T-Rex ASTF runtime error!"
647         )
648
649         if async_call:
650             # no result
651             self._target_duration = None
652             self._duration = None
653             self._received = None
654             self._sent = None
655             self._loss = None
656             self._latency = None
657             xstats = [None, None]
658             self._l7_data = dict()
659             self._l7_data[u"client"] = dict()
660             self._l7_data[u"client"][u"active_flows"] = None
661             self._l7_data[u"client"][u"established_flows"] = None
662             self._l7_data[u"client"][u"traffic_duration"] = None
663             self._l7_data[u"server"] = dict()
664             self._l7_data[u"server"][u"active_flows"] = None
665             self._l7_data[u"server"][u"established_flows"] = None
666             self._l7_data[u"server"][u"traffic_duration"] = None
667             if u"udp" in self.traffic_profile:
668                 self._l7_data[u"client"][u"udp"] = dict()
669                 self._l7_data[u"client"][u"udp"][u"connects"] = None
670                 self._l7_data[u"client"][u"udp"][u"closed_flows"] = None
671                 self._l7_data[u"client"][u"udp"][u"err_cwf"] = None
672                 self._l7_data[u"server"][u"udp"] = dict()
673                 self._l7_data[u"server"][u"udp"][u"accepted_flows"] = None
674                 self._l7_data[u"server"][u"udp"][u"closed_flows"] = None
675             elif u"tcp" in self.traffic_profile:
676                 self._l7_data[u"client"][u"tcp"] = dict()
677                 self._l7_data[u"client"][u"tcp"][u"initiated_flows"] = None
678                 self._l7_data[u"client"][u"tcp"][u"connects"] = None
679                 self._l7_data[u"client"][u"tcp"][u"closed_flows"] = None
680                 self._l7_data[u"client"][u"tcp"][u"connattempt"] = None
681                 self._l7_data[u"server"][u"tcp"] = dict()
682                 self._l7_data[u"server"][u"tcp"][u"accepted_flows"] = None
683                 self._l7_data[u"server"][u"tcp"][u"connects"] = None
684                 self._l7_data[u"server"][u"tcp"][u"closed_flows"] = None
685             else:
686                 logger.warn(u"Unsupported T-Rex ASTF traffic profile!")
687             index = 0
688             for line in stdout.splitlines():
689                 if f"Xstats snapshot {index}: " in line:
690                     xstats[index] = line[19:]
691                     index += 1
692                 if index == 2:
693                     break
694             self._xstats = tuple(xstats)
695         else:
696             self._target_duration = duration
697             self._duration = computed_duration
698             self._parse_traffic_results(stdout)
699
700     def trex_stl_start_remote_exec(self, duration, rate, async_call=False):
701         """Execute T-Rex STL script on remote node over ssh to start running
702         traffic.
703
704         In sync mode, measurement results are stored internally.
705         In async mode, initial data including xstats are stored internally.
706
707         Mode-unaware code (e.g. in search algorithms) works with transactions.
708         To keep the logic simple, multiplier is set to that value.
709         As bidirectional traffic profiles send packets in both directions,
710         they are treated as transactions with two packets (one per direction).
711
712         :param duration: Time expressed in seconds for how long to send traffic.
713         :param rate: Traffic rate in transactions per second.
714         :param async_call: If enabled then don't wait for all incoming traffic.
715         :type duration: float
716         :type rate: str
717         :type async_call: bool
718         :raises RuntimeError: In case of T-Rex driver issue.
719         """
720         self.check_mode(TrexMode.STL)
721         p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
722         if not isinstance(duration, (float, int)):
723             duration = float(duration)
724
725         duration, _ = self._compute_duration(duration=duration, multiplier=rate)
726
727         command_line = OptionString().add(u"python3")
728         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
729         command_line.add(f"'{dirname}/trex_stl_profile.py'")
730         command_line.change_prefix(u"--")
731         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
732         command_line.add_with_value(
733             u"profile", f"'{dirname}/{self.traffic_profile}.py'"
734         )
735         command_line.add_with_value(u"duration", f"{duration!r}")
736         command_line.add_with_value(u"frame_size", self.frame_size)
737         command_line.add_with_value(u"rate", f"{rate!r}")
738         command_line.add_with_value(u"port_0", p_0)
739         command_line.add_with_value(u"port_1", p_1)
740         command_line.add_with_value(
741             u"traffic_directions", self.traffic_directions
742         )
743         command_line.add_if(u"async_start", async_call)
744         command_line.add_if(u"latency", self.use_latency)
745         command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
746         command_line.add_with_value(u"delay", Constants.PERF_TRIAL_STL_DELAY)
747
748         self._start_time = time.monotonic()
749         self._rate = float(rate[:-3]) if u"pps" in rate else float(rate)
750         stdout, _ = exec_cmd_no_error(
751             self._node, command_line, timeout=int(duration) + 60,
752             message=u"T-Rex STL runtime error"
753         )
754
755         if async_call:
756             # no result
757             self._target_duration = None
758             self._duration = None
759             self._received = None
760             self._sent = None
761             self._loss = None
762             self._latency = None
763
764             xstats = [None, None]
765             index = 0
766             for line in stdout.splitlines():
767                 if f"Xstats snapshot {index}: " in line:
768                     xstats[index] = line[19:]
769                     index += 1
770                 if index == 2:
771                     break
772             self._xstats = tuple(xstats)
773         else:
774             self._target_duration = duration
775             self._duration = duration
776             self._parse_traffic_results(stdout)
777
778     def send_traffic_on_tg(
779             self,
780             duration,
781             rate,
782             frame_size,
783             traffic_profile,
784             async_call=False,
785             ppta=1,
786             traffic_directions=2,
787             transaction_duration=0.0,
788             transaction_scale=0,
789             transaction_type=u"packet",
790             duration_limit=0.0,
791             use_latency=False,
792             ramp_up_rate=None,
793             ramp_up_duration=None,
794             state_timeout=240.0,
795             ramp_up_only=False,
796         ):
797         """Send traffic from all configured interfaces on TG.
798
799         In async mode, xstats is stored internally,
800         to enable getting correct result when stopping the traffic.
801         In both modes, stdout is returned,
802         but _parse_traffic_results only works in sync output.
803
804         Note that traffic generator uses DPDK driver which might
805         reorder port numbers based on wiring and PCI numbering.
806         This method handles that, so argument values are invariant,
807         but you can see swapped valued in debug logs.
808
809         When transaction_scale is specified, the duration value is ignored
810         and the needed time is computed. For cases where this results in
811         to too long measurement (e.g. teardown trial with small rate),
812         duration_limit is applied (of non-zero), so the trial is stopped sooner.
813
814         Bidirectional STL profiles are treated as transactions with two packets.
815
816         The return value is None for async.
817
818         :param duration: Duration of test traffic generation in seconds.
819         :param rate: Traffic rate in transactions per second.
820         :param frame_size: Frame size (L2) in Bytes.
821         :param traffic_profile: Module name as a traffic profile identifier.
822             See GPL/traffic_profiles/trex for implemented modules.
823         :param async_call: Async mode.
824         :param ppta: Packets per transaction, aggregated over directions.
825             Needed for udp_pps which does not have a good transaction counter,
826             so we need to compute expected number of packets.
827             Default: 1.
828         :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
829             Default: 2
830         :param transaction_duration: Total expected time to close transaction.
831         :param transaction_scale: Number of transactions to perform.
832             0 (default) means unlimited.
833         :param transaction_type: An identifier specifying which counters
834             and formulas to use when computing attempted and failed
835             transactions. Default: "packet".
836         :param duration_limit: Zero or maximum limit for computed (or given)
837             duration.
838         :param use_latency: Whether to measure latency during the trial.
839             Default: False.
840         :param ramp_up_rate: Rate to use in ramp-up trials [pps].
841         :param ramp_up_duration: Duration of ramp-up trials [s].
842         :param state_timeout: Time of life of DUT state [s].
843         :param ramp_up_only: If true, do not perform main trial measurement.
844         :type duration: float
845         :type rate: float
846         :type frame_size: str
847         :type traffic_profile: str
848         :type async_call: bool
849         :type ppta: int
850         :type traffic_directions: int
851         :type transaction_duration: float
852         :type transaction_scale: int
853         :type transaction_type: str
854         :type duration_limit: float
855         :type use_latency: bool
856         :type ramp_up_rate: float
857         :type ramp_up_duration: float
858         :type state_timeout: float
859         :type ramp_up_only: bool
860         :returns: TG results.
861         :rtype: ReceiveRateMeasurement or None
862         :raises ValueError: If TG traffic profile is not supported.
863         """
864         self.set_rate_provider_defaults(
865             frame_size=frame_size,
866             traffic_profile=traffic_profile,
867             ppta=ppta,
868             traffic_directions=traffic_directions,
869             transaction_duration=transaction_duration,
870             transaction_scale=transaction_scale,
871             transaction_type=transaction_type,
872             duration_limit=duration_limit,
873             use_latency=use_latency,
874             ramp_up_rate=ramp_up_rate,
875             ramp_up_duration=ramp_up_duration,
876             state_timeout=state_timeout,
877         )
878         return self._send_traffic_on_tg_with_ramp_up(
879             duration=duration,
880             rate=rate,
881             async_call=async_call,
882             ramp_up_only=ramp_up_only,
883         )
884
885     def _send_traffic_on_tg_internal(
886             self, duration, rate, async_call=False):
887         """Send traffic from all configured interfaces on TG.
888
889         This is an internal function, it assumes set_rate_provider_defaults
890         has been called to remember most values.
891         The reason why need to remember various values is that
892         the traffic can be asynchronous, and parsing needs those values.
893         The reason why this is is a separate function from the one
894         which calls set_rate_provider_defaults is that some search algorithms
895         need to specify their own values, and we do not want the measure call
896         to overwrite them with defaults.
897
898         This function is used both for automated ramp-up trials
899         and for explicitly called trials.
900
901         :param duration: Duration of test traffic generation in seconds.
902         :param rate: Traffic rate in transactions per second.
903         :param async_call: Async mode.
904         :type duration: float
905         :type rate: float
906         :type async_call: bool
907         :returns: TG results.
908         :rtype: ReceiveRateMeasurement or None
909         :raises ValueError: If TG traffic profile is not supported.
910         """
911         subtype = check_subtype(self._node)
912         if subtype == NodeSubTypeTG.TREX:
913             if u"trex-astf" in self.traffic_profile:
914                 self.trex_astf_start_remote_exec(
915                     duration, float(rate), async_call
916                 )
917             elif u"trex-stl" in self.traffic_profile:
918                 unit_rate_str = str(rate) + u"pps"
919                 self.trex_stl_start_remote_exec(
920                     duration, unit_rate_str, async_call
921                 )
922             else:
923                 raise ValueError(u"Unsupported T-Rex traffic profile!")
924
925         return None if async_call else self._get_measurement_result()
926
927     def _send_traffic_on_tg_with_ramp_up(
928             self, duration, rate, async_call=False, ramp_up_only=False):
929         """Send traffic from all interfaces on TG, maybe after ramp-up.
930
931         This is an internal function, it assumes set_rate_provider_defaults
932         has been called to remember most values.
933         The reason why need to remember various values is that
934         the traffic can be asynchronous, and parsing needs those values.
935         The reason why this is a separate function from the one
936         which calls set_rate_provider_defaults is that some search algorithms
937         need to specify their own values, and we do not want the measure call
938         to overwrite them with defaults.
939
940         If ramp-up tracking is detected, a computation is performed,
941         and if state timeout is near, trial at ramp-up rate and duration
942         is inserted before the main trial measurement.
943
944         The ramp_up_only parameter forces a ramp-up without immediate
945         trial measurement, which is useful in case self remembers
946         a previous ramp-up trial that belongs to a different test (phase).
947
948         Return None if trial is async or ramp-up only.
949
950         :param duration: Duration of test traffic generation in seconds.
951         :param rate: Traffic rate in transactions per second.
952         :param async_call: Async mode.
953         :param ramp_up_only: If true, do not perform main trial measurement.
954         :type duration: float
955         :type rate: float
956         :type async_call: bool
957         :type ramp_up_only: bool
958         :returns: TG results.
959         :rtype: ReceiveRateMeasurement or None
960         :raises ValueError: If TG traffic profile is not supported.
961         """
962         complete = False
963         if self.ramp_up_rate:
964             # Figure out whether we need to insert a ramp-up trial.
965             if ramp_up_only or self.ramp_up_start is None:
966                 # We never ramped up yet (at least not in this test case).
967                 ramp_up_needed = True
968             else:
969                 # We ramped up before, but maybe it was too long ago.
970                 # Adding a constant overhead to be safe.
971                 time_now = time.monotonic() + 1.0
972                 computed_duration, complete = self._compute_duration(
973                     duration=duration,
974                     multiplier=rate,
975                 )
976                 # There are two conditions for inserting ramp-up.
977                 # If early sessions are expiring already,
978                 # or if late sessions are to expire before measurement is over.
979                 ramp_up_start_delay = time_now - self.ramp_up_start
980                 ramp_up_stop_delay = time_now - self.ramp_up_stop
981                 ramp_up_stop_delay += computed_duration
982                 bigger_delay = max(ramp_up_start_delay, ramp_up_stop_delay)
983                 # Final boolean decision.
984                 ramp_up_needed = (bigger_delay >= self.state_timeout)
985             if ramp_up_needed:
986                 logger.debug(
987                     u"State may time out during next real trial, "
988                     u"inserting a ramp-up trial."
989                 )
990                 self.ramp_up_start = time.monotonic()
991                 self._send_traffic_on_tg_internal(
992                     duration=self.ramp_up_duration,
993                     rate=self.ramp_up_rate,
994                     async_call=async_call,
995                 )
996                 self.ramp_up_stop = time.monotonic()
997                 logger.debug(u"Ramp-up done.")
998             else:
999                 logger.debug(
1000                     u"State will probably not time out during next real trial, "
1001                     u"no ramp-up trial needed just yet."
1002                 )
1003         if ramp_up_only:
1004             return None
1005         trial_start = time.monotonic()
1006         result = self._send_traffic_on_tg_internal(
1007             duration=duration,
1008             rate=rate,
1009             async_call=async_call,
1010         )
1011         trial_end = time.monotonic()
1012         if self.ramp_up_rate:
1013             # Optimization: No loss acts as a good ramp-up, if it was complete.
1014             if complete and result is not None and result.loss_count == 0:
1015                 logger.debug(u"Good trial acts as a ramp-up")
1016                 self.ramp_up_start = trial_start
1017                 self.ramp_up_stop = trial_end
1018             else:
1019                 logger.debug(u"Loss or incomplete, does not act as a ramp-up.")
1020         return result
1021
1022     def no_traffic_loss_occurred(self):
1023         """Fail if loss occurred in traffic run.
1024
1025         :returns: nothing
1026         :raises Exception: If loss occured.
1027         """
1028         if self._loss is None:
1029             raise RuntimeError(u"The traffic generation has not been issued")
1030         if self._loss != u"0":
1031             raise RuntimeError(f"Traffic loss occurred: {self._loss}")
1032
1033     def fail_if_no_traffic_forwarded(self):
1034         """Fail if no traffic forwarded.
1035
1036         :returns: nothing
1037         :raises Exception: If no traffic forwarded.
1038         """
1039         if self._received is None:
1040             raise RuntimeError(u"The traffic generation has not been issued")
1041         if self._received == 0:
1042             raise RuntimeError(u"No traffic forwarded")
1043
1044     def partial_traffic_loss_accepted(
1045             self, loss_acceptance, loss_acceptance_type):
1046         """Fail if loss is higher then accepted in traffic run.
1047
1048         :param loss_acceptance: Permitted drop ratio or frames count.
1049         :param loss_acceptance_type: Type of permitted loss.
1050         :type loss_acceptance: float
1051         :type loss_acceptance_type: LossAcceptanceType
1052         :returns: nothing
1053         :raises Exception: If loss is above acceptance criteria.
1054         """
1055         if self._loss is None:
1056             raise Exception(u"The traffic generation has not been issued")
1057
1058         if loss_acceptance_type == u"percentage":
1059             loss = (float(self._loss) / float(self._sent)) * 100
1060         elif loss_acceptance_type == u"frames":
1061             loss = float(self._loss)
1062         else:
1063             raise Exception(u"Loss acceptance type not supported")
1064
1065         if loss > float(loss_acceptance):
1066             raise Exception(
1067                 f"Traffic loss {loss} above loss acceptance: {loss_acceptance}"
1068             )
1069
1070     def _parse_traffic_results(self, stdout):
1071         """Parse stdout of scripts into fields of self.
1072
1073         Block of code to reuse, by sync start, or stop after async.
1074
1075         :param stdout: Text containing the standard output.
1076         :type stdout: str
1077         """
1078         subtype = check_subtype(self._node)
1079         if subtype == NodeSubTypeTG.TREX:
1080             # Last line from console output
1081             line = stdout.splitlines()[-1]
1082             results = line.split(u";")
1083             if results[-1] in (u" ", u""):
1084                 results.pop(-1)
1085             self._result = dict()
1086             for result in results:
1087                 key, value = result.split(u"=", maxsplit=1)
1088                 self._result[key.strip()] = value
1089             logger.info(f"TrafficGen results:\n{self._result}")
1090             self._received = int(self._result.get(u"total_received"), 0)
1091             self._sent = int(self._result.get(u"total_sent", 0))
1092             self._loss = int(self._result.get(u"frame_loss", 0))
1093             self._approximated_duration = \
1094                 self._result.get(u"approximated_duration", 0.0)
1095             if u"manual" not in str(self._approximated_duration):
1096                 self._approximated_duration = float(self._approximated_duration)
1097             self._latency = list()
1098             self._latency.append(self._result.get(u"latency_stream_0(usec)"))
1099             self._latency.append(self._result.get(u"latency_stream_1(usec)"))
1100             if self._mode == TrexMode.ASTF:
1101                 self._l7_data = dict()
1102                 self._l7_data[u"client"] = dict()
1103                 self._l7_data[u"client"][u"sent"] = \
1104                     int(self._result.get(u"client_sent", 0))
1105                 self._l7_data[u"client"][u"received"] = \
1106                     int(self._result.get(u"client_received", 0))
1107                 self._l7_data[u"client"][u"active_flows"] = \
1108                     int(self._result.get(u"client_active_flows", 0))
1109                 self._l7_data[u"client"][u"established_flows"] = \
1110                     int(self._result.get(u"client_established_flows", 0))
1111                 self._l7_data[u"client"][u"traffic_duration"] = \
1112                     float(self._result.get(u"client_traffic_duration", 0.0))
1113                 self._l7_data[u"client"][u"err_rx_throttled"] = \
1114                     int(self._result.get(u"client_err_rx_throttled", 0))
1115                 self._l7_data[u"client"][u"err_c_nf_throttled"] = \
1116                     int(self._result.get(u"client_err_nf_throttled", 0))
1117                 self._l7_data[u"client"][u"err_flow_overflow"] = \
1118                     int(self._result.get(u"client_err_flow_overflow", 0))
1119                 self._l7_data[u"server"] = dict()
1120                 self._l7_data[u"server"][u"active_flows"] = \
1121                     int(self._result.get(u"server_active_flows", 0))
1122                 self._l7_data[u"server"][u"established_flows"] = \
1123                     int(self._result.get(u"server_established_flows", 0))
1124                 self._l7_data[u"server"][u"traffic_duration"] = \
1125                     float(self._result.get(u"server_traffic_duration", 0.0))
1126                 self._l7_data[u"server"][u"err_rx_throttled"] = \
1127                     int(self._result.get(u"client_err_rx_throttled", 0))
1128                 if u"udp" in self.traffic_profile:
1129                     self._l7_data[u"client"][u"udp"] = dict()
1130                     self._l7_data[u"client"][u"udp"][u"connects"] = \
1131                         int(self._result.get(u"client_udp_connects", 0))
1132                     self._l7_data[u"client"][u"udp"][u"closed_flows"] = \
1133                         int(self._result.get(u"client_udp_closed", 0))
1134                     self._l7_data[u"client"][u"udp"][u"tx_bytes"] = \
1135                         int(self._result.get(u"client_udp_tx_bytes", 0))
1136                     self._l7_data[u"client"][u"udp"][u"rx_bytes"] = \
1137                         int(self._result.get(u"client_udp_rx_bytes", 0))
1138                     self._l7_data[u"client"][u"udp"][u"tx_packets"] = \
1139                         int(self._result.get(u"client_udp_tx_packets", 0))
1140                     self._l7_data[u"client"][u"udp"][u"rx_packets"] = \
1141                         int(self._result.get(u"client_udp_rx_packets", 0))
1142                     self._l7_data[u"client"][u"udp"][u"keep_drops"] = \
1143                         int(self._result.get(u"client_udp_keep_drops", 0))
1144                     self._l7_data[u"client"][u"udp"][u"err_cwf"] = \
1145                         int(self._result.get(u"client_err_cwf", 0))
1146                     self._l7_data[u"server"][u"udp"] = dict()
1147                     self._l7_data[u"server"][u"udp"][u"accepted_flows"] = \
1148                         int(self._result.get(u"server_udp_accepts", 0))
1149                     self._l7_data[u"server"][u"udp"][u"closed_flows"] = \
1150                         int(self._result.get(u"server_udp_closed", 0))
1151                     self._l7_data[u"server"][u"udp"][u"tx_bytes"] = \
1152                         int(self._result.get(u"server_udp_tx_bytes", 0))
1153                     self._l7_data[u"server"][u"udp"][u"rx_bytes"] = \
1154                         int(self._result.get(u"server_udp_rx_bytes", 0))
1155                     self._l7_data[u"server"][u"udp"][u"tx_packets"] = \
1156                         int(self._result.get(u"server_udp_tx_packets", 0))
1157                     self._l7_data[u"server"][u"udp"][u"rx_packets"] = \
1158                         int(self._result.get(u"server_udp_rx_packets", 0))
1159                 elif u"tcp" in self.traffic_profile:
1160                     self._l7_data[u"client"][u"tcp"] = dict()
1161                     self._l7_data[u"client"][u"tcp"][u"initiated_flows"] = \
1162                         int(self._result.get(u"client_tcp_connect_inits", 0))
1163                     self._l7_data[u"client"][u"tcp"][u"connects"] = \
1164                         int(self._result.get(u"client_tcp_connects", 0))
1165                     self._l7_data[u"client"][u"tcp"][u"closed_flows"] = \
1166                         int(self._result.get(u"client_tcp_closed", 0))
1167                     self._l7_data[u"client"][u"tcp"][u"connattempt"] = \
1168                         int(self._result.get(u"client_tcp_connattempt", 0))
1169                     self._l7_data[u"client"][u"tcp"][u"tx_bytes"] = \
1170                         int(self._result.get(u"client_tcp_tx_bytes", 0))
1171                     self._l7_data[u"client"][u"tcp"][u"rx_bytes"] = \
1172                         int(self._result.get(u"client_tcp_rx_bytes", 0))
1173                     self._l7_data[u"server"][u"tcp"] = dict()
1174                     self._l7_data[u"server"][u"tcp"][u"accepted_flows"] = \
1175                         int(self._result.get(u"server_tcp_accepts", 0))
1176                     self._l7_data[u"server"][u"tcp"][u"connects"] = \
1177                         int(self._result.get(u"server_tcp_connects", 0))
1178                     self._l7_data[u"server"][u"tcp"][u"closed_flows"] = \
1179                         int(self._result.get(u"server_tcp_closed", 0))
1180                     self._l7_data[u"server"][u"tcp"][u"tx_bytes"] = \
1181                         int(self._result.get(u"server_tcp_tx_bytes", 0))
1182                     self._l7_data[u"server"][u"tcp"][u"rx_bytes"] = \
1183                         int(self._result.get(u"server_tcp_rx_bytes", 0))
1184
1185     def _get_measurement_result(self):
1186         """Return the result of last measurement as ReceiveRateMeasurement.
1187
1188         Separate function, as measurements can end either by time
1189         or by explicit call, this is the common block at the end.
1190
1191         The target_tr field of ReceiveRateMeasurement is in
1192         transactions per second. Transmit count and loss count units
1193         depend on the transaction type. Usually they are in transactions
1194         per second, or aggregated packets per second.
1195
1196         :returns: Structure containing the result of the measurement.
1197         :rtype: ReceiveRateMeasurement
1198         """
1199         try:
1200             # Client duration seems to include a setup period
1201             # where TRex does not send any packets yet.
1202             # Server duration does not include it.
1203             server_data = self._l7_data[u"server"]
1204             approximated_duration = float(server_data[u"traffic_duration"])
1205         except (KeyError, AttributeError, ValueError, TypeError):
1206             approximated_duration = None
1207         try:
1208             if not approximated_duration:
1209                 approximated_duration = float(self._approximated_duration)
1210         except ValueError:  # "manual"
1211             approximated_duration = None
1212         if not approximated_duration:
1213             if self._duration and self._duration > 0:
1214                 # Known recomputed or target duration.
1215                 approximated_duration = self._duration
1216             else:
1217                 # It was an explicit stop.
1218                 if not self._stop_time:
1219                     raise RuntimeError(u"Unable to determine duration.")
1220                 approximated_duration = self._stop_time - self._start_time
1221         target_duration = self._target_duration
1222         if not target_duration:
1223             target_duration = approximated_duration
1224         transmit_rate = self._rate
1225         unsent = 0
1226         if self.transaction_type == u"packet":
1227             partial_attempt_count = self._sent
1228             packet_rate = transmit_rate * self.ppta
1229             # We have a float. TRex way of rounding it is not obvious.
1230             # The biggest source of mismatch is Inter Stream Gap.
1231             # So the code tolerates 10 usec of missing packets.
1232             expected_attempt_count = (target_duration - 1e-5) * packet_rate
1233             expected_attempt_count = math.ceil(expected_attempt_count)
1234             # TRex can send more.
1235             expected_attempt_count = max(expected_attempt_count, self._sent)
1236             unsent = expected_attempt_count - self._sent
1237             pass_count = self._received
1238             fail_count = expected_attempt_count - pass_count
1239         elif self.transaction_type == u"udp_cps":
1240             if not self.transaction_scale:
1241                 raise RuntimeError(u"Add support for no-limit udp_cps.")
1242             partial_attempt_count = self._l7_data[u"client"][u"sent"]
1243             # We do not care whether TG is slow, it should have attempted all.
1244             expected_attempt_count = self.transaction_scale
1245             unsent = expected_attempt_count - partial_attempt_count
1246             pass_count = self._l7_data[u"client"][u"received"]
1247             fail_count = expected_attempt_count - pass_count
1248         elif self.transaction_type == u"tcp_cps":
1249             if not self.transaction_scale:
1250                 raise RuntimeError(u"Add support for no-limit tcp_cps.")
1251             ctca = self._l7_data[u"client"][u"tcp"][u"connattempt"]
1252             partial_attempt_count = ctca
1253             # We do not care whether TG is slow, it should have attempted all.
1254             expected_attempt_count = self.transaction_scale
1255             unsent = expected_attempt_count - partial_attempt_count
1256             # From TCP point of view, server/connects counts full connections,
1257             # but we are testing NAT session so client/connects counts that
1258             # (half connections from TCP point of view).
1259             pass_count = self._l7_data[u"client"][u"tcp"][u"connects"]
1260             fail_count = expected_attempt_count - pass_count
1261         elif self.transaction_type == u"udp_pps":
1262             if not self.transaction_scale:
1263                 raise RuntimeError(u"Add support for no-limit udp_pps.")
1264             partial_attempt_count = self._sent
1265             expected_attempt_count = self.transaction_scale * self.ppta
1266             unsent = expected_attempt_count - self._sent
1267             fail_count = self._loss + unsent
1268         elif self.transaction_type == u"tcp_pps":
1269             if not self.transaction_scale:
1270                 raise RuntimeError(u"Add support for no-limit tcp_pps.")
1271             partial_attempt_count = self._sent
1272             expected_attempt_count = self.transaction_scale * self.ppta
1273             # One loss-like scenario happens when TRex receives all packets
1274             # on L2 level, but is not fast enough to process them all
1275             # at L7 level, which leads to retransmissions.
1276             # Those manifest as opackets larger than expected.
1277             # A simple workaround is to add absolute difference.
1278             # Probability of retransmissions exactly cancelling
1279             # packets unsent due to duration stretching is quite low.
1280             unsent = abs(expected_attempt_count - self._sent)
1281             fail_count = self._loss + unsent
1282         else:
1283             raise RuntimeError(f"Unknown parsing {self.transaction_type!r}")
1284         if unsent and isinstance(self._approximated_duration, float):
1285             # Do not report unsent for "manual".
1286             logger.debug(f"Unsent packets/transactions: {unsent}")
1287         if fail_count < 0 and not self.negative_loss:
1288             fail_count = 0
1289         measurement = ReceiveRateMeasurement(
1290             duration=target_duration,
1291             target_tr=transmit_rate,
1292             transmit_count=expected_attempt_count,
1293             loss_count=fail_count,
1294             approximated_duration=approximated_duration,
1295             partial_transmit_count=partial_attempt_count,
1296         )
1297         measurement.latency = self.get_latency_int()
1298         return measurement
1299
1300     def measure(self, duration, transmit_rate):
1301         """Run trial measurement, parse and return results.
1302
1303         The input rate is for transactions. Stateles bidirectional traffic
1304         is understood as sequence of (asynchronous) transactions,
1305         two packets each.
1306
1307         The result units depend on test type, generally
1308         the count either transactions or packets (aggregated over directions).
1309
1310         Optionally, this method sleeps if measurement finished before
1311         the time specified as duration.
1312
1313         :param duration: Trial duration [s].
1314         :param transmit_rate: Target rate in transactions per second.
1315         :type duration: float
1316         :type transmit_rate: float
1317         :returns: Structure containing the result of the measurement.
1318         :rtype: ReceiveRateMeasurement
1319         :raises RuntimeError: If TG is not set or if node is not TG
1320             or if subtype is not specified.
1321         :raises NotImplementedError: If TG is not supported.
1322         """
1323         duration = float(duration)
1324         time_start = time.monotonic()
1325         time_stop = time_start + duration
1326         if self.resetter:
1327             self.resetter()
1328         result = self._send_traffic_on_tg_with_ramp_up(
1329             duration=duration,
1330             rate=transmit_rate,
1331             async_call=False,
1332         )
1333         logger.debug(f"trial measurement result: {result!r}")
1334         # In PLRsearch, computation needs the specified time to complete.
1335         if self.sleep_till_duration:
1336             sleeptime = time_stop - time.monotonic()
1337             if sleeptime > 0.0:
1338                 time.sleep(sleeptime)
1339         return result
1340
1341     def set_rate_provider_defaults(
1342             self,
1343             frame_size,
1344             traffic_profile,
1345             ppta=1,
1346             resetter=None,
1347             traffic_directions=2,
1348             transaction_duration=0.0,
1349             transaction_scale=0,
1350             transaction_type=u"packet",
1351             duration_limit=0.0,
1352             negative_loss=True,
1353             sleep_till_duration=False,
1354             use_latency=False,
1355             ramp_up_rate=None,
1356             ramp_up_duration=None,
1357             state_timeout=240.0,
1358         ):
1359         """Store values accessed by measure().
1360
1361         :param frame_size: Frame size identifier or value [B].
1362         :param traffic_profile: Module name as a traffic profile identifier.
1363             See GPL/traffic_profiles/trex for implemented modules.
1364         :param ppta: Packets per transaction, aggregated over directions.
1365             Needed for udp_pps which does not have a good transaction counter,
1366             so we need to compute expected number of packets.
1367             Default: 1.
1368         :param resetter: Callable to reset DUT state for repeated trials.
1369         :param traffic_directions: Traffic from packet counting point of view
1370             is bi- (2) or uni- (1) directional.
1371             Default: 2
1372         :param transaction_duration: Total expected time to close transaction.
1373         :param transaction_scale: Number of transactions to perform.
1374             0 (default) means unlimited.
1375         :param transaction_type: An identifier specifying which counters
1376             and formulas to use when computing attempted and failed
1377             transactions. Default: "packet".
1378         :param duration_limit: Zero or maximum limit for computed (or given)
1379             duration.
1380         :param negative_loss: If false, negative loss is reported as zero loss.
1381         :param sleep_till_duration: If true and measurement returned faster,
1382             sleep until it matches duration. Needed for PLRsearch.
1383         :param use_latency: Whether to measure latency during the trial.
1384             Default: False.
1385         :param ramp_up_rate: Rate to use in ramp-up trials [pps].
1386         :param ramp_up_duration: Duration of ramp-up trials [s].
1387         :param state_timeout: Time of life of DUT state [s].
1388         :type frame_size: str or int
1389         :type traffic_profile: str
1390         :type ppta: int
1391         :type resetter: Optional[Callable[[], None]]
1392         :type traffic_directions: int
1393         :type transaction_duration: float
1394         :type transaction_scale: int
1395         :type transaction_type: str
1396         :type duration_limit: float
1397         :type negative_loss: bool
1398         :type sleep_till_duration: bool
1399         :type use_latency: bool
1400         :type ramp_up_rate: float
1401         :type ramp_up_duration: float
1402         :type state_timeout: float
1403         """
1404         self.frame_size = frame_size
1405         self.traffic_profile = str(traffic_profile)
1406         self.resetter = resetter
1407         self.ppta = ppta
1408         self.traffic_directions = int(traffic_directions)
1409         self.transaction_duration = float(transaction_duration)
1410         self.transaction_scale = int(transaction_scale)
1411         self.transaction_type = str(transaction_type)
1412         self.duration_limit = float(duration_limit)
1413         self.negative_loss = bool(negative_loss)
1414         self.sleep_till_duration = bool(sleep_till_duration)
1415         self.use_latency = bool(use_latency)
1416         self.ramp_up_rate = float(ramp_up_rate)
1417         self.ramp_up_duration = float(ramp_up_duration)
1418         self.state_timeout = float(state_timeout)
1419
1420
1421 class OptimizedSearch:
1422     """Class to be imported as Robot Library, containing search keywords.
1423
1424     Aside of setting up measurer and forwarding arguments,
1425     the main business is to translate min/max rate from unidir to aggregated.
1426     """
1427
1428     @staticmethod
1429     def perform_optimized_ndrpdr_search(
1430             frame_size,
1431             traffic_profile,
1432             minimum_transmit_rate,
1433             maximum_transmit_rate,
1434             packet_loss_ratio=0.005,
1435             final_relative_width=0.005,
1436             final_trial_duration=30.0,
1437             initial_trial_duration=1.0,
1438             number_of_intermediate_phases=2,
1439             timeout=1200.0,
1440             ppta=1,
1441             resetter=None,
1442             traffic_directions=2,
1443             transaction_duration=0.0,
1444             transaction_scale=0,
1445             transaction_type=u"packet",
1446             use_latency=False,
1447             ramp_up_rate=None,
1448             ramp_up_duration=None,
1449             state_timeout=240.0,
1450             expansion_coefficient=4.0,
1451     ):
1452         """Setup initialized TG, perform optimized search, return intervals.
1453
1454         If transaction_scale is nonzero, all init and non-init trial durations
1455         are set to 1.0 (as they do not affect the real trial duration)
1456         and zero intermediate phases are used.
1457         This way no re-measurement happens.
1458         Warmup has to be handled via resetter or ramp-up mechanisms.
1459
1460         :param frame_size: Frame size identifier or value [B].
1461         :param traffic_profile: Module name as a traffic profile identifier.
1462             See GPL/traffic_profiles/trex for implemented modules.
1463         :param minimum_transmit_rate: Minimal load in transactions per second.
1464         :param maximum_transmit_rate: Maximal load in transactions per second.
1465         :param packet_loss_ratio: Ratio of packets lost, for PDR [1].
1466         :param final_relative_width: Final lower bound transmit rate
1467             cannot be more distant that this multiple of upper bound [1].
1468         :param final_trial_duration: Trial duration for the final phase [s].
1469         :param initial_trial_duration: Trial duration for the initial phase
1470             and also for the first intermediate phase [s].
1471         :param number_of_intermediate_phases: Number of intermediate phases
1472             to perform before the final phase [1].
1473         :param timeout: The search will fail itself when not finished
1474             before this overall time [s].
1475         :param ppta: Packets per transaction, aggregated over directions.
1476             Needed for udp_pps which does not have a good transaction counter,
1477             so we need to compute expected number of packets.
1478             Default: 1.
1479         :param resetter: Callable to reset DUT state for repeated trials.
1480         :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
1481             Default: 2
1482         :param transaction_duration: Total expected time to close transaction.
1483         :param transaction_scale: Number of transactions to perform.
1484             0 (default) means unlimited.
1485         :param transaction_type: An identifier specifying which counters
1486             and formulas to use when computing attempted and failed
1487             transactions. Default: "packet".
1488         :param use_latency: Whether to measure latency during the trial.
1489             Default: False.
1490         :param ramp_up_rate: Rate to use in ramp-up trials [pps].
1491         :param ramp_up_duration: Duration of ramp-up trials [s].
1492         :param state_timeout: Time of life of DUT state [s].
1493         :param expansion_coefficient: In external search multiply width by this.
1494         :type frame_size: str or int
1495         :type traffic_profile: str
1496         :type minimum_transmit_rate: float
1497         :type maximum_transmit_rate: float
1498         :type packet_loss_ratio: float
1499         :type final_relative_width: float
1500         :type final_trial_duration: float
1501         :type initial_trial_duration: float
1502         :type number_of_intermediate_phases: int
1503         :type timeout: float
1504         :type ppta: int
1505         :type resetter: Optional[Callable[[], None]]
1506         :type traffic_directions: int
1507         :type transaction_duration: float
1508         :type transaction_scale: int
1509         :type transaction_type: str
1510         :type use_latency: bool
1511         :type ramp_up_rate: float
1512         :type ramp_up_duration: float
1513         :type state_timeout: float
1514         :type expansion_coefficient: float
1515         :returns: Structure containing narrowed down NDR and PDR intervals
1516             and their measurements.
1517         :rtype: List[Receiverateinterval]
1518         :raises RuntimeError: If total duration is larger than timeout.
1519         """
1520         # we need instance of TrafficGenerator instantiated by Robot Framework
1521         # to be able to use trex_stl-*()
1522         tg_instance = BuiltIn().get_library_instance(
1523             u"resources.libraries.python.TrafficGenerator"
1524         )
1525         # Overrides for fixed transaction amount.
1526         if transaction_scale:
1527             initial_trial_duration = 1.0
1528             final_trial_duration = 1.0
1529             number_of_intermediate_phases = 0
1530             timeout += transaction_scale * 3e-4
1531         tg_instance.set_rate_provider_defaults(
1532             frame_size=frame_size,
1533             traffic_profile=traffic_profile,
1534             sleep_till_duration=False,
1535             ppta=ppta,
1536             resetter=resetter,
1537             traffic_directions=traffic_directions,
1538             transaction_duration=transaction_duration,
1539             transaction_scale=transaction_scale,
1540             transaction_type=transaction_type,
1541             use_latency=use_latency,
1542             ramp_up_rate=ramp_up_rate,
1543             ramp_up_duration=ramp_up_duration,
1544             state_timeout=state_timeout,
1545         )
1546         algorithm = MultipleLossRatioSearch(
1547             measurer=tg_instance,
1548             final_trial_duration=final_trial_duration,
1549             final_relative_width=final_relative_width,
1550             number_of_intermediate_phases=number_of_intermediate_phases,
1551             initial_trial_duration=initial_trial_duration,
1552             timeout=timeout,
1553             debug=logger.debug,
1554             expansion_coefficient=expansion_coefficient,
1555         )
1556         if packet_loss_ratio:
1557             packet_loss_ratios = [0.0, packet_loss_ratio]
1558         else:
1559             # Happens in reconf tests.
1560             packet_loss_ratios = [packet_loss_ratio]
1561         results = algorithm.narrow_down_intervals(
1562             min_rate=minimum_transmit_rate,
1563             max_rate=maximum_transmit_rate,
1564             packet_loss_ratios=packet_loss_ratios,
1565         )
1566         return results
1567
1568     @staticmethod
1569     def perform_soak_search(
1570             frame_size,
1571             traffic_profile,
1572             minimum_transmit_rate,
1573             maximum_transmit_rate,
1574             plr_target=1e-7,
1575             tdpt=0.1,
1576             initial_count=50,
1577             timeout=7200.0,
1578             ppta=1,
1579             resetter=None,
1580             trace_enabled=False,
1581             traffic_directions=2,
1582             transaction_duration=0.0,
1583             transaction_scale=0,
1584             transaction_type=u"packet",
1585             use_latency=False,
1586             ramp_up_rate=None,
1587             ramp_up_duration=None,
1588             state_timeout=240.0,
1589     ):
1590         """Setup initialized TG, perform soak search, return avg and stdev.
1591
1592         :param frame_size: Frame size identifier or value [B].
1593         :param traffic_profile: Module name as a traffic profile identifier.
1594             See GPL/traffic_profiles/trex for implemented modules.
1595         :param minimum_transmit_rate: Minimal load in transactions per second.
1596         :param maximum_transmit_rate: Maximal load in transactions per second.
1597         :param plr_target: Ratio of packets lost to achieve [1].
1598         :param tdpt: Trial duration per trial.
1599             The algorithm linearly increases trial duration with trial number,
1600             this is the increment between succesive trials, in seconds.
1601         :param initial_count: Offset to apply before the first trial.
1602             For example initial_count=50 makes first trial to be 51*tdpt long.
1603             This is needed because initial "search" phase of integrator
1604             takes significant time even without any trial results.
1605         :param timeout: The search will stop after this overall time [s].
1606         :param ppta: Packets per transaction, aggregated over directions.
1607             Needed for udp_pps which does not have a good transaction counter,
1608             so we need to compute expected number of packets.
1609             Default: 1.
1610         :param resetter: Callable to reset DUT state for repeated trials.
1611         :param trace_enabled: True if trace enabled else False.
1612             This is very verbose tracing on numeric computations,
1613             do not use in production.
1614             Default: False
1615         :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
1616             Default: 2
1617         :param transaction_duration: Total expected time to close transaction.
1618         :param transaction_scale: Number of transactions to perform.
1619             0 (default) means unlimited.
1620         :param transaction_type: An identifier specifying which counters
1621             and formulas to use when computing attempted and failed
1622             transactions. Default: "packet".
1623         :param use_latency: Whether to measure latency during the trial.
1624             Default: False.
1625         :param ramp_up_rate: Rate to use in ramp-up trials [pps].
1626         :param ramp_up_duration: Duration of ramp-up trials [s].
1627         :param state_timeout: Time of life of DUT state [s].
1628         :type frame_size: str or int
1629         :type traffic_profile: str
1630         :type minimum_transmit_rate: float
1631         :type maximum_transmit_rate: float
1632         :type plr_target: float
1633         :type initial_count: int
1634         :type timeout: float
1635         :type ppta: int
1636         :type resetter: Optional[Callable[[], None]]
1637         :type trace_enabled: bool
1638         :type traffic_directions: int
1639         :type transaction_duration: float
1640         :type transaction_scale: int
1641         :type transaction_type: str
1642         :type use_latency: bool
1643         :type ramp_up_rate: float
1644         :type ramp_up_duration: float
1645         :type state_timeout: float
1646         :returns: Average and stdev of estimated aggregated rate giving PLR.
1647         :rtype: 2-tuple of float
1648         """
1649         tg_instance = BuiltIn().get_library_instance(
1650             u"resources.libraries.python.TrafficGenerator"
1651         )
1652         # Overrides for fixed transaction amount.
1653         if transaction_scale:
1654             timeout = 7200.0
1655         tg_instance.set_rate_provider_defaults(
1656             frame_size=frame_size,
1657             traffic_profile=traffic_profile,
1658             negative_loss=False,
1659             sleep_till_duration=True,
1660             ppta=ppta,
1661             resetter=resetter,
1662             traffic_directions=traffic_directions,
1663             transaction_duration=transaction_duration,
1664             transaction_scale=transaction_scale,
1665             transaction_type=transaction_type,
1666             use_latency=use_latency,
1667             ramp_up_rate=ramp_up_rate,
1668             ramp_up_duration=ramp_up_duration,
1669             state_timeout=state_timeout,
1670         )
1671         algorithm = PLRsearch(
1672             measurer=tg_instance,
1673             trial_duration_per_trial=tdpt,
1674             packet_loss_ratio_target=plr_target,
1675             trial_number_offset=initial_count,
1676             timeout=timeout,
1677             trace_enabled=trace_enabled,
1678         )
1679         result = algorithm.search(
1680             min_rate=minimum_transmit_rate,
1681             max_rate=maximum_transmit_rate,
1682         )
1683         return result