Support existing test types with ASTF
[csit.git] / resources / libraries / python / TrafficGenerator.py
1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Performance testing traffic generator library."""
15
16 import time
17
18 from robot.api import logger
19 from robot.libraries.BuiltIn import BuiltIn
20
21 from .Constants import Constants
22 from .CpuUtils import CpuUtils
23 from .DropRateSearch import DropRateSearch
24 from .MLRsearch.AbstractMeasurer import AbstractMeasurer
25 from .MLRsearch.MultipleLossRatioSearch import MultipleLossRatioSearch
26 from .MLRsearch.ReceiveRateMeasurement import ReceiveRateMeasurement
27 from .PLRsearch.PLRsearch import PLRsearch
28 from .OptionString import OptionString
29 from .ssh import exec_cmd_no_error, exec_cmd
30 from .topology import NodeType
31 from .topology import NodeSubTypeTG
32 from .topology import Topology
33
34 __all__ = [u"TGDropRateSearchImpl", u"TrafficGenerator", u"OptimizedSearch"]
35
36
def check_subtype(node):
    """Detect and return the TG subtype of a node, or raise.

    Only the TRex subtype is recognized for now,
    but the checks are written so more subtypes can be added later.

    :param node: Topology node to check. Can be None.
    :type node: dict or NoneType
    :returns: Subtype detected.
    :rtype: NodeSubTypeTG
    :raises RuntimeError: If node is not supported, message explains how.
    """
    node_type = node.get(u"type")
    if node_type is None:
        raise RuntimeError(u"Node type is not defined")
    if node_type != NodeType.TG:
        raise RuntimeError(f"Node type is {node[u'type']!r}, not a TG")
    subtype = node.get(u"subtype")
    if subtype is None:
        raise RuntimeError(u"TG subtype is not defined")
    if subtype != NodeSubTypeTG.TREX:
        raise RuntimeError(f"TG subtype {node[u'subtype']!r} is not supported")
    return NodeSubTypeTG.TREX
60
61
class TGDropRateSearchImpl(DropRateSearch):
    """Drop Rate Search implementation backed by the shared TG library."""

    def measure_loss(
            self, rate, frame_size, loss_acceptance, loss_acceptance_type,
            traffic_profile):
        """Run one traffic trial and evaluate the measured loss.

        :param rate: Offered traffic load.
        :param frame_size: Size of frame.
        :param loss_acceptance: Permitted drop ratio or frames count.
        :param loss_acceptance_type: Type of permitted loss.
        :param traffic_profile: Module name as a traffic profile identifier.
            See GPL/traffic_profiles/trex for implemented modules.
        :type rate: float
        :type frame_size: str
        :type loss_acceptance: float
        :type loss_acceptance_type: LossAcceptanceType
        :type traffic_profile: str
        :returns: Whether the measured loss is within the acceptance.
        :rtype: bool
        :raises NotImplementedError: If TG is not supported.
        :raises RuntimeError: If TG is not specified.
        """
        # Robot Framework instantiates the TrafficGenerator library;
        # fetch that instance so trex_stl_*() methods can be reused here.
        tg_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.TrafficGenerator"
        )
        if check_subtype(tg_instance.node) != NodeSubTypeTG.TREX:
            return False
        unit_rate = f"{rate}{self.get_rate_type_str()}"
        tg_instance.trex_stl_start_remote_exec(
            self.get_duration(), unit_rate, frame_size, traffic_profile
        )
        loss = tg_instance.get_loss()
        if self.loss_acceptance_type_is_percentage():
            # Convert absolute loss to percentage of sent packets.
            loss = float(loss) / float(tg_instance.get_sent()) * 100
        logger.trace(
            f"comparing: {loss} < {loss_acceptance} {loss_acceptance_type}"
        )
        return float(loss) <= float(loss_acceptance)

    def get_latency(self):
        """Return min/avg/max latency from the shared TG instance.

        :returns: Latency stats.
        :rtype: list
        """
        tg_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.TrafficGenerator"
        )
        return tg_instance.get_latency_int()
120
121
class TrexMode:
    """Enumerates the supported modes of the T-Rex traffic generator."""

    # Advanced stateful mode.
    ASTF = u"ASTF"
    # Stateless mode.
    STL = u"STL"
128
129
130 # TODO: Pylint says too-many-instance-attributes.
131 class TrafficGenerator(AbstractMeasurer):
132     """Traffic Generator."""
133
134     # TODO: Remove "trex" from lines which could work with other TGs.
135
136     # Use one instance of TrafficGenerator for all tests in test suite
137     ROBOT_LIBRARY_SCOPE = u"TEST SUITE"
138
139     def __init__(self):
140         # TODO: Separate into few dataclasses/dicts.
141         #       Pylint dislikes large unstructured state, and it is right.
142         self._node = None
143         self._mode = None
144         # TG interface order mapping
145         self._ifaces_reordered = False
146         # Result holding fields, to be removed.
147         self._result = None
148         self._loss = None
149         self._sent = None
150         self._latency = None
151         self._received = None
152         self._approximated_rate = None
153         self._approximated_duration = None
154         self._l7_data = None
155         # Measurement input fields, needed for async stop result.
156         self._start_time = None
157         self._stop_time = None
158         self._rate = None
159         self._target_duration = None
160         self._duration = None
161         # Other input parameters, not knowable from measure() signature.
162         self.frame_size = None
163         self.traffic_profile = None
164         self.traffic_directions = None
165         self.negative_loss = None
166         self.use_latency = None
167         self.ppta = None
168         self.resetter = None
169         self.transaction_scale = None
170         self.transaction_duration = None
171         self.sleep_till_duration = None
172         self.transaction_type = None
173         self.duration_limit = None
174         # Transient data needed for async measurements.
175         self._xstats = (None, None)
176         # TODO: Rename "xstats" to something opaque, so T-Rex is not privileged?
177
178     @property
179     def node(self):
180         """Getter.
181
182         :returns: Traffic generator node.
183         :rtype: dict
184         """
185         return self._node
186
187     def get_loss(self):
188         """Return number of lost packets.
189
190         :returns: Number of lost packets.
191         :rtype: str
192         """
193         return self._loss
194
195     def get_sent(self):
196         """Return number of sent packets.
197
198         :returns: Number of sent packets.
199         :rtype: str
200         """
201         return self._sent
202
203     def get_received(self):
204         """Return number of received packets.
205
206         :returns: Number of received packets.
207         :rtype: str
208         """
209         return self._received
210
211     def get_latency_int(self):
212         """Return rounded min/avg/max latency.
213
214         :returns: Latency stats.
215         :rtype: list
216         """
217         return self._latency
218
219     def get_approximated_rate(self):
220         """Return approximated rate computed as ratio of transmitted packets
221         over duration of trial.
222
223         :returns: Approximated rate.
224         :rtype: str
225         """
226         return self._approximated_rate
227
228     def get_l7_data(self):
229         """Return L7 data.
230
231         :returns: Number of received packets.
232         :rtype: dict
233         """
234         return self._l7_data
235
236     def check_mode(self, expected_mode):
237         """Check TG mode.
238
239         :param expected_mode: Expected traffic generator mode.
240         :type expected_mode: object
241         :raises RuntimeError: In case of unexpected TG mode.
242         """
243         if self._mode == expected_mode:
244             return
245         raise RuntimeError(
246             f"{self._node[u'subtype']} not running in {expected_mode} mode!"
247         )
248
249     # TODO: pylint says disable=too-many-locals.
    def initialize_traffic_generator(
            self, tg_node, tg_if1, tg_if2, tg_if1_adj_node, tg_if1_adj_if,
            tg_if2_adj_node, tg_if2_adj_if, osi_layer, tg_if1_dst_mac=None,
            tg_if2_dst_mac=None):
        """TG initialization.

        Writes /etc/trex_cfg.yaml on the TG node and starts T-Rex,
        choosing ASTF mode for L7 testing and STL otherwise.

        TODO: Document why do we need (and how do we use) _ifaces_reordered.

        :param tg_node: Traffic generator node.
        :param tg_if1: TG - name of first interface.
        :param tg_if2: TG - name of second interface.
        :param tg_if1_adj_node: TG if1 adjacent node.
        :param tg_if1_adj_if: TG if1 adjacent interface.
        :param tg_if2_adj_node: TG if2 adjacent node.
        :param tg_if2_adj_if: TG if2 adjacent interface.
        :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
        :param tg_if1_dst_mac: Interface 1 destination MAC address.
        :param tg_if2_dst_mac: Interface 2 destination MAC address.
        :type tg_node: dict
        :type tg_if1: str
        :type tg_if2: str
        :type tg_if1_adj_node: dict
        :type tg_if1_adj_if: str
        :type tg_if2_adj_node: dict
        :type tg_if2_adj_if: str
        :type osi_layer: str
        :type tg_if1_dst_mac: str
        :type tg_if2_dst_mac: str
        :returns: nothing
        :raises RuntimeError: In case of issue during initialization.
        """
        subtype = check_subtype(tg_node)
        if subtype == NodeSubTypeTG.TREX:
            self._node = tg_node
            # L7 testing requires the advanced stateful mode.
            self._mode = TrexMode.ASTF if osi_layer == u"L7" else TrexMode.STL
            if1 = dict()
            if2 = dict()
            if1[u"pci"] = Topology().get_interface_pci_addr(self._node, tg_if1)
            if2[u"pci"] = Topology().get_interface_pci_addr(self._node, tg_if2)
            if1[u"addr"] = Topology().get_interface_mac(self._node, tg_if1)
            if2[u"addr"] = Topology().get_interface_mac(self._node, tg_if2)

            # Destination MACs: for L2 the TG talks to itself (other port),
            # for L3/L7 it targets the adjacent (DUT) interfaces.
            if osi_layer == u"L2":
                if1[u"adj_addr"] = if2[u"addr"]
                if2[u"adj_addr"] = if1[u"addr"]
            elif osi_layer in (u"L3", u"L7"):
                if1[u"adj_addr"] = Topology().get_interface_mac(
                    tg_if1_adj_node, tg_if1_adj_if
                )
                if2[u"adj_addr"] = Topology().get_interface_mac(
                    tg_if2_adj_node, tg_if2_adj_if
                )
            else:
                raise ValueError(u"Unknown OSI layer!")

            # in case of switched environment we can override MAC addresses
            if tg_if1_dst_mac is not None and tg_if2_dst_mac is not None:
                if1[u"adj_addr"] = tg_if1_dst_mac
                if2[u"adj_addr"] = tg_if2_dst_mac

            # Keep interfaces in ascending PCI address order,
            # as the DPDK driver underneath T-Rex enumerates them that way.
            if min(if1[u"pci"], if2[u"pci"]) != if1[u"pci"]:
                if1, if2 = if2, if1
                self._ifaces_reordered = True

            master_thread_id, latency_thread_id, socket, threads = \
                CpuUtils.get_affinity_trex(
                    self._node, tg_if1, tg_if2,
                    tg_dtc=Constants.TREX_CORE_COUNT)

            if osi_layer in (u"L2", u"L3", u"L7"):
                # Write the T-Rex config via a shell heredoc on the TG node.
                exec_cmd_no_error(
                    self._node,
                    f"sh -c 'cat << EOF > /etc/trex_cfg.yaml\n"
                    f"- version: 2\n"
                    f"  c: {len(threads)}\n"
                    f"  limit_memory: {Constants.TREX_LIMIT_MEMORY}\n"
                    f"  interfaces: [\"{if1[u'pci']}\",\"{if2[u'pci']}\"]\n"
                    f"  port_info:\n"
                    f"      - dest_mac: \'{if1[u'adj_addr']}\'\n"
                    f"        src_mac: \'{if1[u'addr']}\'\n"
                    f"      - dest_mac: \'{if2[u'adj_addr']}\'\n"
                    f"        src_mac: \'{if2[u'addr']}\'\n"
                    f"  platform :\n"
                    f"      master_thread_id: {master_thread_id}\n"
                    f"      latency_thread_id: {latency_thread_id}\n"
                    f"      dual_if:\n"
                    f"          - socket: {socket}\n"
                    f"            threads: {threads}\n"
                    f"EOF'",
                    sudo=True, message=u"T-Rex config generation!"
                )
            else:
                raise ValueError(u"Unknown OSI layer!")

            TrafficGenerator.startup_trex(
                self._node, osi_layer, subtype=subtype
            )
347
348     @staticmethod
349     def startup_trex(tg_node, osi_layer, subtype=None):
350         """Startup sequence for the TRex traffic generator.
351
352         :param tg_node: Traffic generator node.
353         :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
354         :param subtype: Traffic generator sub-type.
355         :type tg_node: dict
356         :type osi_layer: str
357         :type subtype: NodeSubTypeTG
358         :raises RuntimeError: If T-Rex startup failed.
359         :raises ValueError: If OSI layer is not supported.
360         """
361         if not subtype:
362             subtype = check_subtype(tg_node)
363         if subtype == NodeSubTypeTG.TREX:
364             for _ in range(0, 3):
365                 # Kill TRex only if it is already running.
366                 cmd = u"sh -c \"pgrep t-rex && pkill t-rex && sleep 3 || true\""
367                 exec_cmd_no_error(
368                     tg_node, cmd, sudo=True, message=u"Kill TRex failed!"
369                 )
370
371                 # Configure TRex.
372                 ports = ''
373                 for port in tg_node[u"interfaces"].values():
374                     if u'Mellanox' not in port.get(u'model'):
375                         ports += f" {port.get(u'pci_address')}"
376
377                 cmd = f"sh -c \"cd {Constants.TREX_INSTALL_DIR}/scripts/ && " \
378                     f"./dpdk_nic_bind.py -u {ports} || true\""
379                 exec_cmd_no_error(
380                     tg_node, cmd, sudo=True,
381                     message=u"Unbind PCI ports from driver failed!"
382                 )
383
384                 # Start TRex.
385                 cd_cmd = f"cd '{Constants.TREX_INSTALL_DIR}/scripts/'"
386                 trex_cmd = OptionString([u"nohup", u"./t-rex-64"])
387                 trex_cmd.add(u"-i")
388                 trex_cmd.add(u"--prefix $(hostname)")
389                 trex_cmd.add(u"--hdrh")
390                 trex_cmd.add(u"--no-scapy-server")
391                 trex_cmd.add_if(u"--astf", osi_layer == u"L7")
392                 # OptionString does not create double space if extra is empty.
393                 trex_cmd.add(f"{Constants.TREX_EXTRA_CMDLINE}")
394                 inner_command = f"{cd_cmd} && {trex_cmd} > /tmp/trex.log 2>&1 &"
395                 cmd = f"sh -c \"{inner_command}\" > /dev/null"
396                 try:
397                     exec_cmd_no_error(tg_node, cmd, sudo=True)
398                 except RuntimeError:
399                     cmd = u"sh -c \"cat /tmp/trex.log\""
400                     exec_cmd_no_error(
401                         tg_node, cmd, sudo=True,
402                         message=u"Get TRex logs failed!"
403                     )
404                     raise RuntimeError(u"Start TRex failed!")
405
406                 # Test T-Rex API responsiveness.
407                 cmd = u"python3"
408                 cmd += f" {Constants.REMOTE_FW_DIR}/GPL/tools/trex/"
409                 if osi_layer in (u"L2", u"L3"):
410                     cmd += f"trex_stl_assert.py"
411                 elif osi_layer == u"L7":
412                     cmd += f"trex_astf_assert.py"
413                 else:
414                     raise ValueError(u"Unknown OSI layer!")
415                 try:
416                     exec_cmd_no_error(
417                         tg_node, cmd, sudo=True,
418                         message=u"T-Rex API is not responding!", retries=20
419                     )
420                 except RuntimeError:
421                     continue
422                 return
423             # After max retries TRex is still not responding to API critical
424             # error occurred.
425             exec_cmd(tg_node, u"cat /tmp/trex.log", sudo=True)
426             raise RuntimeError(u"Start T-Rex failed after multiple retries!")
427
428     @staticmethod
429     def is_trex_running(node):
430         """Check if T-Rex is running using pidof.
431
432         :param node: Traffic generator node.
433         :type node: dict
434         :returns: True if T-Rex is running otherwise False.
435         :rtype: bool
436         """
437         ret, _, _ = exec_cmd(node, u"pgrep t-rex", sudo=True)
438         return bool(int(ret) == 0)
439
440     @staticmethod
441     def teardown_traffic_generator(node):
442         """TG teardown.
443
444         :param node: Traffic generator node.
445         :type node: dict
446         :returns: nothing
447         :raises RuntimeError: If node type is not a TG,
448             or if T-Rex teardown fails.
449         """
450         subtype = check_subtype(node)
451         if subtype == NodeSubTypeTG.TREX:
452             exec_cmd_no_error(
453                 node,
454                 u"sh -c "
455                 u"\"if pgrep t-rex; then sudo pkill t-rex && sleep 3; fi\"",
456                 sudo=False,
457                 message=u"T-Rex kill failed!"
458             )
459
460     def trex_astf_stop_remote_exec(self, node):
461         """Execute T-Rex ASTF script on remote node over ssh to stop running
462         traffic.
463
464         Internal state is updated with measurement results.
465
466         :param node: T-Rex generator node.
467         :type node: dict
468         :raises RuntimeError: If stop traffic script fails.
469         """
470         command_line = OptionString().add(u"python3")
471         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
472         command_line.add(f"'{dirname}/trex_astf_stop.py'")
473         command_line.change_prefix(u"--")
474         for index, value in enumerate(self._xstats):
475             if value is not None:
476                 value = value.replace(u"'", u"\"")
477                 command_line.add_equals(f"xstat{index}", f"'{value}'")
478         stdout, _ = exec_cmd_no_error(
479             node, command_line,
480             message=u"T-Rex ASTF runtime error!"
481         )
482         self._parse_traffic_results(stdout)
483
484     def trex_stl_stop_remote_exec(self, node):
485         """Execute T-Rex STL script on remote node over ssh to stop running
486         traffic.
487
488         Internal state is updated with measurement results.
489
490         :param node: T-Rex generator node.
491         :type node: dict
492         :raises RuntimeError: If stop traffic script fails.
493         """
494         command_line = OptionString().add(u"python3")
495         dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
496         command_line.add(f"'{dirname}/trex_stl_stop.py'")
497         command_line.change_prefix(u"--")
498         for index, value in enumerate(self._xstats):
499             if value is not None:
500                 value = value.replace(u"'", u"\"")
501                 command_line.add_equals(f"xstat{index}", f"'{value}'")
502         stdout, _ = exec_cmd_no_error(
503             node, command_line,
504             message=u"T-Rex STL runtime error!"
505         )
506         self._parse_traffic_results(stdout)
507
508     def stop_traffic_on_tg(self):
509         """Stop all traffic on TG.
510
511         :returns: Structure containing the result of the measurement.
512         :rtype: ReceiveRateMeasurement
513         :raises ValueError: If TG traffic profile is not supported.
514         """
515         subtype = check_subtype(self._node)
516         if subtype != NodeSubTypeTG.TREX:
517             raise ValueError(f"Unsupported TG subtype: {subtype!r}")
518         if u"trex-astf" in self.traffic_profile:
519             self.trex_astf_stop_remote_exec(self._node)
520         elif u"trex-stl" in self.traffic_profile:
521             self.trex_stl_stop_remote_exec(self._node)
522         else:
523             raise ValueError(u"Unsupported T-Rex traffic profile!")
524         self._stop_time = time.monotonic()
525
526         return self.get_measurement_result()
527
    def trex_astf_start_remote_exec(
            self, duration, multiplier, async_call=False):
        """Execute T-Rex ASTF script on remote node over ssh to start running
        traffic.

        In sync mode, measurement results are stored internally.
        In async mode, initial data including xstats are stored internally.

        This method contains the logic to compute duration as maximum time
        if transaction_scale is nonzero.
        The transaction_scale argument defines (limits) how many transactions
        will be started in total. As that amount of transaction can take
        considerable time (sometimes due to explicit delays in the profile),
        the real time a trial needs to finish is computed here. For now,
        in that case the duration argument is ignored, assuming it comes
        from ASTF-unaware search algorithm. The overall time a single
        transaction needs is given in parameter transaction_duration,
        it includes both explicit delays and implicit time it takes
        to transfer data (or whatever the transaction does).

        Currently it is observed TRex does not start the ASTF traffic
        immediately, an ad-hoc constant is added to the computed duration
        to compensate for that.

        If transaction_scale is zero, duration is not recomputed.
        It is assumed the subsequent result parsing gets the real duration
        if the traffic stops sooner for any reason.

        Currently, it is assumed traffic profile defines a single transaction.
        To avoid heavy logic here, the input rate is expected to be in
        transactions per second, as that directly translates to TRex multiplier,
        (assuming the profile does not override the default cps value of one).

        :param duration: Time expressed in seconds for how long to send traffic.
        :param multiplier: Traffic rate in transactions per second.
        :param async_call: If enabled then don't wait for all incoming traffic.
        :type duration: float
        :type multiplier: int
        :type async_call: bool
        :raises RuntimeError: In case of T-Rex driver issue.
        """
        self.check_mode(TrexMode.ASTF)
        # DPDK may have reordered ports; map logical ports 0/1 accordingly.
        p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
        if not isinstance(duration, (float, int)):
            duration = float(duration)

        # Duration logic.
        computed_duration = duration
        if duration > 0.0:
            if self.transaction_scale:
                computed_duration = self.transaction_scale / multiplier
                # Log the computed duration,
                # so we can compare with what telemetry suggests
                # the real duration was.
                logger.debug(f"Expected duration {computed_duration}")
                # Ad-hoc compensation for TRex not starting ASTF traffic
                # immediately (see docstring).
                computed_duration += 0.1115
        # Else keep -1.
        if self.duration_limit:
            computed_duration = min(computed_duration, self.duration_limit)

        command_line = OptionString().add(u"python3")
        dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
        command_line.add(f"'{dirname}/trex_astf_profile.py'")
        command_line.change_prefix(u"--")
        dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
        command_line.add_with_value(
            u"profile", f"'{dirname}/{self.traffic_profile}.py'"
        )
        command_line.add_with_value(u"duration", f"{computed_duration!r}")
        command_line.add_with_value(u"frame_size", self.frame_size)
        command_line.add_with_value(u"multiplier", multiplier)
        command_line.add_with_value(u"port_0", p_0)
        command_line.add_with_value(u"port_1", p_1)
        command_line.add_with_value(
            u"traffic_directions", self.traffic_directions
        )
        command_line.add_if(u"async_start", async_call)
        command_line.add_if(u"latency", self.use_latency)
        command_line.add_if(u"force", Constants.TREX_SEND_FORCE)

        self._start_time = time.monotonic()
        self._rate = multiplier
        stdout, _ = exec_cmd_no_error(
            self._node, command_line, timeout=computed_duration + 10.0,
            message=u"T-Rex ASTF runtime error!"
        )

        if async_call:
            # no result
            self._target_duration = None
            self._duration = None
            self._received = None
            self._sent = None
            self._loss = None
            self._latency = None
            xstats = [None, None]
            # Pre-populate the L7 result structure with None placeholders,
            # so the async stop parsing has all keys available.
            self._l7_data = dict()
            self._l7_data[u"client"] = dict()
            self._l7_data[u"client"][u"active_flows"] = None
            self._l7_data[u"client"][u"established_flows"] = None
            self._l7_data[u"client"][u"traffic_duration"] = None
            self._l7_data[u"server"] = dict()
            self._l7_data[u"server"][u"active_flows"] = None
            self._l7_data[u"server"][u"established_flows"] = None
            self._l7_data[u"server"][u"traffic_duration"] = None
            if u"udp" in self.traffic_profile:
                self._l7_data[u"client"][u"udp"] = dict()
                self._l7_data[u"client"][u"udp"][u"connects"] = None
                self._l7_data[u"client"][u"udp"][u"closed_flows"] = None
                self._l7_data[u"client"][u"udp"][u"err_cwf"] = None
                self._l7_data[u"server"][u"udp"] = dict()
                self._l7_data[u"server"][u"udp"][u"accepted_flows"] = None
                self._l7_data[u"server"][u"udp"][u"closed_flows"] = None
            elif u"tcp" in self.traffic_profile:
                self._l7_data[u"client"][u"tcp"] = dict()
                self._l7_data[u"client"][u"tcp"][u"initiated_flows"] = None
                self._l7_data[u"client"][u"tcp"][u"connects"] = None
                self._l7_data[u"client"][u"tcp"][u"closed_flows"] = None
                self._l7_data[u"client"][u"tcp"][u"connattempt"] = None
                self._l7_data[u"server"][u"tcp"] = dict()
                self._l7_data[u"server"][u"tcp"][u"accepted_flows"] = None
                self._l7_data[u"server"][u"tcp"][u"connects"] = None
                self._l7_data[u"server"][u"tcp"][u"closed_flows"] = None
            else:
                logger.warn(u"Unsupported T-Rex ASTF traffic profile!")
            index = 0
            # Collect the two xstats snapshots printed on async start.
            # NOTE(review): line[19:] assumes the marker starts the line
            # (len(u"Xstats snapshot 0: ") == 19) -- confirm in the script.
            for line in stdout.splitlines():
                if f"Xstats snapshot {index}: " in line:
                    xstats[index] = line[19:]
                    index += 1
                if index == 2:
                    break
            self._xstats = tuple(xstats)
        else:
            self._target_duration = duration
            self._duration = computed_duration
            self._parse_traffic_results(stdout)
665
    def trex_stl_start_remote_exec(self, duration, rate, async_call=False):
        """Execute T-Rex STL script on remote node over ssh to start running
        traffic.

        In sync mode, measurement results are stored internally.
        In async mode, initial data including xstats are stored internally.

        Mode-unaware code (e.g. in search algorithms) works with transactions.
        To keep the logic simple, multiplier is set to that value.
        As bidirectional traffic profiles send packets in both directions,
        they are treated as transactions with two packets (one per direction).

        :param duration: Time expressed in seconds for how long to send traffic.
        :param rate: Traffic rate in transactions per second.
        :param async_call: If enabled then don't wait for all incoming traffic.
        :type duration: float
        :type rate: str
        :type async_call: bool
        :raises RuntimeError: In case of T-Rex driver issue.
        """
        self.check_mode(TrexMode.STL)
        # DPDK may have reordered ports; map logical ports 0/1 accordingly.
        p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
        if not isinstance(duration, (float, int)):
            duration = float(duration)
        if self.duration_limit:
            duration = min(duration, self.duration_limit)

        command_line = OptionString().add(u"python3")
        dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
        command_line.add(f"'{dirname}/trex_stl_profile.py'")
        command_line.change_prefix(u"--")
        dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
        command_line.add_with_value(
            u"profile", f"'{dirname}/{self.traffic_profile}.py'"
        )
        command_line.add_with_value(u"duration", f"{duration!r}")
        command_line.add_with_value(u"frame_size", self.frame_size)
        command_line.add_with_value(u"rate", f"{rate!r}")
        command_line.add_with_value(u"port_0", p_0)
        command_line.add_with_value(u"port_1", p_1)
        command_line.add_with_value(
            u"traffic_directions", self.traffic_directions
        )
        command_line.add_if(u"async_start", async_call)
        command_line.add_if(u"latency", self.use_latency)
        command_line.add_if(u"force", Constants.TREX_SEND_FORCE)

        # TODO: This is ugly. Handle parsing better.
        self._start_time = time.monotonic()
        # Strip a trailing "pps" unit if present; otherwise assume bare float.
        self._rate = float(rate[:-3]) if u"pps" in rate else float(rate)
        stdout, _ = exec_cmd_no_error(
            self._node, command_line, timeout=int(duration) + 60,
            message=u"T-Rex STL runtime error"
        )

        if async_call:
            # no result
            self._target_duration = None
            self._duration = None
            self._received = None
            self._sent = None
            self._loss = None
            self._latency = None

            xstats = [None, None]
            index = 0
            # Collect the two xstats snapshots printed on async start.
            # NOTE(review): line[19:] assumes the marker starts the line
            # (len(u"Xstats snapshot 0: ") == 19) -- confirm in the script.
            for line in stdout.splitlines():
                if f"Xstats snapshot {index}: " in line:
                    xstats[index] = line[19:]
                    index += 1
                if index == 2:
                    break
            self._xstats = tuple(xstats)
        else:
            self._target_duration = duration
            self._duration = duration
            self._parse_traffic_results(stdout)
743
744     def send_traffic_on_tg(
745             self,
746             duration,
747             rate,
748             frame_size,
749             traffic_profile,
750             async_call=False,
751             ppta=1,
752             traffic_directions=2,
753             transaction_duration=0.0,
754             transaction_scale=0,
755             transaction_type=u"packet",
756             duration_limit=0.0,
757             use_latency=False,
758         ):
759         """Send traffic from all configured interfaces on TG.
760
761         In async mode, xstats is stored internally,
762         to enable getting correct result when stopping the traffic.
763         In both modes, stdout is returned,
764         but _parse_traffic_results only works in sync output.
765
766         Note that traffic generator uses DPDK driver which might
767         reorder port numbers based on wiring and PCI numbering.
768         This method handles that, so argument values are invariant,
769         but you can see swapped valued in debug logs.
770
771         When transaction_scale is specified, the duration value is ignored
772         and the needed time is computed. For cases where this results in
773         to too long measurement (e.g. teardown trial with small rate),
774         duration_limit is applied (of non-zero), so the trial is stopped sooner.
775
776         Bidirectional STL profiles are treated as transactions with two packets.
777
778         :param duration: Duration of test traffic generation in seconds.
779         :param rate: Traffic rate in transactions per second.
780         :param frame_size: Frame size (L2) in Bytes.
781         :param traffic_profile: Module name as a traffic profile identifier.
782             See GPL/traffic_profiles/trex for implemented modules.
783         :param async_call: Async mode.
784         :param ppta: Packets per transaction, aggregated over directions.
785             Needed for udp_pps which does not have a good transaction counter,
786             so we need to compute expected number of packets.
787             Default: 1.
788         :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
789             Default: 2
790         :param transaction_duration: Total expected time to close transaction.
791         :param transaction_scale: Number of transactions to perform.
792             0 (default) means unlimited.
793         :param transaction_type: An identifier specifying which counters
794             and formulas to use when computing attempted and failed
795             transactions. Default: "packet".
796         :param duration_limit: Zero or maximum limit for computed (or given)
797             duration.
798         :param use_latency: Whether to measure latency during the trial.
799             Default: False.
800         :type duration: float
801         :type rate: float
802         :type frame_size: str
803         :type traffic_profile: str
804         :type async_call: bool
805         :type ppta: int
806         :type traffic_directions: int
807         :type transaction_duration: float
808         :type transaction_scale: int
809         :type transaction_type: str
810         :type duration_limit: float
811         :type use_latency: bool
812         :returns: TG results.
813         :rtype: str
814         :raises ValueError: If TG traffic profile is not supported.
815         """
816         self.set_rate_provider_defaults(
817             frame_size=frame_size,
818             traffic_profile=traffic_profile,
819             ppta=ppta,
820             traffic_directions=traffic_directions,
821             transaction_duration=transaction_duration,
822             transaction_scale=transaction_scale,
823             transaction_type=transaction_type,
824             duration_limit=duration_limit,
825             use_latency=use_latency,
826         )
827         self._send_traffic_on_tg_internal(duration, rate, async_call)
828
829     def _send_traffic_on_tg_internal(self, duration, rate, async_call=False):
830         """Send traffic from all configured interfaces on TG.
831
832         This is an internal function, it assumes set_rate_provider_defaults
833         has been called to remember most values.
834         The reason why need to remember various values is that
835         the traffic can be asynchronous, and parsing needs those values.
836         The reason why this is is a separate function from the one
837         which calls set_rate_provider_defaults is that some search algorithms
838         need to specify their own values, and we do not want the measure call
839         to overwrite them with defaults.
840
841         :param duration: Duration of test traffic generation in seconds.
842         :param rate: Traffic rate in transactions per second.
843         :param async_call: Async mode.
844         :type duration: float
845         :type rate: float
846         :type async_call: bool
847         :returns: TG results.
848         :rtype: str
849         :raises ValueError: If TG traffic profile is not supported.
850         """
851         subtype = check_subtype(self._node)
852         if subtype == NodeSubTypeTG.TREX:
853             if u"trex-astf" in self.traffic_profile:
854                 self.trex_astf_start_remote_exec(
855                     duration, float(rate), async_call
856                 )
857             elif u"trex-stl" in self.traffic_profile:
858                 unit_rate_str = str(rate) + u"pps"
859                 # TODO: Suport transaction_scale et al?
860                 self.trex_stl_start_remote_exec(
861                     duration, unit_rate_str, async_call
862                 )
863             else:
864                 raise ValueError(u"Unsupported T-Rex traffic profile!")
865
866         return self._result
867
868     def no_traffic_loss_occurred(self):
869         """Fail if loss occurred in traffic run.
870
871         :returns: nothing
872         :raises Exception: If loss occured.
873         """
874         if self._loss is None:
875             raise RuntimeError(u"The traffic generation has not been issued")
876         if self._loss != u"0":
877             raise RuntimeError(f"Traffic loss occurred: {self._loss}")
878
879     def fail_if_no_traffic_forwarded(self):
880         """Fail if no traffic forwarded.
881
882         TODO: Check number of passed transactions instead.
883
884         :returns: nothing
885         :raises Exception: If no traffic forwarded.
886         """
887         if self._received is None:
888             raise RuntimeError(u"The traffic generation has not been issued")
889         if self._received == u"0":
890             raise RuntimeError(u"No traffic forwarded")
891
892     def partial_traffic_loss_accepted(
893             self, loss_acceptance, loss_acceptance_type):
894         """Fail if loss is higher then accepted in traffic run.
895
896         :param loss_acceptance: Permitted drop ratio or frames count.
897         :param loss_acceptance_type: Type of permitted loss.
898         :type loss_acceptance: float
899         :type loss_acceptance_type: LossAcceptanceType
900         :returns: nothing
901         :raises Exception: If loss is above acceptance criteria.
902         """
903         if self._loss is None:
904             raise Exception(u"The traffic generation has not been issued")
905
906         if loss_acceptance_type == u"percentage":
907             loss = (float(self._loss) / float(self._sent)) * 100
908         elif loss_acceptance_type == u"frames":
909             loss = float(self._loss)
910         else:
911             raise Exception(u"Loss acceptance type not supported")
912
913         if loss > float(loss_acceptance):
914             raise Exception(
915                 f"Traffic loss {loss} above loss acceptance: {loss_acceptance}"
916             )
917
918     def _parse_traffic_results(self, stdout):
919         """Parse stdout of scripts into fields of self.
920
921         Block of code to reuse, by sync start, or stop after async.
922
923         :param stdout: Text containing the standard output.
924         :type stdout: str
925         """
926         subtype = check_subtype(self._node)
927         if subtype == NodeSubTypeTG.TREX:
928             # Last line from console output
929             line = stdout.splitlines()[-1]
930             results = line.split(u";")
931             if results[-1] in (u" ", u""):
932                 results.pop(-1)
933             self._result = dict()
934             for result in results:
935                 key, value = result.split(u"=", maxsplit=1)
936                 self._result[key.strip()] = value
937             logger.info(f"TrafficGen results:\n{self._result}")
938             self._received = int(self._result.get(u"total_received"), 0)
939             self._sent = int(self._result.get(u"total_sent", 0))
940             self._loss = int(self._result.get(u"frame_loss", 0))
941             self._approximated_duration = \
942                 self._result.get(u"approximated_duration", 0.0)
943             if u"manual" not in str(self._approximated_duration):
944                 self._approximated_duration = float(self._approximated_duration)
945             self._latency = list()
946             self._latency.append(self._result.get(u"latency_stream_0(usec)"))
947             self._latency.append(self._result.get(u"latency_stream_1(usec)"))
948             if self._mode == TrexMode.ASTF:
949                 self._l7_data = dict()
950                 self._l7_data[u"client"] = dict()
951                 self._l7_data[u"client"][u"sent"] = \
952                     int(self._result.get(u"client_sent", 0))
953                 self._l7_data[u"client"][u"received"] = \
954                     int(self._result.get(u"client_received", 0))
955                 self._l7_data[u"client"][u"active_flows"] = \
956                     int(self._result.get(u"client_active_flows", 0))
957                 self._l7_data[u"client"][u"established_flows"] = \
958                     int(self._result.get(u"client_established_flows", 0))
959                 self._l7_data[u"client"][u"traffic_duration"] = \
960                     float(self._result.get(u"client_traffic_duration", 0.0))
961                 self._l7_data[u"client"][u"err_rx_throttled"] = \
962                     int(self._result.get(u"client_err_rx_throttled", 0))
963                 self._l7_data[u"client"][u"err_c_nf_throttled"] = \
964                     int(self._result.get(u"client_err_nf_throttled", 0))
965                 self._l7_data[u"client"][u"err_flow_overflow"] = \
966                     int(self._result.get(u"client_err_flow_overflow", 0))
967                 self._l7_data[u"server"] = dict()
968                 self._l7_data[u"server"][u"active_flows"] = \
969                     int(self._result.get(u"server_active_flows", 0))
970                 self._l7_data[u"server"][u"established_flows"] = \
971                     int(self._result.get(u"server_established_flows", 0))
972                 self._l7_data[u"server"][u"traffic_duration"] = \
973                     float(self._result.get(u"server_traffic_duration", 0.0))
974                 self._l7_data[u"server"][u"err_rx_throttled"] = \
975                     int(self._result.get(u"client_err_rx_throttled", 0))
976                 if u"udp" in self.traffic_profile:
977                     self._l7_data[u"client"][u"udp"] = dict()
978                     self._l7_data[u"client"][u"udp"][u"connects"] = \
979                         int(self._result.get(u"client_udp_connects", 0))
980                     self._l7_data[u"client"][u"udp"][u"closed_flows"] = \
981                         int(self._result.get(u"client_udp_closed", 0))
982                     self._l7_data[u"client"][u"udp"][u"tx_bytes"] = \
983                         int(self._result.get(u"client_udp_tx_bytes", 0))
984                     self._l7_data[u"client"][u"udp"][u"rx_bytes"] = \
985                         int(self._result.get(u"client_udp_rx_bytes", 0))
986                     self._l7_data[u"client"][u"udp"][u"tx_packets"] = \
987                         int(self._result.get(u"client_udp_tx_packets", 0))
988                     self._l7_data[u"client"][u"udp"][u"rx_packets"] = \
989                         int(self._result.get(u"client_udp_rx_packets", 0))
990                     self._l7_data[u"client"][u"udp"][u"keep_drops"] = \
991                         int(self._result.get(u"client_udp_keep_drops", 0))
992                     self._l7_data[u"client"][u"udp"][u"err_cwf"] = \
993                         int(self._result.get(u"client_err_cwf", 0))
994                     self._l7_data[u"server"][u"udp"] = dict()
995                     self._l7_data[u"server"][u"udp"][u"accepted_flows"] = \
996                         int(self._result.get(u"server_udp_accepts", 0))
997                     self._l7_data[u"server"][u"udp"][u"closed_flows"] = \
998                         int(self._result.get(u"server_udp_closed", 0))
999                     self._l7_data[u"server"][u"udp"][u"tx_bytes"] = \
1000                         int(self._result.get(u"server_udp_tx_bytes", 0))
1001                     self._l7_data[u"server"][u"udp"][u"rx_bytes"] = \
1002                         int(self._result.get(u"server_udp_rx_bytes", 0))
1003                     self._l7_data[u"server"][u"udp"][u"tx_packets"] = \
1004                         int(self._result.get(u"server_udp_tx_packets", 0))
1005                     self._l7_data[u"server"][u"udp"][u"rx_packets"] = \
1006                         int(self._result.get(u"server_udp_rx_packets", 0))
1007                 elif u"tcp" in self.traffic_profile:
1008                     self._l7_data[u"client"][u"tcp"] = dict()
1009                     self._l7_data[u"client"][u"tcp"][u"initiated_flows"] = \
1010                         int(self._result.get(u"client_tcp_connect_inits", 0))
1011                     self._l7_data[u"client"][u"tcp"][u"connects"] = \
1012                         int(self._result.get(u"client_tcp_connects", 0))
1013                     self._l7_data[u"client"][u"tcp"][u"closed_flows"] = \
1014                         int(self._result.get(u"client_tcp_closed", 0))
1015                     self._l7_data[u"client"][u"tcp"][u"connattempt"] = \
1016                         int(self._result.get(u"client_tcp_connattempt", 0))
1017                     self._l7_data[u"client"][u"tcp"][u"tx_bytes"] = \
1018                         int(self._result.get(u"client_tcp_tx_bytes", 0))
1019                     self._l7_data[u"client"][u"tcp"][u"rx_bytes"] = \
1020                         int(self._result.get(u"client_tcp_rx_bytes", 0))
1021                     self._l7_data[u"server"][u"tcp"] = dict()
1022                     self._l7_data[u"server"][u"tcp"][u"accepted_flows"] = \
1023                         int(self._result.get(u"server_tcp_accepts", 0))
1024                     self._l7_data[u"server"][u"tcp"][u"connects"] = \
1025                         int(self._result.get(u"server_tcp_connects", 0))
1026                     self._l7_data[u"server"][u"tcp"][u"closed_flows"] = \
1027                         int(self._result.get(u"server_tcp_closed", 0))
1028                     self._l7_data[u"server"][u"tcp"][u"tx_bytes"] = \
1029                         int(self._result.get(u"server_tcp_tx_bytes", 0))
1030                     self._l7_data[u"server"][u"tcp"][u"rx_bytes"] = \
1031                         int(self._result.get(u"server_tcp_rx_bytes", 0))
1032
    def get_measurement_result(self):
        """Return the result of last measurement as ReceiveRateMeasurement.

        Separate function, as measurements can end either by time
        or by explicit call, this is the common block at the end.

        The target_tr field of ReceiveRateMeasurement is in
        transactions per second. Transmit count and loss count units
        depend on the transaction type. Usually they are in transactions
        per second, or aggregate packets per second.

        Duration is determined by the first available source, in order:
        server-side traffic duration from L7 data, the approximated
        duration reported by the remote script, the known (recomputed
        or target) duration, and finally stop time minus start time
        for an explicit stop.

        TODO: Fail on running or already reported measurement.

        :returns: Structure containing the result of the measurement.
        :rtype: ReceiveRateMeasurement
        :raises RuntimeError: If the transaction type is not supported,
            or if no duration source is available after an explicit stop.
        """
        try:
            # Client duration seems to include a setup period
            # where TRex does not send any packets yet.
            # Server duration does not include it.
            server_data = self._l7_data[u"server"]
            approximated_duration = float(server_data[u"traffic_duration"])
        except (KeyError, AttributeError, ValueError, TypeError):
            # STL profiles have no _l7_data; field may be missing or bad.
            approximated_duration = None
        try:
            if not approximated_duration:
                approximated_duration = float(self._approximated_duration)
        except ValueError:  # "manual"
            approximated_duration = None
        if not approximated_duration:
            if self._duration and self._duration > 0:
                # Known recomputed or target duration.
                approximated_duration = self._duration
            else:
                # It was an explicit stop.
                if not self._stop_time:
                    raise RuntimeError(u"Unable to determine duration.")
                approximated_duration = self._stop_time - self._start_time
        target_duration = self._target_duration
        if not target_duration:
            target_duration = approximated_duration
        transmit_rate = self._rate
        # Each branch computes partial/expected attempt counts and
        # fail count in the units appropriate for the transaction type.
        if self.transaction_type == u"packet":
            partial_attempt_count = self._sent
            expected_attempt_count = self._sent
            fail_count = self._loss
        elif self.transaction_type == u"udp_cps":
            if not self.transaction_scale:
                raise RuntimeError(u"Add support for no-limit udp_cps.")
            partial_attempt_count = self._l7_data[u"client"][u"sent"]
            # We do not care whether TG is slow, it should have attempted all.
            expected_attempt_count = self.transaction_scale
            pass_count = self._l7_data[u"client"][u"received"]
            fail_count = expected_attempt_count - pass_count
        elif self.transaction_type == u"tcp_cps":
            if not self.transaction_scale:
                raise RuntimeError(u"Add support for no-limit tcp_cps.")
            ctca = self._l7_data[u"client"][u"tcp"][u"connattempt"]
            partial_attempt_count = ctca
            # We do not care whether TG is slow, it should have attempted all.
            expected_attempt_count = self.transaction_scale
            # TODO: Is there a better packet-based counter?
            pass_count = self._l7_data[u"server"][u"tcp"][u"connects"]
            fail_count = expected_attempt_count - pass_count
        elif self.transaction_type == u"udp_pps":
            if not self.transaction_scale:
                raise RuntimeError(u"Add support for no-limit udp_pps.")
            partial_attempt_count = self._sent
            expected_attempt_count = self.transaction_scale * self.ppta
            # Packets not sent in time count as lost, on top of frame loss.
            fail_count = self._loss + (expected_attempt_count - self._sent)
        elif self.transaction_type == u"tcp_pps":
            if not self.transaction_scale:
                raise RuntimeError(u"Add support for no-limit tcp_pps.")
            partial_attempt_count = self._sent
            expected_attempt_count = self.transaction_scale * self.ppta
            # One loss-like scenario happens when TRex receives all packets
            # on L2 level, but is not fast enough to process them all
            # at L7 level, which leads to retransmissions.
            # Those manifest as opackets larger than expected.
            # A simple workaround is to add absolute difference.
            # Probability of retransmissions exactly cancelling
            # packets unsent due to duration stretching is quite low.
            fail_count = self._loss + abs(expected_attempt_count - self._sent)
        else:
            raise RuntimeError(f"Unknown parsing {self.transaction_type!r}")
        if fail_count < 0 and not self.negative_loss:
            fail_count = 0
        measurement = ReceiveRateMeasurement(
            duration=target_duration,
            target_tr=transmit_rate,
            transmit_count=expected_attempt_count,
            loss_count=fail_count,
            approximated_duration=approximated_duration,
            partial_transmit_count=partial_attempt_count,
        )
        measurement.latency = self.get_latency_int()
        return measurement
1130
1131     def measure(self, duration, transmit_rate):
1132         """Run trial measurement, parse and return results.
1133
1134         The input rate is for transactions. Stateles bidirectional traffic
1135         is understood as sequence of (asynchronous) transactions,
1136         two packets each.
1137
1138         The result units depend on test type, generally
1139         the count either transactions or packets (aggregated over directions).
1140
1141         Optionally, this method sleeps if measurement finished before
1142         the time specified as duration.
1143
1144         :param duration: Trial duration [s].
1145         :param transmit_rate: Target rate in transactions per second.
1146         :type duration: float
1147         :type transmit_rate: float
1148         :returns: Structure containing the result of the measurement.
1149         :rtype: ReceiveRateMeasurement
1150         :raises RuntimeError: If TG is not set or if node is not TG
1151             or if subtype is not specified.
1152         :raises NotImplementedError: If TG is not supported.
1153         """
1154         duration = float(duration)
1155         time_start = time.monotonic()
1156         time_stop = time_start + duration
1157         if self.resetter:
1158             self.resetter()
1159         self._send_traffic_on_tg_internal(
1160             duration=duration,
1161             rate=transmit_rate,
1162             async_call=False,
1163         )
1164         result = self.get_measurement_result()
1165         logger.debug(f"trial measurement result: {result!r}")
1166         # In PLRsearch, computation needs the specified time to complete.
1167         if self.sleep_till_duration:
1168             sleeptime = time_stop - time.monotonic()
1169             if sleeptime > 0.0:
1170                 # TODO: Sometimes we have time to do additional trials here,
1171                 # adapt PLRsearch to accept all the results.
1172                 time.sleep(sleeptime)
1173         return result
1174
1175     def set_rate_provider_defaults(
1176             self,
1177             frame_size,
1178             traffic_profile,
1179             ppta=1,
1180             resetter=None,
1181             traffic_directions=2,
1182             transaction_duration=0.0,
1183             transaction_scale=0,
1184             transaction_type=u"packet",
1185             duration_limit=0.0,
1186             negative_loss=True,
1187             sleep_till_duration=False,
1188             use_latency=False,
1189         ):
1190         """Store values accessed by measure().
1191
1192         :param frame_size: Frame size identifier or value [B].
1193         :param traffic_profile: Module name as a traffic profile identifier.
1194             See GPL/traffic_profiles/trex for implemented modules.
1195         :param ppta: Packets per transaction, aggregated over directions.
1196             Needed for udp_pps which does not have a good transaction counter,
1197             so we need to compute expected number of packets.
1198             Default: 1.
1199         :param resetter: Callable to reset DUT state for repeated trials.
1200         :param traffic_directions: Traffic from packet counting point of view
1201             is bi- (2) or uni- (1) directional.
1202             Default: 2
1203         :param transaction_duration: Total expected time to close transaction.
1204         :param transaction_scale: Number of transactions to perform.
1205             0 (default) means unlimited.
1206         :param transaction_type: An identifier specifying which counters
1207             and formulas to use when computing attempted and failed
1208             transactions. Default: "packet".
1209             TODO: Does this also specify parsing for the measured duration?
1210         :param duration_limit: Zero or maximum limit for computed (or given)
1211             duration.
1212         :param negative_loss: If false, negative loss is reported as zero loss.
1213         :param sleep_till_duration: If true and measurement returned faster,
1214             sleep until it matches duration. Needed for PLRsearch.
1215         :param use_latency: Whether to measure latency during the trial.
1216             Default: False.
1217         :type frame_size: str or int
1218         :type traffic_profile: str
1219         :type ppta: int
1220         :type resetter: Optional[Callable[[], None]]
1221         :type traffic_directions: int
1222         :type transaction_duration: float
1223         :type transaction_scale: int
1224         :type transaction_type: str
1225         :type duration_limit: float
1226         :type negative_loss: bool
1227         :type sleep_till_duration: bool
1228         :type use_latency: bool
1229         """
1230         self.frame_size = frame_size
1231         self.traffic_profile = str(traffic_profile)
1232         self.resetter = resetter
1233         self.ppta = ppta
1234         self.traffic_directions = int(traffic_directions)
1235         self.transaction_duration = float(transaction_duration)
1236         self.transaction_scale = int(transaction_scale)
1237         self.transaction_type = str(transaction_type)
1238         self.duration_limit = float(duration_limit)
1239         self.negative_loss = bool(negative_loss)
1240         self.sleep_till_duration = bool(sleep_till_duration)
1241         self.use_latency = bool(use_latency)
1242
1243
1244 class OptimizedSearch:
1245     """Class to be imported as Robot Library, containing search keywords.
1246
1247     Aside of setting up measurer and forwarding arguments,
1248     the main business is to translate min/max rate from unidir to aggregate.
1249     """
1250
    @staticmethod
    def perform_optimized_ndrpdr_search(
            frame_size,
            traffic_profile,
            minimum_transmit_rate,
            maximum_transmit_rate,
            packet_loss_ratio=0.005,
            final_relative_width=0.005,
            final_trial_duration=30.0,
            initial_trial_duration=1.0,
            number_of_intermediate_phases=2,
            timeout=720.0,
            doublings=1,
            ppta=1,
            resetter=None,
            traffic_directions=2,
            transaction_duration=0.0,
            transaction_scale=0,
            transaction_type=u"packet",
            use_latency=False,
    ):
        """Setup initialized TG, perform optimized search, return intervals.

        If transaction_scale is nonzero, all non-init trial durations
        are set to 2.0 (as they do not affect the real trial duration)
        and zero intermediate phases are used.
        The initial phase still uses 1.0 seconds, to force remeasurement.
        That makes initial phase act as a warmup.

        :param frame_size: Frame size identifier or value [B].
        :param traffic_profile: Module name as a traffic profile identifier.
            See GPL/traffic_profiles/trex for implemented modules.
        :param minimum_transmit_rate: Minimal load in transactions per second.
        :param maximum_transmit_rate: Maximal load in transactions per second.
        :param packet_loss_ratio: Fraction of packets lost, for PDR [1].
        :param final_relative_width: Final lower bound transmit rate
            cannot be more distant than this multiple of upper bound [1].
        :param final_trial_duration: Trial duration for the final phase [s].
        :param initial_trial_duration: Trial duration for the initial phase
            and also for the first intermediate phase [s].
        :param number_of_intermediate_phases: Number of intermediate phases
            to perform before the final phase [1].
        :param timeout: The search will fail itself when not finished
            before this overall time [s].
        :param doublings: How many doublings to do in external search step.
            Default 1 is suitable for fairly stable tests,
            less stable tests might get better overall duration with 2 or more.
        :param ppta: Packets per transaction, aggregated over directions.
            Needed for udp_pps which does not have a good transaction counter,
            so we need to compute expected number of packets.
            Default: 1.
        :param resetter: Callable to reset DUT state for repeated trials.
        :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
            Default: 2
        :param transaction_duration: Total expected time to close transaction.
        :param transaction_scale: Number of transactions to perform.
            0 (default) means unlimited.
        :param transaction_type: An identifier specifying which counters
            and formulas to use when computing attempted and failed
            transactions. Default: "packet".
        :param use_latency: Whether to measure latency during the trial.
            Default: False.
        :type frame_size: str or int
        :type traffic_profile: str
        :type minimum_transmit_rate: float
        :type maximum_transmit_rate: float
        :type packet_loss_ratio: float
        :type final_relative_width: float
        :type final_trial_duration: float
        :type initial_trial_duration: float
        :type number_of_intermediate_phases: int
        :type timeout: float
        :type doublings: int
        :type ppta: int
        :type resetter: Optional[Callable[[], None]]
        :type traffic_directions: int
        :type transaction_duration: float
        :type transaction_scale: int
        :type transaction_type: str
        :type use_latency: bool
        :returns: Structure containing narrowed down NDR and PDR intervals
            and their measurements.
        :rtype: NdrPdrResult
        :raises RuntimeError: If total duration is larger than timeout.
        """
        # we need instance of TrafficGenerator instantiated by Robot Framework
        # to be able to use trex_stl-*()
        tg_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.TrafficGenerator"
        )
        # Overrides for fixed transaction amount.
        # TODO: Move to robot code? We have two call sites, so this saves space,
        #       even though this is surprising for log readers.
        if transaction_scale:
            initial_trial_duration = 1.0
            final_trial_duration = 2.0
            number_of_intermediate_phases = 0
            timeout = 3600.0
        tg_instance.set_rate_provider_defaults(
            frame_size=frame_size,
            traffic_profile=traffic_profile,
            sleep_till_duration=False,
            ppta=ppta,
            resetter=resetter,
            traffic_directions=traffic_directions,
            transaction_duration=transaction_duration,
            transaction_scale=transaction_scale,
            transaction_type=transaction_type,
            use_latency=use_latency,
        )
        algorithm = MultipleLossRatioSearch(
            measurer=tg_instance,
            final_trial_duration=final_trial_duration,
            final_relative_width=final_relative_width,
            number_of_intermediate_phases=number_of_intermediate_phases,
            initial_trial_duration=initial_trial_duration,
            timeout=timeout,
            doublings=doublings,
        )
        result = algorithm.narrow_down_ndr_and_pdr(
            min_rate=minimum_transmit_rate,
            max_rate=maximum_transmit_rate,
            packet_loss_ratio=packet_loss_ratio,
        )
        return result
1376
1377     @staticmethod
1378     def perform_soak_search(
1379             frame_size,
1380             traffic_profile,
1381             minimum_transmit_rate,
1382             maximum_transmit_rate,
1383             plr_target=1e-7,
1384             tdpt=0.1,
1385             initial_count=50,
1386             timeout=7200.0,
1387             ppta=1,
1388             resetter=None,
1389             trace_enabled=False,
1390             traffic_directions=2,
1391             transaction_duration=0.0,
1392             transaction_scale=0,
1393             transaction_type=u"packet",
1394             use_latency=False,
1395     ):
1396         """Setup initialized TG, perform soak search, return avg and stdev.
1397
1398         :param frame_size: Frame size identifier or value [B].
1399         :param traffic_profile: Module name as a traffic profile identifier.
1400             See GPL/traffic_profiles/trex for implemented modules.
1401         :param minimum_transmit_rate: Minimal load in transactions per second.
1402         :param maximum_transmit_rate: Maximal load in transactions per second.
1403         :param plr_target: Fraction of packets lost to achieve [1].
1404         :param tdpt: Trial duration per trial.
1405             The algorithm linearly increases trial duration with trial number,
1406             this is the increment between succesive trials, in seconds.
1407         :param initial_count: Offset to apply before the first trial.
1408             For example initial_count=50 makes first trial to be 51*tdpt long.
1409             This is needed because initial "search" phase of integrator
1410             takes significant time even without any trial results.
1411         :param timeout: The search will stop after this overall time [s].
1412         :param ppta: Packets per transaction, aggregated over directions.
1413             Needed for udp_pps which does not have a good transaction counter,
1414             so we need to compute expected number of packets.
1415             Default: 1.
1416         :param resetter: Callable to reset DUT state for repeated trials.
1417         :param trace_enabled: True if trace enabled else False.
1418             This is very verbose tracing on numeric computations,
1419             do not use in production.
1420             Default: False
1421         :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
1422             Default: 2
1423         :param transaction_duration: Total expected time to close transaction.
1424         :param transaction_scale: Number of transactions to perform.
1425             0 (default) means unlimited.
1426         :param transaction_type: An identifier specifying which counters
1427             and formulas to use when computing attempted and failed
1428             transactions. Default: "packet".
1429         :param use_latency: Whether to measure latency during the trial.
1430             Default: False.
1431         :type frame_size: str or int
1432         :type traffic_profile: str
1433         :type minimum_transmit_rate: float
1434         :type maximum_transmit_rate: float
1435         :type plr_target: float
1436         :type initial_count: int
1437         :type timeout: float
1438         :type ppta: int
1439         :type resetter: Optional[Callable[[], None]]
1440         :type trace_enabled: bool
1441         :type traffic_directions: int
1442         :type transaction_duration: float
1443         :type transaction_scale: int
1444         :type transaction_type: str
1445         :type use_latency: bool
1446         :returns: Average and stdev of estimated aggregate rate giving PLR.
1447         :rtype: 2-tuple of float
1448         """
1449         tg_instance = BuiltIn().get_library_instance(
1450             u"resources.libraries.python.TrafficGenerator"
1451         )
1452         # Overrides for fixed transaction amount.
1453         # TODO: Move to robot code? We have a single call site
1454         #       but MLRsearch has two and we want the two to be used similarly.
1455         if transaction_scale:
1456             timeout = 7200.0
1457         tg_instance.set_rate_provider_defaults(
1458             frame_size=frame_size,
1459             traffic_profile=traffic_profile,
1460             negative_loss=False,
1461             sleep_till_duration=True,
1462             ppta=ppta,
1463             resetter=resetter,
1464             traffic_directions=traffic_directions,
1465             transaction_duration=transaction_duration,
1466             transaction_scale=transaction_scale,
1467             transaction_type=transaction_type,
1468             use_latency=use_latency,
1469         )
1470         algorithm = PLRsearch(
1471             measurer=tg_instance,
1472             trial_duration_per_trial=tdpt,
1473             packet_loss_ratio_target=plr_target,
1474             trial_number_offset=initial_count,
1475             timeout=timeout,
1476             trace_enabled=trace_enabled,
1477         )
1478         result = algorithm.search(
1479             min_rate=minimum_transmit_rate,
1480             max_rate=maximum_transmit_rate,
1481         )
1482         return result