Update T-rex to v2.29
[csit.git] / resources / tools / trex / trex_stateless_profile.py
1 #!/usr/bin/python
2
3 # Copyright (c) 2017 Cisco and/or its affiliates.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at:
7 #
8 #     http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """This module gets a traffic profile together with other parameters, reads
17 the profile and sends the traffic. At the end, it measures the packet loss and
18 latency.
19 """
20
21 import sys
22 import argparse
23 import json
24
25 sys.path.insert(0, "/opt/trex-core-2.29/scripts/automation/"
26                    "trex_control_plane/stl/")
27
28 from trex_stl_lib.api import *
29
30
def fmt_latency(lat_min, lat_avg, lat_max):
    """Return formatted, rounded latency.

    Each value is parsed as a float and rounded to the nearest integer;
    a value that cannot be parsed (non-numeric string, empty string, None)
    is reported as the sentinel -1 instead of raising.

    :param lat_min: Min latency
    :param lat_avg: Average latency
    :param lat_max: Max latency
    :type lat_min: string
    :type lat_avg: string
    :type lat_max: string
    :return: Formatted and rounded output "min/avg/max"
    :rtype: string
    """

    def _to_int(value):
        """Round value to the nearest int; return -1 if not numeric."""
        try:
            return int(round(float(value)))
        except (ValueError, TypeError):
            # Non-numeric reading (e.g. "" or None) -> sentinel -1.
            return -1

    return "/".join(str(_to_int(val)) for val in (lat_min, lat_avg, lat_max))
58
59
def simple_burst(profile_file, duration, framesize, rate, warmup_time, port_0,
                 port_1, latency, async_start=False):
    """Send the traffic and measure packet loss and latency.

    Procedure:
     - reads the given traffic profile with streams,
     - connects to the T-rex client,
     - resets the ports,
     - removes all existing streams,
     - adds streams from the traffic profile to the ports,
     - if the warm-up time is more than 0, sends the warm-up traffic, reads the
       statistics,
     - clears the statistics from the client,
     - starts the traffic,
     - waits for the defined time (or runs forever if async mode is defined),
     - stops the traffic,
     - reads and displays the statistics and
     - disconnects from the client.

    :param profile_file: A python module with T-rex traffic profile.
    :param duration: Duration of traffic run in seconds (-1=infinite).
    :param framesize: Frame size.
    :param rate: Traffic rate [percentage, pps, bps].
    :param warmup_time: Traffic warm-up time in seconds, 0 = disable.
    :param port_0: Port 0 on the traffic generator.
    :param port_1: Port 1 on the traffic generator.
    :param latency: With latency stats.
    :param async_start: Start the traffic and exit.
    :type profile_file: str
    :type duration: int
    :type framesize: int or str
    :type rate: str
    :type warmup_time: int
    :type port_0: int
    :type port_1: int
    :type latency: bool
    :type async_start: bool
    """

    client = None
    total_rcvd = 0
    total_sent = 0
    lost_a = 0
    lost_b = 0
    # Sentinel latency strings reported when latency streams are disabled.
    lat_a = "-1/-1/-1"
    lat_b = "-1/-1/-1"

    # Read the profile:
    try:
        print("### Profile file:\n{}".format(profile_file))
        profile = STLProfile.load(profile_file, direction=0, port_id=0,
                                  framesize=framesize)
        print("\n### Profiles ###\n")
        print(profile.dump_to_yaml())
        streams = profile.get_streams()
    except STLError:
        print("Error while loading profile '{0}'\n".format(profile_file))
        sys.exit(1)

    try:
        # Create the client:
        client = STLClient(verbose_level=LoggerApi.VERBOSE_QUIET)
        # Connect to server:
        client.connect()
        # Prepare our ports (the machine has 0 <--> 1 with static route):
        client.reset(ports=[port_0, port_1])
        client.remove_all_streams(ports=[port_0, port_1])

        # Integer framesize profiles provide one data stream per direction
        # (plus optional latency streams 2 and 3); string framesize (IMIX)
        # profiles provide three data streams per direction.
        if isinstance(framesize, int):
            client.add_streams(streams[0], ports=[port_0])
            client.add_streams(streams[1], ports=[port_1])
        elif isinstance(framesize, str):
            client.add_streams(streams[0:3], ports=[port_0])
            client.add_streams(streams[3:6], ports=[port_1])
        if latency:
            try:
                if isinstance(framesize, int):
                    client.add_streams(streams[2], ports=[port_0])
                    client.add_streams(streams[3], ports=[port_1])
                elif isinstance(framesize, str):
                    # Latency streams are not defined for IMIX profiles.
                    latency = False
            except STLError:
                # Disable latency if NIC does not support requested stream type
                print("##### FAILED to add latency streams #####")
                latency = False
        # Warm-up phase:
        if warmup_time > 0:
            # Clear the stats before injecting:
            client.clear_stats()

            # Choose rate and start traffic:
            client.start(ports=[port_0, port_1], mult=rate,
                         duration=warmup_time)

            # Block until done:
            client.wait_on_traffic(ports=[port_0, port_1],
                                   timeout=warmup_time+30)

            if client.get_warnings():
                for warning in client.get_warnings():
                    print(warning)

            # Read the stats after the test:
            stats = client.get_stats()

            print("##### Warmup statistics #####")
            print(json.dumps(stats, indent=4, separators=(',', ': '),
                             sort_keys=True))

            # Index per-port stats by the actual port numbers, not by
            # the literals 0/1 (the ports in use may be any pair).
            lost_a = stats[port_0]["opackets"] - stats[port_1]["ipackets"]
            lost_b = stats[port_1]["opackets"] - stats[port_0]["ipackets"]

            print("\npackets lost from 0 --> 1: {0} pkts".format(lost_a))
            print("packets lost from 1 --> 0: {0} pkts".format(lost_b))

        # Clear the stats before injecting:
        client.clear_stats()
        lost_a = 0
        lost_b = 0

        # Choose rate and start traffic:
        client.start(ports=[port_0, port_1], mult=rate, duration=duration)

        if not async_start:
            # Block until done:
            client.wait_on_traffic(ports=[port_0, port_1], timeout=duration+30)

            if client.get_warnings():
                for warning in client.get_warnings():
                    print(warning)

            # Read the stats after the test
            stats = client.get_stats()

            print("##### Statistics #####")
            print(json.dumps(stats, indent=4, separators=(',', ': '),
                             sort_keys=True))

            # See warm-up note: key per-port stats by port_0/port_1.
            lost_a = stats[port_0]["opackets"] - stats[port_1]["ipackets"]
            lost_b = stats[port_1]["opackets"] - stats[port_0]["ipackets"]

            if latency:
                # Latency streams use pg_ids 0 and 1 as defined in the
                # traffic profiles, independent of the port numbers.
                lat_a = fmt_latency(
                    str(stats["latency"][0]["latency"]["total_min"]),
                    str(stats["latency"][0]["latency"]["average"]),
                    str(stats["latency"][0]["latency"]["total_max"]))
                lat_b = fmt_latency(
                    str(stats["latency"][1]["latency"]["total_min"]),
                    str(stats["latency"][1]["latency"]["average"]),
                    str(stats["latency"][1]["latency"]["total_max"]))

            total_sent = stats[port_0]["opackets"] + stats[port_1]["opackets"]
            total_rcvd = stats[port_0]["ipackets"] + stats[port_1]["ipackets"]

            print("\npackets lost from 0 --> 1:   {0} pkts".format(lost_a))
            print("packets lost from 1 --> 0:   {0} pkts".format(lost_b))

    except STLError as err:
        sys.stderr.write("{0}\n".format(err))
        sys.exit(1)

    finally:
        if async_start:
            # Leave the traffic running; release ports so another client
            # (the stop script) can attach later.
            if client:
                client.disconnect(stop_traffic=False, release_ports=True)
        else:
            if client:
                client.disconnect()
            # Single parseable result line consumed by the caller.
            print("rate={0}, totalReceived={1}, totalSent={2}, "
                  "frameLoss={3}, latencyStream0(usec)={4}, "
                  "latencyStream1(usec)={5}".
                  format(rate, total_rcvd, total_sent, lost_a + lost_b,
                         lat_a, lat_b))
233
234
def main():
    """Main function for the traffic generator using T-rex.

    It verifies the given command line arguments and runs "simple_burst"
    function.
    """

    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--profile",
                        required=True,
                        type=str,
                        help="Python traffic profile.")
    parser.add_argument("-d", "--duration",
                        required=True,
                        type=int,
                        help="Duration of traffic run.")
    parser.add_argument("-s", "--frame_size",
                        required=True,
                        help="Size of a Frame without padding and IPG.")
    # NOTE: argparse %-interpolates help strings, so a literal percent sign
    # must be written as "%%"; a bare "%" makes "--help" raise ValueError.
    parser.add_argument("-r", "--rate",
                        required=True,
                        help="Traffic rate with included units (%%, pps).")
    parser.add_argument("-w", "--warmup_time",
                        type=int,
                        default=5,
                        help="Traffic warm-up time in seconds, 0 = disable.")
    parser.add_argument("--port_0",
                        required=True,
                        type=int,
                        help="Port 0 on the traffic generator.")
    parser.add_argument("--port_1",
                        required=True,
                        type=int,
                        help="Port 1 on the traffic generator.")
    # "async" is a reserved word from Python 3.7; store the flag under a
    # safe attribute name while keeping the CLI option unchanged.
    parser.add_argument("--async",
                        dest="async_start",
                        action="store_true",
                        default=False,
                        help="Non-blocking call of the script.")
    parser.add_argument("--latency",
                        action="store_true",
                        default=False,
                        help="Add latency stream")
    args = parser.parse_args()

    # Frame size is an int (e.g. 64) or a string profile name (e.g. "IMIX_v4").
    try:
        framesize = int(args.frame_size)
    except ValueError:
        framesize = args.frame_size

    simple_burst(profile_file=args.profile,
                 duration=int(args.duration),
                 framesize=framesize,
                 rate=args.rate,
                 warmup_time=int(args.warmup_time),
                 port_0=int(args.port_0),
                 port_1=int(args.port_1),
                 latency=args.latency,
                 async_start=args.async_start)
293
294
# Script entry point: parse CLI arguments and run the traffic burst.
if __name__ == '__main__':
    main()

©2016 FD.io a Linux Foundation Collaborative Project. All Rights Reserved.
Linux Foundation is a registered trademark of The Linux Foundation. Linux is a registered trademark of Linus Torvalds.
Please see our privacy policy and terms of use.