Upgrade to T-rex v2.34
[csit.git] / resources / tools / trex / trex_stateless_profile.py
1 #!/usr/bin/python
2
3 # Copyright (c) 2017 Cisco and/or its affiliates.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at:
7 #
8 #     http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """This module gets a traffic profile together with other parameters, reads
17 the profile and sends the traffic. At the end, it measures the packet loss and
18 latency.
19 """
20
21 import sys
22 import argparse
23 import json
24
25 sys.path.insert(0, "/opt/trex-core-2.34/scripts/automation/"
26                    "trex_control_plane/stl/")
27
28 from trex_stl_lib.api import *
29
30
def fmt_latency(lat_min, lat_avg, lat_max):
    """Return formatted, rounded latency.

    Each value is converted to a float, rounded to the nearest integer and
    joined as "min/avg/max". A value that cannot be parsed as a number is
    reported as -1 (the same sentinel the callers initialise latency with).

    :param lat_min: Min latency.
    :param lat_avg: Average latency.
    :param lat_max: Max latency.
    :type lat_min: str
    :type lat_avg: str
    :type lat_max: str
    :returns: Formatted and rounded output "min/avg/max".
    :rtype: str
    """

    def _to_int(value):
        # Round to nearest integer; -1 marks an unparsable / missing value.
        try:
            return int(round(float(value)))
        except (ValueError, TypeError):
            return -1

    return "/".join(str(_to_int(val)) for val in (lat_min, lat_avg, lat_max))
58
59
def simple_burst(profile_file, duration, framesize, rate, warmup_time, port_0,
                 port_1, latency, async_start=False):
    """Send the traffic and measure packet loss and latency.

    Procedure:
     - reads the given traffic profile with streams,
     - connects to the T-rex client,
     - resets the ports,
     - removes all existing streams,
     - adds streams from the traffic profile to the ports,
     - if the warm-up time is more than 0, sends the warm-up traffic, reads the
       statistics,
     - clears the statistics from the client,
     - starts the traffic,
     - waits for the defined time (or runs forever if async mode is defined),
     - stops the traffic,
     - reads and displays the statistics and
     - disconnects from the client.

    :param profile_file: A python module with T-rex traffic profile.
    :param framesize: Frame size.
    :param duration: Duration of traffic run in seconds (-1=infinite).
    :param rate: Traffic rate [percentage, pps, bps].
    :param warmup_time: Traffic warm-up time in seconds, 0 = disable.
    :param port_0: Port 0 on the traffic generator.
    :param port_1: Port 1 on the traffic generator.
    :param latency: With latency stats.
    :param async_start: Start the traffic and exit.
    :type profile_file: str
    :type framesize: int or str
    :type duration: int
    :type rate: str
    :type warmup_time: int
    :type port_0: int
    :type port_1: int
    :type latency: bool
    :type async_start: bool
    """

    client = None
    total_rcvd = 0
    total_sent = 0
    lost_a = 0
    lost_b = 0
    # "-1/-1/-1" is the sentinel printed when latency was not measured.
    lat_a = "-1/-1/-1"
    lat_b = "-1/-1/-1"

    # Read the profile:
    try:
        print("### Profile file:\n{}".format(profile_file))
        profile = STLProfile.load(profile_file, direction=0, port_id=0,
                                  framesize=framesize)
        streams = profile.get_streams()
    except STLError as err:
        print("Error while loading profile '{0}' {1}".format(profile_file, err))
        sys.exit(1)

    try:
        # Create the client:
        client = STLClient(verbose_level=LoggerApi.VERBOSE_QUIET)
        # Connect to server:
        client.connect()
        # Prepare our ports (the machine has 0 <--> 1 with static route):
        client.reset(ports=[port_0, port_1])
        client.remove_all_streams(ports=[port_0, port_1])

        # Profiles that vary the source MAC need promiscuous mode on the NIC.
        if "macsrc" in profile_file:
            client.set_port_attr(ports=[port_0, port_1], promiscuous=True,
                                 resolve=False)
        # int framesize: profile yields one data stream per direction
        # (+ optional latency streams at indexes 2 and 3, added below).
        # str framesize (e.g. IMIX): three data streams per direction.
        if isinstance(framesize, int):
            client.add_streams(streams[0], ports=[port_0])
            client.add_streams(streams[1], ports=[port_1])
        elif isinstance(framesize, str):
            client.add_streams(streams[0:3], ports=[port_0])
            client.add_streams(streams[3:6], ports=[port_1])
        if latency:
            try:
                if isinstance(framesize, int):
                    client.add_streams(streams[2], ports=[port_0])
                    client.add_streams(streams[3], ports=[port_1])
                elif isinstance(framesize, str):
                    # String frame sizes provide no latency streams here,
                    # so latency measurement is silently disabled.
                    latency = False
            except STLError:
                # Disable latency if NIC does not support requested stream type
                print("##### FAILED to add latency streams #####")
                latency = False
        # Warm-up phase:
        if warmup_time > 0:
            # Clear the stats before injecting:
            client.clear_stats()

            # Choose rate and start traffic:
            client.start(ports=[port_0, port_1], mult=rate,
                         duration=warmup_time)

            # Block until done (+30 s slack for the traffic to wind down):
            client.wait_on_traffic(ports=[port_0, port_1],
                                   timeout=warmup_time+30)

            if client.get_warnings():
                for warning in client.get_warnings():
                    print(warning)

            # Read the stats after the test:
            stats = client.get_stats()

            print("##### Warmup statistics #####")
            print(json.dumps(stats, indent=4, separators=(',', ': '),
                             sort_keys=True))

            # Loss per direction = sent on one port minus received on the other.
            lost_a = stats[0]["opackets"] - stats[1]["ipackets"]
            lost_b = stats[1]["opackets"] - stats[0]["ipackets"]

            print("\npackets lost from 0 --> 1: {0} pkts".format(lost_a))
            print("packets lost from 1 --> 0: {0} pkts".format(lost_b))

        # Clear the stats before injecting:
        client.clear_stats()
        # Reset warm-up loss counters so only the measured run is reported.
        lost_a = 0
        lost_b = 0

        # Choose rate and start traffic:
        client.start(ports=[port_0, port_1], mult=rate, duration=duration)

        if not async_start:
            # Block until done (+30 s slack for the traffic to wind down):
            client.wait_on_traffic(ports=[port_0, port_1], timeout=duration+30)

            if client.get_warnings():
                for warning in client.get_warnings():
                    print(warning)

            # Read the stats after the test
            stats = client.get_stats()

            print("##### Statistics #####")
            print(json.dumps(stats, indent=4, separators=(',', ': '),
                             sort_keys=True))

            lost_a = stats[0]["opackets"] - stats[1]["ipackets"]
            lost_b = stats[1]["opackets"] - stats[0]["ipackets"]

            if latency:
                # Latency values are stringified so fmt_latency can map any
                # missing / non-numeric field to -1.
                lat_a = fmt_latency(
                    str(stats["latency"][0]["latency"]["total_min"]),
                    str(stats["latency"][0]["latency"]["average"]),
                    str(stats["latency"][0]["latency"]["total_max"]))
                lat_b = fmt_latency(
                    str(stats["latency"][1]["latency"]["total_min"]),
                    str(stats["latency"][1]["latency"]["average"]),
                    str(stats["latency"][1]["latency"]["total_max"]))

            total_sent = stats[0]["opackets"] + stats[1]["opackets"]
            total_rcvd = stats[0]["ipackets"] + stats[1]["ipackets"]

            print("\npackets lost from 0 --> 1:   {0} pkts".format(lost_a))
            print("packets lost from 1 --> 0:   {0} pkts".format(lost_b))

    except STLError as err:
        sys.stderr.write("{0}\n".format(err))
        sys.exit(1)

    finally:
        if async_start:
            # Leave the traffic running; only release our client handle.
            if client:
                client.disconnect(stop_traffic=False, release_ports=True)
        else:
            if client:
                client.disconnect()
            # This summary line is parsed by the caller - keep the format.
            print("rate={0}, totalReceived={1}, totalSent={2}, "
                  "frameLoss={3}, latencyStream0(usec)={4}, "
                  "latencyStream1(usec)={5}".
                  format(rate, total_rcvd, total_sent, lost_a + lost_b,
                         lat_a, lat_b))
234
235
def main():
    """Main function for the traffic generator using T-rex.

    It verifies the given command line arguments and runs "simple_burst"
    function.
    """

    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--profile",
                        required=True,
                        type=str,
                        help="Python traffic profile.")
    parser.add_argument("-d", "--duration",
                        required=True,
                        type=int,
                        help="Duration of traffic run.")
    parser.add_argument("-s", "--frame_size",
                        required=True,
                        help="Size of a Frame without padding and IPG.")
    # NOTE: argparse %-formats help strings, so a literal percent sign must
    # be escaped as "%%" or printing --help raises ValueError.
    parser.add_argument("-r", "--rate",
                        required=True,
                        help="Traffic rate with included units (%%, pps).")
    parser.add_argument("-w", "--warmup_time",
                        type=int,
                        default=5,
                        help="Traffic warm-up time in seconds, 0 = disable.")
    parser.add_argument("--port_0",
                        required=True,
                        type=int,
                        help="Port 0 on the traffic generator.")
    parser.add_argument("--port_1",
                        required=True,
                        type=int,
                        help="Port 1 on the traffic generator.")
    # dest= keeps the attribute access valid on Python 3, where "async" is
    # a reserved keyword and "args.async" would be a syntax error.
    parser.add_argument("--async",
                        dest="async_start",
                        action="store_true",
                        default=False,
                        help="Non-blocking call of the script.")
    parser.add_argument("--latency",
                        action="store_true",
                        default=False,
                        help="Add latency stream")
    args = parser.parse_args()

    # Frame size is either a plain integer or a profile keyword (e.g. "IMIX").
    try:
        framesize = int(args.frame_size)
    except ValueError:
        framesize = args.frame_size

    simple_burst(profile_file=args.profile,
                 duration=int(args.duration),
                 framesize=framesize,
                 rate=args.rate,
                 warmup_time=int(args.warmup_time),
                 port_0=int(args.port_0),
                 port_1=int(args.port_1),
                 latency=args.latency,
                 async_start=args.async_start)


if __name__ == '__main__':
    main()

©2016 FD.io a Linux Foundation Collaborative Project. All Rights Reserved.
Linux Foundation is a registered trademark of The Linux Foundation. Linux is a registered trademark of Linus Torvalds.
Please see our privacy policy and terms of use.