CSIT-786 L2FIB scale testing
[csit.git] / resources / tools / trex / trex_stateless_profile.py
1 #!/usr/bin/python
2
3 # Copyright (c) 2017 Cisco and/or its affiliates.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at:
7 #
8 #     http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """This module gets a traffic profile together with other parameters, reads
17 the profile and sends the traffic. At the end, it measures the packet loss and
18 latency.
19 """
20
21 import sys
22 import argparse
23 import json
24
25 sys.path.insert(0, "/opt/trex-core-2.29/scripts/automation/"
26                    "trex_control_plane/stl/")
27
28 from trex_stl_lib.api import *
29
30
def fmt_latency(lat_min, lat_avg, lat_max):
    """Return formatted, rounded latency.

    Each value is rounded to the nearest integer; a value that cannot be
    parsed as a float (e.g. empty string, "NA") is reported as -1.

    :param lat_min: Min latency.
    :param lat_avg: Average latency.
    :param lat_max: Max latency.
    :type lat_min: str
    :type lat_avg: str
    :type lat_max: str
    :returns: Formatted and rounded output "min/avg/max".
    :rtype: str
    """

    def _to_int(value):
        # Round one latency sample; -1 marks an unparseable sample.
        try:
            return int(round(float(value)))
        except ValueError:
            return -1

    return "/".join(str(_to_int(val)) for val in (lat_min, lat_avg, lat_max))
58
59
def simple_burst(profile_file, duration, framesize, rate, warmup_time, port_0,
                 port_1, latency, async_start=False):
    """Send the traffic and measure packet loss and latency.

    Procedure:
     - reads the given traffic profile with streams,
     - connects to the T-rex client,
     - resets the ports,
     - removes all existing streams,
     - adds streams from the traffic profile to the ports,
     - if the warm-up time is more than 0, sends the warm-up traffic, reads the
       statistics,
     - clears the statistics from the client,
     - starts the traffic,
     - waits for the defined time (or runs forever if async mode is defined),
     - stops the traffic,
     - reads and displays the statistics and
     - disconnects from the client.

    :param profile_file: A python module with T-rex traffic profile.
    :param duration: Duration of traffic run in seconds (-1=infinite).
    :param framesize: Frame size.
    :param rate: Traffic rate [percentage, pps, bps].
    :param warmup_time: Traffic warm-up time in seconds, 0 = disable.
    :param port_0: Port 0 on the traffic generator.
    :param port_1: Port 1 on the traffic generator.
    :param latency: With latency stats.
    :param async_start: Start the traffic and exit.
    :type profile_file: str
    :type duration: int
    :type framesize: int or str
    :type rate: str
    :type warmup_time: int
    :type port_0: int
    :type port_1: int
    :type latency: bool
    :type async_start: bool
    """

    client = None
    total_rcvd = 0
    total_sent = 0
    lost_a = 0
    lost_b = 0
    # "-1/-1/-1" marks "no latency data" until real stats are read.
    lat_a = "-1/-1/-1"
    lat_b = "-1/-1/-1"

    # Read the profile:
    try:
        print("### Profile file:\n{}".format(profile_file))
        profile = STLProfile.load(profile_file, direction=0, port_id=0,
                                  framesize=framesize)
        print("\n### Profiles ###\n")
        print(profile.dump_to_yaml())
        streams = profile.get_streams()
    except STLError:
        print("Error while loading profile '{0}'\n".format(profile_file))
        sys.exit(1)

    try:
        # Create the client:
        client = STLClient(verbose_level=LoggerApi.VERBOSE_QUIET)
        # Connect to server:
        client.connect()
        # Prepare our ports (the machine has 0 <--> 1 with static route):
        client.reset(ports=[port_0, port_1])
        client.remove_all_streams(ports=[port_0, port_1])

        # MAC-src scale profiles (detected by file name) send frames with
        # varying MACs, so the ports must be promiscuous to receive them.
        if "macsrc" in profile_file:
            client.set_port_attr(ports=[port_0, port_1], promiscuous=True,
                                 resolve=False)
        # Integer frame size: one data stream per direction.
        # String frame size (e.g. IMIX): three streams per direction.
        if isinstance(framesize, int):
            client.add_streams(streams[0], ports=[port_0])
            client.add_streams(streams[1], ports=[port_1])
        elif isinstance(framesize, str):
            client.add_streams(streams[0:3], ports=[port_0])
            client.add_streams(streams[3:6], ports=[port_1])
        if latency:
            try:
                # Latency streams exist only for integer frame sizes;
                # string profiles (IMIX) carry no latency streams.
                if isinstance(framesize, int):
                    client.add_streams(streams[2], ports=[port_0])
                    client.add_streams(streams[3], ports=[port_1])
                elif isinstance(framesize, str):
                    latency = False
            except STLError:
                # Disable latency if NIC does not support requested stream type
                print("##### FAILED to add latency streams #####")
                latency = False
        # Warm-up phase:
        if warmup_time > 0:
            # Clear the stats before injecting:
            client.clear_stats()

            # Choose rate and start traffic:
            client.start(ports=[port_0, port_1], mult=rate,
                         duration=warmup_time)

            # Block until done (30 s grace on top of the warm-up time):
            client.wait_on_traffic(ports=[port_0, port_1],
                                   timeout=warmup_time+30)

            if client.get_warnings():
                for warning in client.get_warnings():
                    print(warning)

            # Read the stats after the test:
            stats = client.get_stats()

            print("##### Warmup statistics #####")
            print(json.dumps(stats, indent=4, separators=(',', ': '),
                             sort_keys=True))

            # Per-direction loss = sent on one port minus received on the other.
            lost_a = stats[0]["opackets"] - stats[1]["ipackets"]
            lost_b = stats[1]["opackets"] - stats[0]["ipackets"]

            print("\npackets lost from 0 --> 1: {0} pkts".format(lost_a))
            print("packets lost from 1 --> 0: {0} pkts".format(lost_b))

        # Clear the stats before injecting:
        client.clear_stats()
        # Reset warm-up loss counters so the final report covers the
        # measured run only.
        lost_a = 0
        lost_b = 0

        # Choose rate and start traffic:
        client.start(ports=[port_0, port_1], mult=rate, duration=duration)

        if not async_start:
            # Block until done:
            client.wait_on_traffic(ports=[port_0, port_1], timeout=duration+30)

            if client.get_warnings():
                for warning in client.get_warnings():
                    print(warning)

            # Read the stats after the test
            stats = client.get_stats()

            print("##### Statistics #####")
            print(json.dumps(stats, indent=4, separators=(',', ': '),
                             sort_keys=True))

            lost_a = stats[0]["opackets"] - stats[1]["ipackets"]
            lost_b = stats[1]["opackets"] - stats[0]["ipackets"]

            if latency:
                lat_a = fmt_latency(
                    str(stats["latency"][0]["latency"]["total_min"]),
                    str(stats["latency"][0]["latency"]["average"]),
                    str(stats["latency"][0]["latency"]["total_max"]))
                lat_b = fmt_latency(
                    str(stats["latency"][1]["latency"]["total_min"]),
                    str(stats["latency"][1]["latency"]["average"]),
                    str(stats["latency"][1]["latency"]["total_max"]))

            total_sent = stats[0]["opackets"] + stats[1]["opackets"]
            total_rcvd = stats[0]["ipackets"] + stats[1]["ipackets"]

            print("\npackets lost from 0 --> 1:   {0} pkts".format(lost_a))
            print("packets lost from 1 --> 0:   {0} pkts".format(lost_b))

    except STLError as err:
        sys.stderr.write("{0}\n".format(err))
        sys.exit(1)

    finally:
        # In async mode leave the traffic running and release the ports;
        # otherwise disconnect and print the machine-readable result line
        # consumed by the calling framework.
        if async_start:
            if client:
                client.disconnect(stop_traffic=False, release_ports=True)
        else:
            if client:
                client.disconnect()
            print("rate={0}, totalReceived={1}, totalSent={2}, "
                  "frameLoss={3}, latencyStream0(usec)={4}, "
                  "latencyStream1(usec)={5}".
                  format(rate, total_rcvd, total_sent, lost_a + lost_b,
                         lat_a, lat_b))
236
237
def main():
    """Main function for the traffic generator using T-rex.

    It verifies the given command line arguments and runs "simple_burst"
    function.
    """

    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--profile",
                        required=True,
                        type=str,
                        help="Python traffic profile.")
    parser.add_argument("-d", "--duration",
                        required=True,
                        type=int,
                        help="Duration of traffic run.")
    parser.add_argument("-s", "--frame_size",
                        required=True,
                        help="Size of a Frame without padding and IPG.")
    parser.add_argument("-r", "--rate",
                        required=True,
                        # "%%" escapes the percent sign: argparse %-formats
                        # help strings, so a bare "%" crashes --help.
                        help="Traffic rate with included units (%%, pps).")
    parser.add_argument("-w", "--warmup_time",
                        type=int,
                        default=5,
                        help="Traffic warm-up time in seconds, 0 = disable.")
    parser.add_argument("--port_0",
                        required=True,
                        type=int,
                        help="Port 0 on the traffic generator.")
    parser.add_argument("--port_1",
                        required=True,
                        type=int,
                        help="Port 1 on the traffic generator.")
    parser.add_argument("--async",
                        # "async" is a reserved keyword since Python 3.7, so
                        # "args.async" would be a syntax error; store the flag
                        # under a safe attribute name instead.
                        dest="async_start",
                        action="store_true",
                        default=False,
                        help="Non-blocking call of the script.")
    parser.add_argument("--latency",
                        action="store_true",
                        default=False,
                        help="Add latency stream")
    args = parser.parse_args()

    # Frame size is either an integer (e.g. 64) or a profile name (e.g. IMIX).
    try:
        framesize = int(args.frame_size)
    except ValueError:
        framesize = args.frame_size

    simple_burst(profile_file=args.profile,
                 duration=int(args.duration),
                 framesize=framesize,
                 rate=args.rate,
                 warmup_time=int(args.warmup_time),
                 port_0=int(args.port_0),
                 port_1=int(args.port_1),
                 latency=args.latency,
                 async_start=args.async_start)
296
297
# Script entry point: run only when executed directly, not when imported.
if __name__ == '__main__':
    main()

©2016 FD.io a Linux Foundation Collaborative Project. All Rights Reserved.
Linux Foundation is a registered trademark of The Linux Foundation. Linux is a registered trademark of Linus Torvalds.
Please see our privacy policy and terms of use.