3 # Copyright (c) 2017 Cisco and/or its affiliates.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at:
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
16 """This module gets a traffic profile together with other parameters, reads
17 the profile and sends the traffic. At the end, it measures the packet loss and
25 sys.path.insert(0, "/opt/trex-core-2.35/scripts/automation/"
26 "trex_control_plane/stl/")
28 from trex_stl_lib.api import *
def fmt_latency(lat_min, lat_avg, lat_max):
    """Return formatted, rounded latency.

    :param lat_min: Min latency
    :param lat_avg: Average latency
    :param lat_max: Max latency
    :type lat_min: str
    :type lat_avg: str
    :type lat_max: str
    :return: Formatted and rounded output "min/avg/max"
    :rtype: str
    """
    # T-rex reports latency values as strings and may yield non-numeric
    # content (e.g. when latency was not measured); fall back to -1 per
    # field instead of crashing with ValueError.
    try:
        t_min = int(round(float(lat_min)))
    except ValueError:
        t_min = int(-1)
    try:
        t_avg = int(round(float(lat_avg)))
    except ValueError:
        t_avg = int(-1)
    try:
        t_max = int(round(float(lat_max)))
    except ValueError:
        t_max = int(-1)

    return "/".join(str(tmp) for tmp in (t_min, t_avg, t_max))
def simple_burst(profile_file, duration, framesize, rate, warmup_time, port_0,
                 port_1, latency, async_start=False, unidirection=False):
    """Send traffic and measure packet loss and latency.

    Procedure:
     - reads the given traffic profile with streams,
     - connects to the T-rex client,
     - resets the ports,
     - removes all existing streams,
     - adds streams from the traffic profile to the ports,
     - if the warm-up time is more than 0, sends the warm-up traffic, reads the
       statistics,
     - clears the statistics from the client,
     - starts the traffic,
     - waits for the defined time (or runs forever if async mode is defined),
     - reads and displays the statistics and
     - disconnects from the client.

    :param profile_file: A python module with T-rex traffic profile.
    :param framesize: Frame size.
    :param duration: Duration of traffic run in seconds (-1=infinite).
    :param rate: Traffic rate [percentage, pps, bps].
    :param warmup_time: Traffic warm-up time in seconds, 0 = disable.
    :param port_0: Port 0 on the traffic generator.
    :param port_1: Port 1 on the traffic generator.
    :param latency: With latency stats.
    :param async_start: Start the traffic and exit.
    :param unidirection: Traffic is unidirectional.
    :type profile_file: str
    :type framesize: int or str
    :type duration: float
    :type rate: str
    :type warmup_time: float
    :type port_0: int
    :type port_1: int
    :type latency: bool
    :type async_start: bool
    :type unidirection: bool
    """
    # Thin dispatcher: the heavy lifting lives in the two direction-specific
    # helpers, which share this function's parameter order.
    if unidirection:
        # Unidirectional traffic: only port_0 transmits.
        send_traffic_unidirection(profile_file, duration, framesize, rate,
                                  warmup_time, port_0, port_1, latency,
                                  async_start)
    else:
        # Bidirectional traffic: both ports transmit.
        send_traffic_bidirection(profile_file, duration, framesize, rate,
                                 warmup_time, port_0, port_1, latency,
                                 async_start)
def send_traffic_bidirection(profile_file, duration, framesize, rate,
                             warmup_time, port_0, port_1, latency,
                             async_start=False):
    """Send traffic bidirection and measure packet loss and latency.

    :param profile_file: A python module with T-rex traffic profile.
    :param framesize: Frame size.
    :param duration: Duration of traffic run in seconds (-1=infinite).
    :param rate: Traffic rate [percentage, pps, bps].
    :param warmup_time: Traffic warm-up time in seconds, 0 = disable.
    :param port_0: Port 0 on the traffic generator.
    :param port_1: Port 1 on the traffic generator.
    :param latency: With latency stats.
    :param async_start: Start the traffic and exit.
    :type profile_file: str
    :type framesize: int or str
    :type duration: float
    :type rate: str
    :type warmup_time: float
    :type port_0: int
    :type port_1: int
    :type latency: bool
    :type async_start: bool
    """
    client = None
    total_rcvd = 0
    total_sent = 0
    lost_a = 0
    lost_b = 0
    lat_a = "-1/-1/-1"
    lat_b = "-1/-1/-1"

    # Read the profile:
    try:
        print("### Profile file:\n{}".format(profile_file))
        profile = STLProfile.load(profile_file, direction=0, port_id=0,
                                  framesize=framesize)
        streams = profile.get_streams()
    except STLError as err:
        print("Error while loading profile '{0}' {1}".format(profile_file, err))
        sys.exit(1)

    try:
        # Create the client:
        client = STLClient(verbose_level=LoggerApi.VERBOSE_QUIET)
        # Connect to server:
        client.connect()
        # Prepare our ports (the machine has 0 <--> 1 with static route):
        client.reset(ports=[port_0, port_1])
        client.remove_all_streams(ports=[port_0, port_1])

        if "macsrc" in profile_file:
            # Profiles randomizing the source MAC need promiscuous mode,
            # otherwise the NIC drops the returning frames.
            client.set_port_attr(ports=[port_0, port_1], promiscuous=True,
                                 resolve=False)
        if isinstance(framesize, int):
            client.add_streams(streams[0], ports=[port_0])
            client.add_streams(streams[1], ports=[port_1])
        elif isinstance(framesize, str):
            # IMIX profiles carry three data streams per direction.
            client.add_streams(streams[0:3], ports=[port_0])
            client.add_streams(streams[3:6], ports=[port_1])

        if latency:
            try:
                if isinstance(framesize, int):
                    client.add_streams(streams[2], ports=[port_0])
                    client.add_streams(streams[3], ports=[port_1])
                elif isinstance(framesize, str):
                    # Latency streams are not defined for IMIX profiles.
                    latency = False
            except STLError:
                # Disable latency if NIC does not support requested stream type
                print("##### FAILED to add latency streams #####")
                latency = False

        # Warm-up phase:
        if warmup_time > 0:
            # Clear the stats before injecting:
            client.clear_stats()

            # Choose rate and start traffic:
            client.start(ports=[port_0, port_1], mult=rate,
                         duration=warmup_time)

            # Block until done:
            client.wait_on_traffic(ports=[port_0, port_1],
                                   timeout=warmup_time+30)

            if client.get_warnings():
                for warning in client.get_warnings():
                    print(warning)

            # Read the stats after the test:
            stats = client.get_stats()

            print("##### Warmup statistics #####")
            print(json.dumps(stats, indent=4, separators=(',', ': '),
                             sort_keys=True))

            # FIX: key per-port stats by the actual port ids instead of
            # hard-coded 0/1 (the unidirection path already does this);
            # with any other port pair the old lookups read wrong entries.
            lost_a = stats[port_0]["opackets"] - stats[port_1]["ipackets"]
            lost_b = stats[port_1]["opackets"] - stats[port_0]["ipackets"]

            print("\npackets lost from 0 --> 1: {0} pkts".format(lost_a))
            print("packets lost from 1 --> 0: {0} pkts".format(lost_b))

        # Clear the stats before injecting:
        client.clear_stats()
        lost_a = 0
        lost_b = 0

        # Choose rate and start traffic:
        client.start(ports=[port_0, port_1], mult=rate, duration=duration)

        if not async_start:
            # Block until done:
            client.wait_on_traffic(ports=[port_0, port_1], timeout=duration+30)

            if client.get_warnings():
                for warning in client.get_warnings():
                    print(warning)

            # Read the stats after the test
            stats = client.get_stats()

            print("##### Statistics #####")
            print(json.dumps(stats, indent=4, separators=(',', ': '),
                             sort_keys=True))

            lost_a = stats[port_0]["opackets"] - stats[port_1]["ipackets"]
            lost_b = stats[port_1]["opackets"] - stats[port_0]["ipackets"]

            if latency:
                # NOTE(review): the "latency" section appears keyed by the
                # latency streams' pg_id (0 and 1), not by port id — confirm
                # against the traffic profiles before changing these indices.
                lat_a = fmt_latency(
                    str(stats["latency"][0]["latency"]["total_min"]),
                    str(stats["latency"][0]["latency"]["average"]),
                    str(stats["latency"][0]["latency"]["total_max"]))
                lat_b = fmt_latency(
                    str(stats["latency"][1]["latency"]["total_min"]),
                    str(stats["latency"][1]["latency"]["average"]),
                    str(stats["latency"][1]["latency"]["total_max"]))

            total_sent = stats[port_0]["opackets"] + stats[port_1]["opackets"]
            total_rcvd = stats[port_0]["ipackets"] + stats[port_1]["ipackets"]

            print("\npackets lost from 0 --> 1: {0} pkts".format(lost_a))
            print("packets lost from 1 --> 0: {0} pkts".format(lost_b))

    except STLError as err:
        sys.stderr.write("{0}\n".format(err))
        sys.exit(1)

    finally:
        if async_start:
            # Leave traffic running; just release the ports to the caller.
            if client:
                client.disconnect(stop_traffic=False, release_ports=True)
        else:
            if client:
                client.disconnect()
            # Machine-parsable result line consumed by the calling framework:
            print("rate={0}, totalReceived={1}, totalSent={2}, "
                  "frameLoss={3}, latencyStream0(usec)={4}, "
                  "latencyStream1(usec)={5}".
                  format(rate, total_rcvd, total_sent, lost_a + lost_b,
                         lat_a, lat_b))
def send_traffic_unidirection(profile_file, duration, framesize, rate,
                              warmup_time, port_0, port_1, latency,
                              async_start=False):
    """Send traffic unidirection and measure packet loss and latency.

    Only port_0 transmits; received packets are counted on port_1.

    :param profile_file: A python module with T-rex traffic profile.
    :param framesize: Frame size.
    :param duration: Duration of traffic run in seconds (-1=infinite).
    :param rate: Traffic rate [percentage, pps, bps].
    :param warmup_time: Traffic warm-up time in seconds, 0 = disable.
    :param port_0: Port 0 on the traffic generator (TX side).
    :param port_1: Port 1 on the traffic generator (RX side).
    :param latency: With latency stats.
    :param async_start: Start the traffic and exit.
    :type profile_file: str
    :type framesize: int or str
    :type duration: float
    :type rate: str
    :type warmup_time: float
    :type port_0: int
    :type port_1: int
    :type latency: bool
    :type async_start: bool
    """
    client = None
    total_rcvd = 0
    total_sent = 0
    lost_a = 0
    lat_a = "-1/-1/-1"

    # Read the profile:
    try:
        print("### Profile file:\n{}".format(profile_file))
        profile = STLProfile.load(profile_file, direction=0, port_id=0,
                                  framesize=framesize)
        streams = profile.get_streams()
    except STLError as err:
        print("Error while loading profile '{0}' {1}".format(profile_file, err))
        sys.exit(1)

    try:
        # Create the client:
        client = STLClient(verbose_level=LoggerApi.VERBOSE_QUIET)
        # Connect to server:
        client.connect()

        # Prepare the ports. The two near-identical reset branches collapse
        # into a single computed port list: one port when TX and RX are the
        # same physical port, both ports otherwise.
        # NOTE(review): the original guarded the branches with a condition
        # not visible here; `port_0 == port_1` is the assumed predicate —
        # confirm against version history.
        ports = [port_0] if port_0 == port_1 else [port_0, port_1]
        client.reset(ports=ports)
        client.remove_all_streams(ports=ports)

        if "macsrc" in profile_file:
            # Profiles randomizing the source MAC need promiscuous mode,
            # otherwise the NIC drops the returning frames.
            client.set_port_attr(ports=ports, promiscuous=True,
                                 resolve=False)

        # Traffic is transmitted from port_0 only:
        if isinstance(framesize, int):
            client.add_streams(streams[0], ports=[port_0])
        elif isinstance(framesize, str):
            # IMIX profiles carry three data streams per direction.
            client.add_streams(streams[0:3], ports=[port_0])

        if latency:
            try:
                if isinstance(framesize, int):
                    client.add_streams(streams[2], ports=[port_0])
                elif isinstance(framesize, str):
                    # Latency streams are not defined for IMIX profiles.
                    latency = False
            except STLError:
                # Disable latency if NIC does not support requested stream type
                print("##### FAILED to add latency streams #####")
                latency = False

        # Warm-up phase:
        if warmup_time > 0:
            # Clear the stats before injecting:
            client.clear_stats()

            # Choose rate and start traffic:
            client.start(ports=[port_0], mult=rate,
                         duration=warmup_time)

            # Block until done:
            client.wait_on_traffic(ports=[port_0],
                                   timeout=warmup_time+30)

            if client.get_warnings():
                for warning in client.get_warnings():
                    print(warning)

            # Read the stats after the test:
            stats = client.get_stats()

            print("##### Warmup statistics #####")
            print(json.dumps(stats, indent=4, separators=(',', ': '),
                             sort_keys=True))

            lost_a = stats[port_0]["opackets"] - stats[port_1]["ipackets"]
            print("\npackets lost : {0} pkts".format(lost_a))

        # Clear the stats before injecting:
        client.clear_stats()
        lost_a = 0

        # Choose rate and start traffic:
        client.start(ports=[port_0], mult=rate, duration=duration)

        if not async_start:
            # Block until done:
            client.wait_on_traffic(ports=[port_0], timeout=duration+30)

            if client.get_warnings():
                for warning in client.get_warnings():
                    print(warning)

            # Read the stats after the test
            stats = client.get_stats()

            print("##### Statistics #####")
            print(json.dumps(stats, indent=4, separators=(',', ': '),
                             sort_keys=True))

            lost_a = stats[port_0]["opackets"] - stats[port_1]["ipackets"]

            if latency:
                # NOTE(review): the "latency" section appears keyed by the
                # latency stream's pg_id (0 here), not by port id — confirm.
                lat_a = fmt_latency(
                    str(stats["latency"][0]["latency"]["total_min"]),
                    str(stats["latency"][0]["latency"]["average"]),
                    str(stats["latency"][0]["latency"]["total_max"]))

            total_sent = stats[port_0]["opackets"]
            total_rcvd = stats[port_1]["ipackets"]

            print("\npackets lost : {0} pkts".format(lost_a))

    except STLError as err:
        sys.stderr.write("{0}\n".format(err))
        sys.exit(1)

    finally:
        if async_start:
            # Leave traffic running; just release the ports to the caller.
            if client:
                client.disconnect(stop_traffic=False, release_ports=True)
        else:
            if client:
                client.disconnect()
            # Machine-parsable result line consumed by the calling framework:
            print("rate={0}, totalReceived={1}, totalSent={2}, "
                  "frameLoss={3}, latencyStream0(usec)={4}".
                  format(rate, total_rcvd, total_sent, lost_a, lat_a))
def main():
    """Main function for the traffic generator using T-rex.

    It verifies the given command line arguments and runs "simple_burst"
    function.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--profile",
                        required=True,
                        type=str,
                        help="Python traffic profile.")
    parser.add_argument("-d", "--duration",
                        required=True,
                        type=float,
                        help="Duration of traffic run.")
    parser.add_argument("-s", "--frame_size",
                        required=True,
                        help="Size of a Frame without padding and IPG.")
    parser.add_argument("-r", "--rate",
                        required=True,
                        # FIX: argparse %-formats help strings, so a literal
                        # percent sign must be escaped as "%%" or `-h` raises
                        # "unsupported format character".
                        help="Traffic rate with included units (%%, pps).")
    parser.add_argument("-w", "--warmup_time",
                        type=float,
                        default=5.0,
                        help="Traffic warm-up time in seconds, 0 = disable.")
    parser.add_argument("--port_0",
                        required=True,
                        type=int,
                        help="Port 0 on the traffic generator.")
    parser.add_argument("--port_1",
                        required=True,
                        type=int,
                        help="Port 1 on the traffic generator.")
    parser.add_argument("--async",
                        action="store_true",
                        default=False,
                        help="Non-blocking call of the script.")
    parser.add_argument("--latency",
                        action="store_true",
                        default=False,
                        help="Add latency stream.")
    parser.add_argument("--unidirection",
                        action="store_true",
                        default=False,
                        help="Send unidirection traffic.")

    args = parser.parse_args()

    # Frame size is either a number of bytes or an IMIX profile name:
    try:
        framesize = int(args.frame_size)
    except ValueError:
        framesize = args.frame_size

    simple_burst(profile_file=args.profile,
                 duration=args.duration,
                 framesize=framesize,
                 rate=args.rate,
                 warmup_time=args.warmup_time,
                 port_0=args.port_0,
                 port_1=args.port_1,
                 latency=args.latency,
                 # FIX: `async` is a keyword since Python 3.7, so the
                 # attribute must be fetched with getattr(); the --async
                 # CLI flag itself is unchanged.
                 async_start=getattr(args, "async"),
                 unidirection=args.unidirection)


if __name__ == '__main__':
    main()