3 # Copyright (c) 2019 Cisco and/or its affiliates.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at:
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
16 """This module gets a traffic profile together with other parameters, reads
17 the profile and sends the traffic. At the end, it measures the packet loss and
latency.
"""
25 sys.path.insert(0, "/opt/trex-core-2.35/scripts/automation/"
26 "trex_control_plane/stl/")
28 from trex_stl_lib.api import *
def fmt_latency(lat_min, lat_avg, lat_max):
    """Return formatted, rounded latency as "min/avg/max".

    Each value is converted to float, rounded to the nearest integer and
    joined with "/". A value that cannot be parsed as a number (e.g. an
    "N/A" field coming from T-rex statistics) is reported as -1 instead
    of raising, so one bad field cannot abort the whole results line.

    :param lat_min: Min latency.
    :param lat_avg: Average latency.
    :param lat_max: Max latency.
    :type lat_min: str or float or int
    :type lat_avg: str or float or int
    :type lat_max: str or float or int
    :return: Formatted and rounded output "min/avg/max".
    :rtype: str
    """
    try:
        t_min = int(round(float(lat_min)))
    except ValueError:
        t_min = int(-1)
    try:
        t_avg = int(round(float(lat_avg)))
    except ValueError:
        t_avg = int(-1)
    try:
        t_max = int(round(float(lat_max)))
    except ValueError:
        t_max = int(-1)

    return "/".join(str(tmp) for tmp in (t_min, t_avg, t_max))
# NOTE(review): this extract is line-sampled -- the leading integer on every
# line is the original file's line number, and the gaps between those numbers
# are lines missing from this view (the enclosing try:/except/finally
# scaffolding, client.connect(), the `ports` list construction, the
# `if not unidirection:` guards, sys.exit() calls, warm-up `if`, etc.).
# Code below is kept byte-identical; only comments were added.
60 def simple_burst(profile_file, duration, framesize, rate, warmup_time, port_0,
61 port_1, latency, async_start=False, unidirection=False):
62 """Send traffic and measure packet loss and latency.
65 - reads the given traffic profile with streams,
66 - connects to the T-rex client,
68 - removes all existing streams,
69 - adds streams from the traffic profile to the ports,
70 - if the warm-up time is more than 0, sends the warm-up traffic, reads the
72 - clears the statistics from the client,
74 - waits for the defined time (or runs forever if async mode is defined),
76 - reads and displays the statistics and
77 - disconnects from the client.
79 :param profile_file: A python module with T-rex traffic profile.
80 :param framesize: Frame size.
81 :param duration: Duration of traffic run in seconds (-1=infinite).
82 :param rate: Traffic rate [percentage, pps, bps].
83 :param warmup_time: Traffic warm-up time in seconds, 0 = disable.
84 :param port_0: Port 0 on the traffic generator.
85 :param port_1: Port 1 on the traffic generator.
86 :param latency: With latency stats.
87 :param async_start: Start the traffic and exit.
88 :param unidirection: Traffic is unidirectional.
89 :type profile_file: str
90 :type framesize: int or str
93 :type warmup_time: float
97 :type async_start: bool
98 :type unidirection: bool
# --- Load the traffic profile (the opening try: of this block is not
# visible in this extract). STLProfile.load imports the python profile
# module; `framesize` is presumably forwarded as a kwarg on the elided
# continuation of line 112 -- TODO confirm against upstream.
111 print("### Profile file:\n{}".format(profile_file))
112 profile = STLProfile.load(profile_file, direction=0, port_id=0,
114 streams = profile.get_streams()
115 except STLError as err:
116 print("Error while loading profile '{0}' {1}".format(profile_file, err))
# --- Connect and prepare ports. QUIET verbosity keeps the T-rex client
# from polluting the parseable stdout results.
121 client = STLClient(verbose_level=LoggerApi.VERBOSE_QUIET)
124 # Prepare our ports (the machine has 0 <--> 1 with static route):
125 client.reset(ports=[port_0, port_1])
126 client.remove_all_streams(ports=[port_0, port_1])
# Profiles that randomize the source MAC need promiscuous mode, keyed off
# the profile file name by convention.
128 if "macsrc" in profile_file:
129 client.set_port_attr(ports=[port_0, port_1], promiscuous=True,
# --- Attach streams. int framesize => one base stream per direction;
# str framesize (e.g. IMIX) => three streams per direction (slices 0:3 and
# 3:6). The `if not unidirection:` guards around the port_1 adds sit in
# the elided lines (133, 137) -- TODO confirm.
131 if isinstance(framesize, int):
132 client.add_streams(streams[0], ports=[port_0])
134 client.add_streams(streams[1], ports=[port_1])
135 elif isinstance(framesize, str):
136 client.add_streams(streams[0:3], ports=[port_0])
138 client.add_streams(streams[3:6], ports=[port_1])
# --- Latency streams (indices 2/3 for int-framesize profiles, per the
# visible add_streams calls). The surrounding `if latency:` / try: lines
# are elided; on failure latency is presumably disabled rather than fatal.
141 if isinstance(framesize, int):
142 client.add_streams(streams[2], ports=[port_0])
144 client.add_streams(streams[3], ports=[port_1])
145 elif isinstance(framesize, str):
148 # Disable latency if NIC does not support requested stream type
149 print("##### FAILED to add latency streams #####")
# --- Warm-up phase (guarded by an elided `if warmup_time > 0:`): run the
# same rate for warmup_time, report the stats, then discard them. `ports`
# is built in elided lines ([port_0], plus port_1 when bidirectional).
156 # Clear the stats before injecting:
159 # Choose rate and start traffic:
160 client.start(ports=ports, mult=rate, duration=warmup_time)
# Block until warm-up finishes; +30 s slack over the nominal duration.
163 client.wait_on_traffic(ports=ports, timeout=warmup_time+30)
165 if client.get_warnings():
166 for warning in client.get_warnings():
169 # Read the stats after the test:
170 stats = client.get_stats()
172 print("##### Warmup statistics #####")
173 print(json.dumps(stats, indent=4, separators=(',', ': '),
# Loss per direction = sent on one port minus received on the other.
176 lost_a = stats[port_0]["opackets"] - stats[port_1]["ipackets"]
178 lost_b = stats[port_1]["opackets"] - stats[port_0]["ipackets"]
180 print("\npackets lost from {p_0} --> {p_1}: {v} pkts".format(
181 p_0=port_0, p_1=port_1, v=lost_a))
183 print("packets lost from {p_1} --> {p_0}: {v} pkts".format(
184 p_0=port_0, p_1=port_1, v=lost_b))
# --- Main measured run: clear stats, start at `rate` for `duration`.
# In async mode the function presumably returns here without waiting
# (elided lines around 193-195) -- TODO confirm.
186 # Clear the stats before injecting:
191 # Choose rate and start traffic:
192 client.start(ports=ports, mult=rate, duration=duration)
196 client.wait_on_traffic(ports=ports, timeout=duration+30)
198 if client.get_warnings():
199 for warning in client.get_warnings():
202 # Read the stats after the test
203 stats = client.get_stats()
205 print("##### Statistics #####")
206 print(json.dumps(stats, indent=4, separators=(',', ': '),
209 lost_a = stats[port_0]["opackets"] - stats[port_1]["ipackets"]
211 lost_b = stats[port_1]["opackets"] - stats[port_0]["ipackets"]
# Per-port latency summaries; the fmt_latency(...) call wrapping these
# argument lines is elided (lines 214 and 219).
215 str(stats["latency"][port_0]["latency"]["total_min"]),
216 str(stats["latency"][port_0]["latency"]["average"]),
217 str(stats["latency"][port_0]["latency"]["total_max"]))
220 str(stats["latency"][port_1]["latency"]["total_min"]),
221 str(stats["latency"][port_1]["latency"]["average"]),
222 str(stats["latency"][port_1]["latency"]["total_max"]))
# Bidirectional totals sum both ports (note: hard-coded indices 0/1 here
# vs port_0/port_1 elsewhere -- NOTE(review): verify this is intentional);
# unidirectional totals count only port_0 TX / port_1 RX.
225 total_sent = stats[0]["opackets"] + stats[1]["opackets"]
226 total_rcvd = stats[0]["ipackets"] + stats[1]["ipackets"]
228 total_sent = stats[port_0]["opackets"]
229 total_rcvd = stats[port_1]["ipackets"]
231 print("\npackets lost from {p_0} --> {p_1}: {v} pkts".format(
232 p_0=port_0, p_1=port_1, v=lost_a))
234 print("packets lost from {p_1} --> {p_0}: {v} pkts".format(
235 p_0=port_0, p_1=port_1, v=lost_b))
237 except STLError as err:
238 sys.stderr.write("{0}\n".format(err))
# --- Cleanup (in an elided finally:): when not async, release everything;
# stop_traffic=False because traffic has already finished (or, in async
# mode, must be left running).
244 client.disconnect(stop_traffic=False, release_ports=True)
# Machine-parseable one-line summary consumed by the caller.
248 print("rate={0}, totalReceived={1}, totalSent={2}, "
249 "frameLoss={3}, latencyStream0(usec)={4}, "
250 "latencyStream1(usec)={5}".
251 format(rate, total_rcvd, total_sent, lost_a + lost_b,
# NOTE(review): the `def main():` line itself (original line ~255) is elided
# from this extract, as are the positional/required/type/action arguments of
# each add_argument call. Code kept byte-identical; only comments added.
256 """Main function for the traffic generator using T-rex.
258 It verifies the given command line arguments and runs "simple_burst"
# --- Command-line interface. Each option's dest mirrors a simple_burst
# parameter; the type=/required=/action= kwargs are on elided lines.
262 parser = argparse.ArgumentParser()
263 parser.add_argument("-p", "--profile",
266 help="Python traffic profile.")
267 parser.add_argument("-d", "--duration",
270 help="Duration of traffic run.")
271 parser.add_argument("-s", "--frame_size",
273 help="Size of a Frame without padding and IPG.")
274 parser.add_argument("-r", "--rate",
276 help="Traffic rate with included units (%, pps).")
277 parser.add_argument("-w", "--warmup_time",
280 help="Traffic warm-up time in seconds, 0 = disable.")
281 parser.add_argument("--port_0",
284 help="Port 0 on the traffic generator.")
285 parser.add_argument("--port_1",
288 help="Port 1 on the traffic generator.")
# NOTE(review): argparse maps "--async" to attribute name `async`, and
# `args.async` below is a hard SyntaxError on Python >= 3.7 where `async`
# is a reserved keyword. Fix by adding dest="async_start" here (or using
# getattr(args, "async")) if this script must run on Python 3.
289 parser.add_argument("--async",
292 help="Non-blocking call of the script.")
293 parser.add_argument("--latency",
296 help="Add latency stream.")
297 parser.add_argument("--unidirection",
300 help="Send unidirection traffic.")
302 args = parser.parse_args()
# Frame size is an int for plain sizes; non-numeric values (e.g. "IMIX_v4_1")
# fall through as strings -- the try/except around this sits on elided lines.
305 framesize = int(args.frame_size)
307 framesize = args.frame_size
# Forward everything to the worker; rate/port args elided at lines 311-315.
309 simple_burst(profile_file=args.profile,
310 duration=args.duration,
313 warmup_time=args.warmup_time,
316 latency=args.latency,
317 async_start=args.async,
318 unidirection=args.unidirection)
# Script entry point; the `main()` call on the following original line is
# elided from this extract.
321 if __name__ == '__main__':