3 # Copyright (c) 2019 Cisco and/or its affiliates.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at:
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
16 """This module gets a traffic profile together with other parameters, reads
17 the profile and sends the traffic. At the end, it measures the packet loss and latency.
25 sys.path.insert(0, "/opt/trex-core-2.54/scripts/automation/"
26 "trex_control_plane/interactive/")
27 from trex.stl.api import *
def fmt_latency(lat_min, lat_avg, lat_max):
    """Return formatted, rounded latency.

    TRex statistics report latency values as strings and may yield a
    non-numeric value (e.g. "N/A") when the latency measurement is
    unavailable; such values are mapped to the sentinel -1 instead of
    letting ValueError propagate and abort the whole test run.

    :param lat_min: Min latency
    :param lat_avg: Average latency
    :param lat_max: Max latency
    :type lat_min: str
    :type lat_avg: str
    :type lat_max: str
    :return: Formatted and rounded output "min/avg/max"
    :rtype: str
    """
    try:
        t_min = int(round(float(lat_min)))
    except ValueError:
        t_min = int(-1)
    try:
        t_avg = int(round(float(lat_avg)))
    except ValueError:
        t_avg = int(-1)
    try:
        t_max = int(round(float(lat_max)))
    except ValueError:
        t_max = int(-1)
    return "/".join(str(tmp) for tmp in (t_min, t_avg, t_max))
# NOTE(review): this chunk is a sampled excerpt of the original file — the
# leading integer on each line is the original file's own line number, and
# the gaps in that numbering show that many lines are missing here (the
# `try:` matching the `except STLError` below, the client connect call,
# several `else:` branches, and the lat_a/lat_b assignments, among others).
# The code is preserved byte-for-byte; only comments were added. Do not
# attempt to run this excerpt as-is — restore it from the full source.
59 def simple_burst(profile_file, duration, framesize, rate, warmup_time, port_0,
60 port_1, latency, async_start=False, unidirection=False):
61 """Send traffic and measure packet loss and latency.
64 - reads the given traffic profile with streams,
65 - connects to the T-rex client,
67 - removes all existing streams,
68 - adds streams from the traffic profile to the ports,
69 - if the warm-up time is more than 0, sends the warm-up traffic, reads the
71 - clears the statistics from the client,
73 - waits for the defined time (or runs forever if async mode is defined),
75 - reads and displays the statistics and
76 - disconnects from the client.
78 :param profile_file: A python module with T-rex traffic profile.
79 :param framesize: Frame size.
80 :param duration: Duration of traffic run in seconds (-1=infinite).
81 :param rate: Traffic rate [percentage, pps, bps].
82 :param warmup_time: Traffic warm-up time in seconds, 0 = disable.
83 :param port_0: Port 0 on the traffic generator.
84 :param port_1: Port 1 on the traffic generator.
85 :param latency: With latency stats.
86 :param async_start: Start the traffic and exit.
87 :param unidirection: Traffic is unidirectional.
88 :type profile_file: str
89 :type framesize: int or str
92 :type warmup_time: float
96 :type async_start: bool
97 :type unidirection: bool
# Load the traffic profile module and extract its streams; a missing `try:`
# (original line ~109) should enclose the next lines for the except below.
110 print("### Profile file:\n{}".format(profile_file))
111 profile = STLProfile.load(profile_file, direction=0, port_id=0,
113 streams = profile.get_streams()
114 except STLError as err:
115 print("Error while loading profile '{0}' {1}".format(profile_file, err))
123 # Prepare our ports (the machine has 0 <--> 1 with static route):
124 client.reset(ports=[port_0, port_1])
125 client.remove_all_streams(ports=[port_0, port_1])
# Profiles that randomize the source MAC need promiscuous mode on the NIC.
127 if "macsrc" in profile_file:
128 client.set_port_attr(ports=[port_0, port_1], promiscuous=True,
# Attach base streams: int framesize -> one stream per direction;
# str framesize (e.g. IMIX) -> three streams per direction.
130 if isinstance(framesize, int):
131 client.add_streams(streams[0], ports=[port_0])
133 client.add_streams(streams[1], ports=[port_1])
134 elif isinstance(framesize, str):
135 client.add_streams(streams[0:3], ports=[port_0])
137 client.add_streams(streams[3:6], ports=[port_1])
# Optionally attach the dedicated latency streams (indices 2/3 after the
# base pair) — presumably guarded by `if latency:` in a missing line.
140 if isinstance(framesize, int):
141 client.add_streams(streams[2], ports=[port_0])
143 client.add_streams(streams[3], ports=[port_1])
144 elif isinstance(framesize, str):
147 # Disable latency if NIC does not support requested stream type
148 print("##### FAILED to add latency streams #####")
# --- Warm-up phase (only when warmup_time > 0 in the full source) ---
155 # Clear the stats before injecting:
158 # Choose rate and start traffic:
159 client.start(ports=ports, mult=rate, duration=warmup_time)
# Block until warm-up completes; +30 s slack on the timeout.
162 client.wait_on_traffic(ports=ports, timeout=warmup_time+30)
164 if client.get_warnings():
165 for warning in client.get_warnings():
168 # Read the stats after the test:
169 stats = client.get_stats()
171 print("##### Warmup statistics #####")
172 print(json.dumps(stats, indent=4, separators=(',', ': '),
# Warm-up loss per direction = sent on one port minus received on the other.
175 lost_a = stats[port_0]["opackets"] - stats[port_1]["ipackets"]
177 lost_b = stats[port_1]["opackets"] - stats[port_0]["ipackets"]
179 print("\npackets lost from {p_0} --> {p_1}: {v} pkts".format(
180 p_0=port_0, p_1=port_1, v=lost_a))
182 print("packets lost from {p_1} --> {p_0}: {v} pkts".format(
183 p_0=port_0, p_1=port_1, v=lost_b))
# --- Measurement phase ---
185 # Clear the stats before injecting:
190 # Choose rate and start traffic:
191 client.start(ports=ports, mult=rate, duration=duration)
# In async mode the full source returns here instead of waiting.
195 client.wait_on_traffic(ports=ports, timeout=duration+30)
197 if client.get_warnings():
198 for warning in client.get_warnings():
201 # Read the stats after the test
202 stats = client.get_stats()
204 print("##### Statistics #####")
205 print(json.dumps(stats, indent=4, separators=(',', ': '),
208 lost_a = stats[port_0]["opackets"] - stats[port_1]["ipackets"]
210 lost_b = stats[port_1]["opackets"] - stats[port_0]["ipackets"]
# Latency min/avg/max per direction — presumably fed to fmt_latency() into
# lat_a / lat_b in the missing surrounding lines; verify against full source.
214 str(stats["latency"][port_0]["latency"]["total_min"]),
215 str(stats["latency"][port_0]["latency"]["average"]),
216 str(stats["latency"][port_0]["latency"]["total_max"]))
219 str(stats["latency"][port_1]["latency"]["total_min"]),
220 str(stats["latency"][port_1]["latency"]["average"]),
221 str(stats["latency"][port_1]["latency"]["total_max"]))
# Bidirectional totals sum both ports; unidirectional counts one direction.
224 total_sent = stats[0]["opackets"] + stats[1]["opackets"]
225 total_rcvd = stats[0]["ipackets"] + stats[1]["ipackets"]
227 total_sent = stats[port_0]["opackets"]
228 total_rcvd = stats[port_1]["ipackets"]
230 print("\npackets lost from {p_0} --> {p_1}: {v} pkts".format(
231 p_0=port_0, p_1=port_1, v=lost_a))
233 print("packets lost from {p_1} --> {p_0}: {v} pkts".format(
234 p_0=port_0, p_1=port_1, v=lost_b))
236 except STLError as err:
237 sys.stderr.write("{0}\n".format(err))
# Release ports but leave traffic running when async_start was requested.
243 client.disconnect(stop_traffic=False, release_ports=True)
# Python 2 `unicode` normalization so the !r formatting below is stable;
# this script targets Python 2 (would raise NameError on Python 3).
247 if isinstance(rate, unicode):
248 rate = rate.encode("utf-8")
249 if isinstance(duration, unicode):
250 duration = duration.encode("utf-8")
# Machine-parsable one-line result consumed by the calling framework —
# do not change this format string.
251 print("rate={0!r}, totalReceived={1}, totalSent={2}, "
252 "frameLoss={3}, latencyStream0(usec)={4}, "
253 "latencyStream1(usec)={5}, targetDuration={d!r}".
254 format(rate, total_rcvd, total_sent, lost_a + lost_b,
255 lat_a, lat_b, d=duration))
# NOTE(review): sampled excerpt — the `def main():` header (original line
# ~258), the argparse keyword arguments (required=, type=, action=) and
# several call arguments (rate, framesize, ports) fall in the numbering
# gaps and are missing here. Code preserved byte-for-byte; comments only.
259 """Main function for the traffic generator using T-rex.
261 It verifies the given command line arguments and runs "simple_burst"
# Build the CLI: each add_argument's type/required/action kwargs are in
# the missing lines between the flag and its help= string.
265 parser = argparse.ArgumentParser()
266 parser.add_argument("-p", "--profile",
269 help="Python traffic profile.")
270 parser.add_argument("-d", "--duration",
273 help="Duration of traffic run.")
274 parser.add_argument("-s", "--frame_size",
276 help="Size of a Frame without padding and IPG.")
277 parser.add_argument("-r", "--rate",
279 help="Traffic rate with included units (%, pps).")
280 parser.add_argument("-w", "--warmup_time",
283 help="Traffic warm-up time in seconds, 0 = disable.")
284 parser.add_argument("--port_0",
287 help="Port 0 on the traffic generator.")
288 parser.add_argument("--port_1",
291 help="Port 1 on the traffic generator.")
292 parser.add_argument("--async",
295 help="Non-blocking call of the script.")
296 parser.add_argument("--latency",
299 help="Add latency stream.")
300 parser.add_argument("--unidirection",
303 help="Send unidirection traffic.")
305 args = parser.parse_args()
# Frame size is an int for fixed-size frames, left as str for profiles
# like IMIX — presumably a try/except around int() in the missing lines.
308 framesize = int(args.frame_size)
310 framesize = args.frame_size
# `args.async` works only on Python 2 — `async` became a keyword in
# Python 3.7; the full source would need getattr(args, "async") there.
312 simple_burst(profile_file=args.profile,
313 duration=args.duration,
316 warmup_time=args.warmup_time,
319 latency=args.latency,
320 async_start=args.async,
321 unidirection=args.unidirection)
# Script entry point; the `main()` call body falls past this excerpt.
324 if __name__ == '__main__':