3 # Copyright (c) 2019 Cisco and/or its affiliates.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at:
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
16 """This module gets a traffic profile together with other parameters, reads
17 the profile and sends the traffic. At the end, it measures the packet loss and
25 sys.path.insert(0, "/opt/trex-core-2.61/scripts/automation/"
26 "trex_control_plane/interactive/")
27 from trex.stl.api import *
def fmt_latency(lat_min, lat_avg, lat_max):
    """Return formatted, rounded latency.

    Each value is converted to float, rounded to the nearest integer and
    joined as "min/avg/max". A value that cannot be parsed as a number
    (e.g. a missing-statistic placeholder) is reported as -1 instead of
    crashing the whole measurement report.

    :param lat_min: Min latency
    :param lat_avg: Average latency
    :param lat_max: Max latency
    :type lat_min: str
    :type lat_avg: str
    :type lat_max: str
    :return: Formatted and rounded output "min/avg/max"
    :rtype: str
    """
    try:
        t_min = int(round(float(lat_min)))
    except ValueError:
        # Non-numeric input; use the -1 sentinel the rest of the script
        # already uses for "latency unavailable".
        t_min = int(-1)
    try:
        t_avg = int(round(float(lat_avg)))
    except ValueError:
        t_avg = int(-1)
    try:
        t_max = int(round(float(lat_max)))
    except ValueError:
        t_max = int(-1)

    return "/".join(str(tmp) for tmp in (t_min, t_avg, t_max))
def simple_burst(profile_file, duration, framesize, rate, warmup_time, port_0,
                 port_1, latency, async_start=False, traffic_directions=2):
    """Send traffic and measure packet loss and latency.

    - reads the given traffic profile with streams,
    - connects to the T-rex client,
    - removes all existing streams,
    - adds streams from the traffic profile to the ports,
    - if the warm-up time is more than 0, sends the warm-up traffic, reads the
      statistics,
    - clears the statistics from the client,
    - waits for the defined time (or runs forever if async mode is defined),
    - reads and displays the statistics and
    - disconnects from the client.

    :param profile_file: A python module with T-rex traffic profile.
    :param framesize: Frame size.
    :param duration: Duration of traffic run in seconds (-1=infinite).
    :param rate: Traffic rate [percentage, pps, bps].
    :param warmup_time: Traffic warm-up time in seconds, 0 = disable.
    :param port_0: Port 0 on the traffic generator.
    :param port_1: Port 1 on the traffic generator.
    :param latency: With latency stats.
    :param async_start: Start the traffic and exit.
    :param traffic_directions: Bidirectional (2) or unidirectional (1) traffic.
    :type profile_file: str
    :type framesize: int or str
    :type warmup_time: float
    :type async_start: bool
    :type traffic_directions: int
    """
        # Load the traffic profile module and extract its stream definitions.
        print("### Profile file:\n{}".format(profile_file))
        profile = STLProfile.load(profile_file, direction=0, port_id=0,
        streams = profile.get_streams()
    except STLError as err:
        print("Error while loading profile '{0}' {1}".format(profile_file, err))

        # Prepare our ports (the machine has 0 <--> 1 with static route):
        client.reset(ports=[port_0, port_1])
        client.remove_all_streams(ports=[port_0, port_1])

        # Enable promiscuous RX for profiles that vary the source MAC
        # (detected by "macsrc" in the profile file name).
        if "macsrc" in profile_file:
            client.set_port_attr(ports=[port_0, port_1], promiscuous=True)
        # NOTE(review): int framesize presumably selects a single-stream
        # profile; a str framesize (e.g. IMIX) a three-streams-per-direction
        # profile — verify against the profile modules.
        if isinstance(framesize, int):
            client.add_streams(streams[0], ports=[port_0])
            if traffic_directions > 1:
                client.add_streams(streams[1], ports=[port_1])
        elif isinstance(framesize, str):
            client.add_streams(streams[0:3], ports=[port_0])
            if traffic_directions > 1:
                client.add_streams(streams[3:6], ports=[port_1])

                # Latency streams follow the base streams in the profile list.
                if isinstance(framesize, int):
                    client.add_streams(streams[2], ports=[port_0])
                    if traffic_directions > 1:
                        client.add_streams(streams[3], ports=[port_1])
                elif isinstance(framesize, str):
                # Disable latency if NIC does not support requested stream type
                print("##### FAILED to add latency streams #####")

        if traffic_directions > 1:

            # Clear the stats before injecting:

            # Choose rate and start traffic:
            client.start(ports=ports, mult=rate, duration=warmup_time)

            # Block until the warm-up run completes, with a 30 s grace period.
            client.wait_on_traffic(ports=ports, timeout=warmup_time+30)

            if client.get_warnings():
                for warning in client.get_warnings():

            # Read the stats after the test:
            stats = client.get_stats()

            print("##### Warmup statistics #####")
            print(json.dumps(stats, indent=4, separators=(',', ': ')))

            # Per-direction loss = packets sent on one port minus packets
            # received on the opposite port.
            lost_a = stats[port_0]["opackets"] - stats[port_1]["ipackets"]
            if traffic_directions > 1:
                lost_b = stats[port_1]["opackets"] - stats[port_0]["ipackets"]

            print("\npackets lost from {p_0} --> {p_1}: {v} pkts".format(
                p_0=port_0, p_1=port_1, v=lost_a))
            if traffic_directions > 1:
                print("packets lost from {p_1} --> {p_0}: {v} pkts".format(
                    p_0=port_0, p_1=port_1, v=lost_b))

        # Clear the stats before injecting:

        # Choose rate and start traffic:
        client.start(ports=ports, mult=rate, duration=duration)

            # Block until the measured run completes (30 s grace period).
            client.wait_on_traffic(ports=ports, timeout=duration+30)

            if client.get_warnings():
                for warning in client.get_warnings():

            # Read the stats after the test
            stats = client.get_stats()

            print("##### Statistics #####")
            print(json.dumps(stats, indent=4, separators=(',', ': ')))

            lost_a = stats[port_0]["opackets"] - stats[port_1]["ipackets"]
            if traffic_directions > 1:
                lost_b = stats[port_1]["opackets"] - stats[port_0]["ipackets"]

            # Stats index is not a port number, but "pgid".
            # TODO: Find out what "pgid" means.
                    str(stats["latency"][0]["latency"]["total_min"]),
                    str(stats["latency"][0]["latency"]["average"]),
                    str(stats["latency"][0]["latency"]["total_max"]))
                if traffic_directions > 1:
                        str(stats["latency"][1]["latency"]["total_min"]),
                        str(stats["latency"][1]["latency"]["average"]),
                        str(stats["latency"][1]["latency"]["total_max"]))

            # Totals: sum both ports for bidirectional traffic, otherwise
            # only the sending/receiving pair.
            if traffic_directions > 1:
                total_sent = stats[0]["opackets"] + stats[1]["opackets"]
                total_rcvd = stats[0]["ipackets"] + stats[1]["ipackets"]
                total_sent = stats[port_0]["opackets"]
                total_rcvd = stats[port_1]["ipackets"]

            print("\npackets lost from {p_0} --> {p_1}: {v} pkts".format(
                p_0=port_0, p_1=port_1, v=lost_a))
            if traffic_directions > 1:
                print("packets lost from {p_1} --> {p_0}: {v} pkts".format(
                    p_0=port_0, p_1=port_1, v=lost_b))

    except STLError as ex_error:
        print(ex_error, file=sys.stderr)

            # Release the ports but leave traffic running (relevant for the
            # async mode where the caller stops it later).
            client.disconnect(stop_traffic=False, release_ports=True)

            # NOTE(review): single-line summary, presumably parsed by the
            # calling framework — keep the format stable.
            print("rate={0!r}, totalReceived={1}, totalSent={2}, "
                  "frameLoss={3}, latencyStream0(usec)={4}, "
                  "latencyStream1(usec)={5}, targetDuration={d!r}".
                  format(rate, total_rcvd, total_sent, lost_a + lost_b,
                         lat_a, lat_b, d=duration))
252 """Main function for the traffic generator using T-rex.
254 It verifies the given command line arguments and runs "simple_burst"
257 parser = argparse.ArgumentParser()
258 parser.add_argument("-p", "--profile",
261 help="Python traffic profile.")
262 parser.add_argument("-d", "--duration",
265 help="Duration of traffic run.")
266 parser.add_argument("-s", "--frame_size",
268 help="Size of a Frame without padding and IPG.")
269 parser.add_argument("-r", "--rate",
271 help="Traffic rate with included units (%, pps).")
272 parser.add_argument("-w", "--warmup_time",
275 help="Traffic warm-up time in seconds, 0 = disable.")
276 parser.add_argument("--port_0",
279 help="Port 0 on the traffic generator.")
280 parser.add_argument("--port_1",
283 help="Port 1 on the traffic generator.")
284 parser.add_argument("--async",
287 help="Non-blocking call of the script.")
288 parser.add_argument("--latency",
291 help="Add latency stream.")
292 parser.add_argument("--traffic_directions",
295 help="Send bi- (2) or uni- (1) directional traffic.")
297 args = parser.parse_args()
300 framesize = int(args.frame_size)
302 framesize = args.frame_size
304 simple_burst(profile_file=args.profile,
305 duration=args.duration,
308 warmup_time=args.warmup_time,
311 latency=args.latency,
312 async_start=args.async,
313 traffic_directions=args.traffic_directions)
316 if __name__ == '__main__':