3 # Copyright (c) 2017 Cisco and/or its affiliates.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at:
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
16 """This module gets a traffic profile together with other parameters, reads
17 the profile and sends the traffic. At the end, it measures the packet loss and
25 sys.path.insert(0, "/opt/trex-core-2.29/scripts/automation/"
26 "trex_control_plane/stl/")
28 from trex_stl_lib.api import *
def fmt_latency(lat_min, lat_avg, lat_max):
    """Return formatted, rounded latency.

    :param lat_min: Min latency
    :param lat_avg: Average latency
    :param lat_max: Max latency
    :type lat_min: str
    :type lat_avg: str
    :type lat_max: str
    :return: Formatted and rounded output "min/avg/max"
    :rtype: str
    """
    # T-rex may report a non-numeric value (e.g. "N/A") when latency stats
    # are unavailable; fall back to -1 per field instead of crashing the
    # whole measurement with a ValueError.
    try:
        t_min = int(round(float(lat_min)))
    except ValueError:
        t_min = int(-1)
    try:
        t_avg = int(round(float(lat_avg)))
    except ValueError:
        t_avg = int(-1)
    try:
        t_max = int(round(float(lat_max)))
    except ValueError:
        t_max = int(-1)

    return "/".join(str(tmp) for tmp in (t_min, t_avg, t_max))
# NOTE(review): this listing is a numbered, partial dump — each line carries
# its original file line number and many lines are missing (e.g. the try:/
# except:/finally: keywords whose bodies are visible below). Comments added
# here describe only what the visible lines show; code is left untouched.
60 def simple_burst(profile_file, duration, framesize, rate, warmup_time, port_0,
61 port_1, latency, async_start=False):
62 """Send the traffic and measure packet loss and latency.
65 - reads the given traffic profile with streams,
66 - connects to the T-rex client,
68 - removes all existing streams,
69 - adds streams from the traffic profile to the ports,
70 - if the warm-up time is more than 0, sends the warm-up traffic, reads the
72 - clears the statistics from the client,
74 - waits for the defined time (or runs forever if async mode is defined),
76 - reads and displays the statistics and
77 - disconnects from the client.
79 :param profile_file: A python module with T-rex traffic profile.
80 :param framesize: Frame size.
81 :param duration: Duration of traffic run in seconds (-1=infinite).
82 :param rate: Traffic rate [percentage, pps, bps].
83 :param warmup_time: Traffic warm-up time in seconds, 0 = disable.
84 :param port_0: Port 0 on the traffic generator.
85 :param port_1: Port 1 on the traffic generator.
86 :param latency: With latency stats.
87 :param async_start: Start the traffic and exit.
88 :type profile_file: str
89 :type framesize: int or str
92 :type warmup_time: int
96 :type async_start: bool
# Load the traffic profile module and dump it for the log; the enclosing
# try: statement is not visible in this dump.
109 print("### Profile file:\n{}".format(profile_file))
110 profile = STLProfile.load(profile_file, direction=0, port_id=0,
112 print("\n### Profiles ###\n")
113 print(profile.dump_to_yaml())
114 streams = profile.get_streams()
# Error path — presumably the body of an except clause (not visible) around
# the profile load above; the script likely exits here. TODO confirm.
116 print("Error while loading profile '{0}'\n".format(profile_file))
# Quiet stateless client; the connect() call is not visible in this dump.
121 client = STLClient(verbose_level=LoggerApi.VERBOSE_QUIET)
124 # Prepare our ports (the machine has 0 <--> 1 with static route):
125 client.reset(ports=[port_0, port_1])
126 client.remove_all_streams(ports=[port_0, port_1])
# Promiscuous mode is only needed for profiles that randomize the source
# MAC (profile filename contains "macsrc").
128 if "macsrc" in profile_file:
129 client.set_port_attr(ports=[port_0, port_1], promiscuous=True,
# int framesize -> one base stream per direction; str framesize (e.g. IMIX)
# -> three streams per direction, sliced out of the profile's stream list.
131 if isinstance(framesize, int):
132 client.add_streams(streams[0], ports=[port_0])
133 client.add_streams(streams[1], ports=[port_1])
134 elif isinstance(framesize, str):
135 client.add_streams(streams[0:3], ports=[port_0])
136 client.add_streams(streams[3:6], ports=[port_1])
# Latency streams (indices 2/3 for int framesize) — presumably guarded by
# an `if latency:` and a try: that are not visible here; TODO confirm.
139 if isinstance(framesize, int):
140 client.add_streams(streams[2], ports=[port_0])
141 client.add_streams(streams[3], ports=[port_1])
142 elif isinstance(framesize, str):
145 # Disable latency if NIC does not support requested stream type
146 print("##### FAILED to add latency streams #####")
# --- Warm-up phase (lines below look conditional on warmup_time > 0;
#     the guard itself is not visible) ---
150 # Clear the stats before injecting:
153 # Choose rate and start traffic:
154 client.start(ports=[port_0, port_1], mult=rate,
155 duration=warmup_time)
# Block until warm-up traffic ends; extra 30 s grace on the timeout.
158 client.wait_on_traffic(ports=[port_0, port_1],
159 timeout=warmup_time+30)
161 if client.get_warnings():
162 for warning in client.get_warnings():
165 # Read the stats after the test:
166 stats = client.get_stats()
168 print("##### Warmup statistics #####")
169 print(json.dumps(stats, indent=4, separators=(',', ': '),
# Per-direction loss = sent on one port minus received on the other.
172 lost_a = stats[0]["opackets"] - stats[1]["ipackets"]
173 lost_b = stats[1]["opackets"] - stats[0]["ipackets"]
175 print("\npackets lost from 0 --> 1: {0} pkts".format(lost_a))
176 print("packets lost from 1 --> 0: {0} pkts".format(lost_b))
# --- Measured run ---
178 # Clear the stats before injecting:
183 # Choose rate and start traffic:
184 client.start(ports=[port_0, port_1], mult=rate, duration=duration)
# Presumably skipped when async_start is set ("start and exit");
# the guard is not visible in this dump — confirm against full source.
188 client.wait_on_traffic(ports=[port_0, port_1], timeout=duration+30)
190 if client.get_warnings():
191 for warning in client.get_warnings():
194 # Read the stats after the test
195 stats = client.get_stats()
197 print("##### Statistics #####")
198 print(json.dumps(stats, indent=4, separators=(',', ': '),
201 lost_a = stats[0]["opackets"] - stats[1]["ipackets"]
202 lost_b = stats[1]["opackets"] - stats[0]["ipackets"]
# Latency min/avg/max per stream — these look like arguments to
# fmt_latency() calls whose opening lines are not visible here.
206 str(stats["latency"][0]["latency"]["total_min"]),
207 str(stats["latency"][0]["latency"]["average"]),
208 str(stats["latency"][0]["latency"]["total_max"]))
210 str(stats["latency"][1]["latency"]["total_min"]),
211 str(stats["latency"][1]["latency"]["average"]),
212 str(stats["latency"][1]["latency"]["total_max"]))
214 total_sent = stats[0]["opackets"] + stats[1]["opackets"]
215 total_rcvd = stats[0]["ipackets"] + stats[1]["ipackets"]
217 print("\npackets lost from 0 --> 1: {0} pkts".format(lost_a))
218 print("packets lost from 1 --> 0: {0} pkts".format(lost_b))
# Error handler for any T-rex API failure above (matching try: not visible).
220 except STLError as err:
221 sys.stderr.write("{0}\n".format(err))
# Cleanup — likely inside a finally: block (not visible). Ports are
# released but running traffic is deliberately NOT stopped, so an
# async_start run keeps generating after this script exits.
227 client.disconnect(stop_traffic=False, release_ports=True)
# Machine-readable result line consumed by the caller (e.g. robot framework).
231 print("rate={0}, totalReceived={1}, totalSent={2}, "
232 "frameLoss={3}, latencyStream0(usec)={4}, "
233 "latencyStream1(usec)={5}".
234 format(rate, total_rcvd, total_sent, lost_a + lost_b,
# NOTE(review): partial numbered dump — the `def main():` line and several
# argparse keyword arguments (type=, required=, action=, ...) are missing
# from this view; comments below describe only the visible lines.
239 """Main function for the traffic generator using T-rex.
241 It verifies the given command line arguments and runs "simple_burst"
# Build the CLI: profile, run parameters, port numbers and mode flags.
245 parser = argparse.ArgumentParser()
246 parser.add_argument("-p", "--profile",
249 help="Python traffic profile.")
250 parser.add_argument("-d", "--duration",
253 help="Duration of traffic run.")
254 parser.add_argument("-s", "--frame_size",
256 help="Size of a Frame without padding and IPG.")
257 parser.add_argument("-r", "--rate",
259 help="Traffic rate with included units (%, pps).")
260 parser.add_argument("-w", "--warmup_time",
263 help="Traffic warm-up time in seconds, 0 = disable.")
264 parser.add_argument("--port_0",
267 help="Port 0 on the traffic generator.")
268 parser.add_argument("--port_1",
271 help="Port 1 on the traffic generator.")
272 parser.add_argument("--async",
275 help="Non-blocking call of the script.")
276 parser.add_argument("--latency",
279 help="Add latency stream")
280 args = parser.parse_args()
# frame_size is numeric for fixed-size frames, a string (e.g. "IMIX_v4_1")
# otherwise — these two lines look like the try/except ValueError bodies,
# though the try:/except: keywords are not visible in this dump.
283 framesize = int(args.frame_size)
285 framesize = args.frame_size
287 simple_burst(profile_file=args.profile,
288 duration=int(args.duration),
291 warmup_time=int(args.warmup_time),
292 port_0=int(args.port_0),
293 port_1=int(args.port_1),
294 latency=args.latency,
# NOTE(review): `args.async` is valid on Python 2 (this 2017 script's
# target), but `async` became a reserved keyword in Python 3.7 — a port
# would need getattr(args, "async") or a dest= rename. Verify target runtime.
295 async_start=args.async)
# Script entry guard; the main() call on the following line is not visible.
298 if __name__ == '__main__':