# client commands
prog_c = ASTFProgram()
prog_c.connect()
- prog_c.send(u"1" * data_size)
- prog_c.recv(data_size)
+ prog_c.set_var(u"var1", self.n_data_frames)
+ prog_c.set_label(u"a1:")
+ prog_c.send(u"1" * real_mss)
+ prog_c.recv(real_mss)
+ prog_c.jmp_nz(u"var1", u"a1:")
# server commands
prog_s = ASTFProgram()
prog_s.accept()
- prog_s.recv(data_size)
- prog_s.send(u"1" * data_size)
+ prog_s.set_var(u"var2", self.n_data_frames)
+ prog_s.set_label(u"a2:")
+ prog_s.recv(real_mss)
+ prog_s.send(u"1" * real_mss)
+ prog_s.jmp_nz(u"var2", u"a2:")
# ip generators
ip_gen_c = ASTFIPGenDist(
globinfo = ASTFGlobalInfo()
# Ensure correct data frame size.
globinfo.tcp.mss = trex_mss
- # Ensure the whole transaction is a single burst (per direction).
- globinfo.tcp.initwnd = self.n_data_frames
- # Ensure buffers are large enough so starting window works.
- globinfo.tcp.txbufsize = data_size
- globinfo.tcp.rxbufsize = data_size
+ globinfo.tcp.txbufsize = trex_mss
+ globinfo.tcp.rxbufsize = trex_mss
kwargs = dict(
default_c_glob_info=globinfo,
default_s_glob_info=globinfo,
# client commands
prog_c = ASTFProgram()
prog_c.connect()
- prog_c.send(u"1" * data_size)
- prog_c.recv(data_size)
+ prog_c.set_var(u"var1", self.n_data_frames)
+ prog_c.set_label(u"a1:")
+ prog_c.send(u"1" * real_mss)
+ prog_c.recv(real_mss)
+ prog_c.jmp_nz(u"var1", u"a1:")
# server commands
prog_s = ASTFProgram()
prog_s.accept()
- prog_s.recv(data_size)
- prog_s.send(u"1" * data_size)
+ prog_s.set_var(u"var2", self.n_data_frames)
+ prog_s.set_label(u"a2:")
+ prog_s.recv(real_mss)
+ prog_s.send(u"1" * real_mss)
+ prog_s.jmp_nz(u"var2", u"a2:")
# ip generators
ip_gen_c = ASTFIPGenDist(
globinfo = ASTFGlobalInfo()
# Ensure correct data frame size.
globinfo.tcp.mss = trex_mss
- # Ensure the whole transaction is a single burst (per direction).
- globinfo.tcp.initwnd = self.n_data_frames
- # Ensure buffers are large enough so starting window works.
- globinfo.tcp.txbufsize = data_size
- globinfo.tcp.rxbufsize = data_size
+ globinfo.tcp.txbufsize = trex_mss
+ globinfo.tcp.rxbufsize = trex_mss
kwargs = dict(
default_c_glob_info=globinfo,
default_s_glob_info=globinfo,
# client commands
prog_c = ASTFProgram()
prog_c.connect()
- prog_c.send(u"1" * data_size)
- prog_c.recv(data_size)
+ prog_c.set_var(u"var1", self.n_data_frames)
+ prog_c.set_label(u"a1:")
+ prog_c.send(u"1" * real_mss)
+ prog_c.recv(real_mss)
+ prog_c.jmp_nz(u"var1", u"a1:")
# server commands
prog_s = ASTFProgram()
prog_s.accept()
- prog_s.recv(data_size)
- prog_s.send(u"1" * data_size)
+ prog_s.set_var(u"var2", self.n_data_frames)
+ prog_s.set_label(u"a2:")
+ prog_s.recv(real_mss)
+ prog_s.send(u"1" * real_mss)
+ prog_s.jmp_nz(u"var2", u"a2:")
# ip generators
ip_gen_c = ASTFIPGenDist(
globinfo = ASTFGlobalInfo()
# Ensure correct data frame size.
globinfo.tcp.mss = trex_mss
- # Ensure the whole transaction is a single burst (per direction).
- globinfo.tcp.initwnd = self.n_data_frames
- # Ensure buffers are large enough so starting window works.
- globinfo.tcp.txbufsize = data_size
- globinfo.tcp.rxbufsize = data_size
+ globinfo.tcp.txbufsize = trex_mss
+ globinfo.tcp.rxbufsize = trex_mss
kwargs = dict(
default_c_glob_info=globinfo,
default_s_glob_info=globinfo,
# client commands
prog_c = ASTFProgram()
prog_c.connect()
- prog_c.send(u"1" * data_size)
- prog_c.recv(data_size)
+ prog_c.set_var(u"var1", self.n_data_frames)
+ prog_c.set_label(u"a1:")
+ prog_c.send(u"1" * real_mss)
+ prog_c.recv(real_mss)
+ prog_c.jmp_nz(u"var1", u"a1:")
# server commands
prog_s = ASTFProgram()
prog_s.accept()
- prog_s.recv(data_size)
- prog_s.send(u"1" * data_size)
+ prog_s.set_var(u"var2", self.n_data_frames)
+ prog_s.set_label(u"a2:")
+ prog_s.recv(real_mss)
+ prog_s.send(u"1" * real_mss)
+ prog_s.jmp_nz(u"var2", u"a2:")
# ip generators
ip_gen_c = ASTFIPGenDist(
globinfo = ASTFGlobalInfo()
# Ensure correct data frame size.
globinfo.tcp.mss = trex_mss
- # Ensure the whole transaction is a single burst (per direction).
- globinfo.tcp.initwnd = self.n_data_frames
- # Ensure buffers are large enough so starting window works.
- globinfo.tcp.txbufsize = data_size
- globinfo.tcp.rxbufsize = data_size
+ globinfo.tcp.txbufsize = trex_mss
+ globinfo.tcp.rxbufsize = trex_mss
kwargs = dict(
default_c_glob_info=globinfo,
default_s_glob_info=globinfo,
# client commands
prog_c = ASTFProgram()
prog_c.connect()
- prog_c.send(u"1" * data_size)
- prog_c.recv(data_size)
+ prog_c.set_var(u"var1", self.n_data_frames)
+ prog_c.set_label(u"a1:")
+ prog_c.send(u"1" * real_mss)
+ prog_c.recv(real_mss)
+ prog_c.jmp_nz(u"var1", u"a1:")
# server commands
prog_s = ASTFProgram()
prog_s.accept()
- prog_s.recv(data_size)
- prog_s.send(u"1" * data_size)
+ prog_s.set_var(u"var2", self.n_data_frames)
+ prog_s.set_label(u"a2:")
+ prog_s.recv(real_mss)
+ prog_s.send(u"1" * real_mss)
+ prog_s.jmp_nz(u"var2", u"a2:")
# ip generators
ip_gen_c = ASTFIPGenDist(
globinfo = ASTFGlobalInfo()
# Ensure correct data frame size.
globinfo.tcp.mss = trex_mss
- # Ensure the whole transaction is a single burst (per direction).
- globinfo.tcp.initwnd = self.n_data_frames
- # Ensure buffers are large enough so starting window works.
- globinfo.tcp.txbufsize = data_size
- globinfo.tcp.rxbufsize = data_size
+ globinfo.tcp.txbufsize = trex_mss
+ globinfo.tcp.rxbufsize = trex_mss
kwargs = dict(
default_c_glob_info=globinfo,
default_s_glob_info=globinfo,
This profile uses a small transaction of "request-response" type,
with some amount of data to be transferred both ways.
+In CSIT release 22.06, TRex behavior changed, so we needed to edit
+the traffic profile. Let us describe the pre-22.06 profile first.
+
Client connects, sends 5 data packets worth of data,
receives 5 data packets worth of data and closes its side of the connection.
Server accepts connection, reads 5 data packets worth of data,
MRR results (frequently MRR is below NDR for this reason),
but NDR and PDR results tend to be stable enough.
+In 22.06, the "ACK from the receiving side" behavior changed:
+the receiving side started to sometimes send an ACK
+even before receiving the full set of 5 data packets.
+If the previous profile is understood as a "single challenge, single response"
+exchange, where the challenge (and also the response) is sent
+as a burst of 5 data packets,
+the new profile uses "bursts" of 1 packet instead, but issues
+the challenge-response part 5 times sequentially
+(waiting to receive the response before sending the next challenge).
+This new profile happens to have the same overall packet count
+(when no re-transmissions are needed).
+Although it is possibly more taxing for the TRex CPU,
+the results are comparable to those of the old traffic profile.
+
Ip4base tests
^^^^^^^^^^^^^