/**
 * NewReno congestion signal handler.
 *
 * On a congestion event, reduce the slow-start threshold to half the
 * amount of data currently in flight, but never below 2 MSS (per
 * RFC 5681 section 3.1, equation 4).
 *
 * @param tc	connection that experienced congestion
 *
 * NOTE(review): the diff this block came from deletes the
 * `tc->prev_ssthresh = tc->ssthresh;` save — presumably prev_ssthresh
 * is now recorded by the caller before invoking this hook; confirm
 * against the congestion-recovery entry path.
 */
void
newreno_congestion (tcp_connection_t * tc)
{
  tc->ssthresh = clib_max (tcp_flight_size (tc) / 2, 2 * tc->snd_mss);
}
}
else
{
- /* Round up to 1 if needed */
- tc->cwnd += clib_max ((tc->snd_mss * tc->snd_mss) / tc->cwnd, 1);
+ /* tc->cwnd += clib_max ((tc->snd_mss * tc->snd_mss) / tc->cwnd, 1); */
+ tc->cwnd_acc_bytes += tc->bytes_acked;
+ if (tc->cwnd_acc_bytes >= tc->cwnd)
+ {
+ u32 inc = tc->cwnd_acc_bytes / tc->cwnd;
+ tc->cwnd += inc * tc->snd_mss;
+ tc->cwnd_acc_bytes -= inc * tc->cwnd;
+ }
+ tc->cwnd = clib_min (tc->cwnd,
+ transport_tx_fifo_size (&tc->connection));
}
}
{
if (ack_type == TCP_CC_DUPACK)
{
- tc->cwnd += tc->snd_mss;
+ if (!tcp_opts_sack_permitted (tc))
+ tc->cwnd += tc->snd_mss;
}
else if (ack_type == TCP_CC_PARTIALACK)
{
* window deflation" attempts to ensure that, when fast recovery
* eventually ends, approximately ssthresh amount of data will be
* outstanding in the network.*/
- tc->cwnd = (tc->cwnd > tc->bytes_acked) ?
- tc->cwnd - tc->bytes_acked : 0;
+ tc->cwnd = (tc->cwnd > tc->bytes_acked + tc->snd_mss) ?
+ tc->cwnd - tc->bytes_acked : tc->snd_mss;
if (tc->bytes_acked > tc->snd_mss)
tc->cwnd += tc->snd_mss;
}