diff --git a/src/vnet/tcp/tcp_newreno.c b/src/vnet/tcp/tcp_newreno.c
index c825e952c9b..c5ffc2a4109 100644
--- a/src/vnet/tcp/tcp_newreno.c
+++ b/src/vnet/tcp/tcp_newreno.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
@@ -14,21 +14,38 @@
 */
 
 #include <vnet/tcp/tcp.h>
+#include <vnet/tcp/tcp_inlines.h>
 
-void
+typedef struct nwreno_cfg_
+{
+  u32 ssthresh;
+} newreno_cfg_t;
+
+static newreno_cfg_t newreno_cfg = {
+  .ssthresh = 0x7FFFFFFFU,
+};
+
+static void
 newreno_congestion (tcp_connection_t * tc)
 {
   tc->ssthresh = clib_max (tcp_flight_size (tc) / 2, 2 * tc->snd_mss);
+  tc->cwnd = tc->ssthresh;
 }
 
-void
+static void
+newreno_loss (tcp_connection_t * tc)
+{
+  tc->cwnd = tcp_loss_wnd (tc);
+}
+
+static void
 newreno_recovered (tcp_connection_t * tc)
 {
   tc->cwnd = tc->ssthresh;
 }
 
-void
-newreno_rcv_ack (tcp_connection_t * tc)
+static void
+newreno_rcv_ack (tcp_connection_t * tc, tcp_rate_sample_t * rs)
 {
   if (tcp_in_slowstart (tc))
     {
@@ -36,13 +53,14 @@ newreno_rcv_ack (tcp_connection_t * tc)
     }
   else
     {
-      /* Round up to 1 if needed */
-      tc->cwnd += clib_max ((tc->snd_mss * tc->snd_mss) / tc->cwnd, 1);
+      /* tc->cwnd += clib_max ((tc->snd_mss * tc->snd_mss) / tc->cwnd, 1); */
+      tcp_cwnd_accumulate (tc, tc->cwnd, tc->bytes_acked);
     }
 }
 
 void
-newreno_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type)
+newreno_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type,
+		      tcp_rate_sample_t * rs)
 {
   if (ack_type == TCP_CC_DUPACK)
     {
@@ -63,23 +81,46 @@ newreno_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type)
       * window deflation" attempts to ensure that, when fast recovery
       * eventually ends, approximately ssthresh amount of data will be
       * outstanding in the network.*/
-      tc->cwnd = (tc->cwnd > tc->bytes_acked) ?
-	tc->cwnd - tc->bytes_acked : 0;
+      tc->cwnd = (tc->cwnd > tc->bytes_acked + tc->snd_mss) ?
+	tc->cwnd - tc->bytes_acked : tc->snd_mss;
       if (tc->bytes_acked > tc->snd_mss)
	 tc->cwnd += tc->snd_mss;
     }
 }
 
-void
+static void
 newreno_conn_init (tcp_connection_t * tc)
 {
-  tc->ssthresh = tc->snd_wnd;
+  tc->ssthresh = newreno_cfg.ssthresh;
   tc->cwnd = tcp_initial_cwnd (tc);
 }
 
+static uword
+newreno_unformat_config (unformat_input_t * input)
+{
+  u32 ssthresh = 0x7FFFFFFFU;
+
+  if (!input)
+    return 0;
+
+  unformat_skip_white_space (input);
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "ssthresh %u", &ssthresh))
+	newreno_cfg.ssthresh = ssthresh;
+      else
+	return 0;
+    }
+  return 1;
+}
+
 const static tcp_cc_algorithm_t tcp_newreno = {
+  .name = "newreno",
+  .unformat_cfg = newreno_unformat_config,
   .congestion = newreno_congestion,
+  .loss = newreno_loss,
   .recovered = newreno_recovered,
   .rcv_ack = newreno_rcv_ack,
   .rcv_cong_ack = newreno_rcv_cong_ack,
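
A note on the congestion-avoidance change in newreno_rcv_ack: the old per-ACK
increment of snd_mss * snd_mss / cwnd ties window growth to ACK count, so it
under-grows the window when ACKs are delayed, stretched, or lost. The
tcp_cwnd_accumulate call replaces it with what appears to be a byte-counting
scheme in the spirit of RFC 3465: acked bytes are accumulated, and cwnd grows
by one MSS per cwnd's worth of acknowledged data. The following is a minimal
standalone sketch of that idea with toy types; it is illustrative only and is
not VPP's actual tcp_cwnd_accumulate.

#include <stdint.h>

typedef struct
{
  uint32_t cwnd;	   /* congestion window, in bytes */
  uint32_t snd_mss;	   /* sender maximum segment size */
  uint32_t cwnd_acc_bytes; /* bytes acked since the last cwnd bump */
} toy_tcp_conn_t;

/* Grow cwnd by one MSS for every thresh bytes acknowledged. With
 * thresh == cwnd this yields the classic ~1 MSS per RTT of congestion
 * avoidance, but driven by bytes delivered instead of ACKs counted. */
static void
toy_cwnd_accumulate (toy_tcp_conn_t * tc, uint32_t thresh, uint32_t bytes)
{
  tc->cwnd_acc_bytes += bytes;
  if (tc->cwnd_acc_bytes >= thresh)
    {
      uint32_t inc = tc->cwnd_acc_bytes / thresh;
      tc->cwnd += inc * tc->snd_mss;
      tc->cwnd_acc_bytes -= inc * thresh;
    }
}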
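
The partial-ACK branch in newreno_rcv_cong_ack also gains a safety floor: the
old deflation could drive cwnd all the way to 0 once bytes_acked reached cwnd,
stalling transmission inside fast recovery, while the new guard never deflates
below one MSS. A standalone sketch of the revised logic, reusing the toy
struct from the previous example (field names are illustrative):

/* Deflate cwnd by the amount newly acked on a partial ACK, per the
 * RFC 6582-style window deflation in the patch, but keep a floor of
 * one MSS so the connection can always send something. */
static void
toy_deflate_on_partial_ack (toy_tcp_conn_t * tc, uint32_t bytes_acked)
{
  if (tc->cwnd > bytes_acked + tc->snd_mss)
    tc->cwnd -= bytes_acked;	/* deflate by the bytes acknowledged */
  else
    tc->cwnd = tc->snd_mss;	/* never below one MSS */

  /* If the partial ACK covers at least one full-sized segment,
   * add back one MSS to allow a new segment to be sent. */
  if (bytes_acked > tc->snd_mss)
    tc->cwnd += tc->snd_mss;
}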
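
Finally, the conn_init change drops the peer-advertised snd_wnd as the initial
ssthresh in favor of a configurable value defaulting to 0x7FFFFFFF, i.e.
effectively unlimited, which matches RFC 5681's advice to set the initial
ssthresh arbitrarily high. The new unformat_cfg hook then lets operators lower
it by parsing an "ssthresh %u" token from the algorithm's config input. Below
is a hypothetical driver showing how such an unformat-based parser can be
exercised from a string; it assumes vppinfra's unformat_init_string and
unformat_free, and parse_newreno_cfg is an illustrative helper, not part of
the patch.

#include <string.h>
#include <vppinfra/format.h>

/* Illustrative helper: feed a config string through an unformat parser
 * the way a cc algorithm's unformat_cfg hook would see it. */
static uword
parse_newreno_cfg (char *cfg_str, u32 * ssthresh)
{
  unformat_input_t input;
  uword rv = 0;

  unformat_init_string (&input, cfg_str, strlen (cfg_str));
  if (unformat (&input, "ssthresh %u", ssthresh))
    rv = 1;
  unformat_free (&input);
  return rv;
}

/* e.g. parse_newreno_cfg ("ssthresh 65535", &newreno_cfg.ssthresh); */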