diff --git a/src/vnet/tcp/tcp_newreno.c b/src/vnet/tcp/tcp_newreno.c
index 856dffe4af9..c5ffc2a4109 100644
--- a/src/vnet/tcp/tcp_newreno.c
+++ b/src/vnet/tcp/tcp_newreno.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at:
@@ -14,22 +14,38 @@
  */
 
 #include <vnet/tcp/tcp.h>
+#include
 
-void
+typedef struct nwreno_cfg_
+{
+  u32 ssthresh;
+} newreno_cfg_t;
+
+static newreno_cfg_t newreno_cfg = {
+  .ssthresh = 0x7FFFFFFFU,
+};
+
+static void
 newreno_congestion (tcp_connection_t * tc)
 {
-  tc->prev_ssthresh = tc->ssthresh;
   tc->ssthresh = clib_max (tcp_flight_size (tc) / 2, 2 * tc->snd_mss);
+  tc->cwnd = tc->ssthresh;
 }
 
-void
+static void
+newreno_loss (tcp_connection_t * tc)
+{
+  tc->cwnd = tcp_loss_wnd (tc);
+}
+
+static void
 newreno_recovered (tcp_connection_t * tc)
 {
   tc->cwnd = tc->ssthresh;
 }
 
-void
-newreno_rcv_ack (tcp_connection_t * tc)
+static void
+newreno_rcv_ack (tcp_connection_t * tc, tcp_rate_sample_t * rs)
 {
   if (tcp_in_slowstart (tc))
     {
@@ -37,35 +53,74 @@ newreno_rcv_ack (tcp_connection_t * tc)
     }
   else
     {
-      /* Round up to 1 if needed */
-      tc->cwnd += clib_max (tc->snd_mss * tc->snd_mss / tc->cwnd, 1);
+      /* tc->cwnd += clib_max ((tc->snd_mss * tc->snd_mss) / tc->cwnd, 1); */
+      tcp_cwnd_accumulate (tc, tc->cwnd, tc->bytes_acked);
     }
 }
 
 void
-newreno_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type)
+newreno_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type,
+		      tcp_rate_sample_t * rs)
 {
   if (ack_type == TCP_CC_DUPACK)
     {
-      tc->cwnd += tc->snd_mss;
+      if (!tcp_opts_sack_permitted (&tc->rcv_opts))
+	tc->cwnd += tc->snd_mss;
     }
   else if (ack_type == TCP_CC_PARTIALACK)
     {
-      tc->cwnd -= tc->bytes_acked;
-      if (tc->bytes_acked > tc->snd_mss)
-	tc->bytes_acked += tc->snd_mss;
+      /* RFC 6582 Sec. 3.2 */
+      if (!tcp_opts_sack_permitted (&tc->rcv_opts))
+	{
+	  /* Deflate the congestion window by the amount of new data
+	   * acknowledged by the Cumulative Acknowledgment field.
+	   * If the partial ACK acknowledges at least one SMSS of new data,
+	   * then add back SMSS bytes to the congestion window. This
+	   * artificially inflates the congestion window in order to reflect
+	   * the additional segment that has left the network. This "partial
+	   * window deflation" attempts to ensure that, when fast recovery
+	   * eventually ends, approximately ssthresh amount of data will be
+	   * outstanding in the network. */
+	  tc->cwnd = (tc->cwnd > tc->bytes_acked + tc->snd_mss) ?
+	    tc->cwnd - tc->bytes_acked : tc->snd_mss;
+	  if (tc->bytes_acked > tc->snd_mss)
+	    tc->cwnd += tc->snd_mss;
+	}
     }
 }
 
-void
+static void
 newreno_conn_init (tcp_connection_t * tc)
 {
-  tc->ssthresh = tc->snd_wnd;
+  tc->ssthresh = newreno_cfg.ssthresh;
   tc->cwnd = tcp_initial_cwnd (tc);
 }
 
+static uword
+newreno_unformat_config (unformat_input_t * input)
+{
+  u32 ssthresh = 0x7FFFFFFFU;
+
+  if (!input)
+    return 0;
+
+  unformat_skip_white_space (input);
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "ssthresh %u", &ssthresh))
+	newreno_cfg.ssthresh = ssthresh;
+      else
+	return 0;
+    }
+  return 1;
+}
+
 const static tcp_cc_algorithm_t tcp_newreno = {
+  .name = "newreno",
+  .unformat_cfg = newreno_unformat_config,
   .congestion = newreno_congestion,
+  .loss = newreno_loss,
   .recovered = newreno_recovered,
   .rcv_ack = newreno_rcv_ack,
   .rcv_cong_ack = newreno_rcv_cong_ack,
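A note on the congestion-avoidance change above: the classic per-ACK increment snd_mss * snd_mss / cwnd (left in place as a comment) is replaced by tcp_cwnd_accumulate (tc, tc->cwnd, tc->bytes_acked), which grows cwnd by counting acked bytes against a threshold, in the spirit of RFC 3465 appropriate byte counting. The helper's body is not part of this diff; a minimal sketch of such an accumulator, assuming a hypothetical cwnd_acc_bytes counter on the connection:

/* Sketch only: once thresh bytes have been cumulatively acked, grow cwnd
 * by one snd_mss per thresh-sized chunk. cwnd_acc_bytes is an assumed
 * field, not necessarily the real tcp_connection_t layout. */
static inline void
cwnd_accumulate_sketch (tcp_connection_t * tc, u32 thresh, u32 bytes)
{
  tc->cwnd_acc_bytes += bytes;
  if (tc->cwnd_acc_bytes >= thresh)
    {
      u32 inc = tc->cwnd_acc_bytes / thresh;
      tc->cwnd_acc_bytes -= inc * thresh;
      tc->cwnd += inc * tc->snd_mss;
    }
}

With thresh = cwnd, this gives the same roughly one-SMSS-per-RTT growth as the old formula, but without discarding the sub-MSS remainder on every ACK.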
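As a worked example of the partial window deflation in newreno_rcv_cong_ack: with snd_mss = 1460 and cwnd = 14600, a partial ACK with bytes_acked = 2920 first deflates cwnd to 14600 - 2920 = 11680; because 2920 > 1460, one SMSS is added back, leaving cwnd = 13140. The window shrinks by the data that left the network, minus one segment of artificial inflation, which is what keeps approximately ssthresh bytes outstanding when fast recovery ends.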
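The new unformat_cfg hook lets ssthresh be seeded from configuration rather than from tc->snd_wnd at connection init. A hypothetical driver fragment for the parser (the config string and the direct call through the vtable are illustrative; in practice the TCP stack invokes the hook with the algorithm's config section):

/* Hypothetical usage sketch, inside some test routine;
 * assumes <string.h> for strlen. */
unformat_input_t input;
char *cfg = "ssthresh 65535";

unformat_init_string (&input, cfg, strlen (cfg));
if (!tcp_newreno.unformat_cfg (&input))
  clib_warning ("bad newreno config");
unformat_free (&input);
/* on success, newreno_cfg.ssthresh == 65535 */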
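For completeness, the vtable only takes effect once it is registered with the TCP stack. The pre-change file did this with tcp_cc_algo_register (); assuming that tail of the file is unchanged below the hunk shown, it would look roughly like:

clib_error_t *
newreno_init (vlib_main_t * vm)
{
  /* Make newreno selectable as a TCP congestion control algorithm */
  tcp_cc_algo_register (TCP_CC_NEWRENO, &tcp_newreno);
  return 0;
}

VLIB_INIT_FUNCTION (newreno_init);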