static void
tcp_cc_init (tcp_connection_t * tc)
{
+  /* Pick the algorithm configured at startup instead of hard-coded newreno */
-  tc->cc_algo = tcp_cc_algo_get (TCP_CC_NEWRENO);
+  tc->cc_algo = tcp_cc_algo_get (tcp_main.cc_algo);
   tc->cc_algo->init (tc);
}
tcp_api_reference ();
tm->tx_pacing = 1;
+ tm->cc_algo = TCP_CC_NEWRENO;
return 0;
}
VLIB_INIT_FUNCTION (tcp_init);
+/**
+ * Unformat function for the congestion control algorithm name.
+ *
+ * Accepts "newreno" or "cubic" and stores the matching
+ * tcp_cc_algorithm_type_e value in the uword pointed to by the va_list
+ * argument. Returns 1 on a match, 0 otherwise (unformat convention).
+ */
+uword
+unformat_tcp_cc_algo (unformat_input_t * input, va_list * va)
+{
+  uword *result = va_arg (*va, uword *);
+
+  if (unformat (input, "newreno"))
+    *result = TCP_CC_NEWRENO;
+  else if (unformat (input, "cubic"))
+    *result = TCP_CC_CUBIC;
+  else
+    return 0;
+
+  return 1;
+}
+
static clib_error_t *
tcp_config_fn (vlib_main_t * vm, unformat_input_t * input)
{
;
else if (unformat (input, "no-tx-pacing"))
tm->tx_pacing = 0;
+ else if (unformat (input, "cc-algo %U", unformat_tcp_cc_algo,
+ &tm->cc_algo))
+ ;
else
return clib_error_return (0, "unknown input `%U'",
format_unformat_error, input);
#define TCP_PAWS_IDLE 24 * 24 * 60 * 60 * THZ /**< 24 days */
#define TCP_FIB_RECHECK_PERIOD 1 * THZ /**< Recheck every 1s */
#define TCP_MAX_OPTION_SPACE 40
+#define TCP_CC_DATA_SZ 20
#define TCP_DUPACK_THRESHOLD 3
#define TCP_MAX_RX_FIFO_SIZE 32 << 20
typedef enum _tcp_cc_algorithm_type
{
TCP_CC_NEWRENO,
+ TCP_CC_CUBIC,
} tcp_cc_algorithm_type_e;
typedef struct _tcp_cc_algorithm tcp_cc_algorithm_t;
typedef struct _tcp_connection
{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
transport_connection_t connection; /**< Common transport data. First! */
u8 state; /**< TCP state as per tcp_state_t */
u32 tsecr_last_ack; /**< Timestamp echoed to us in last healthy ACK */
u32 snd_congestion; /**< snd_una_max when congestion is detected */
tcp_cc_algorithm_t *cc_algo; /**< Congestion control algorithm */
+ u8 cc_data[TCP_CC_DATA_SZ]; /**< Congestion control algo private data */
/* RTT and RTO */
u32 rto; /**< Retransmission timeout */
u32 limited_transmit; /**< snd_nxt when limited transmit starts */
u32 last_fib_check; /**< Last time we checked fib route for peer */
u32 sw_if_index; /**< Interface for the connection */
+ u32 tx_fifo_size; /**< Tx fifo size. Used to constrain cwnd */
} tcp_connection_t;
struct _tcp_cc_algorithm
/** fault-injection */
f64 buffer_fail_fraction;
+
+ u8 cc_algo;
} tcp_main_t;
extern tcp_main_t tcp_main;
return 4 * tc->snd_mss;
}
+/*
+ * Accumulate acked bytes for cwnd increase
+ *
+ * Once threshold bytes are accumulated, snd_mss bytes are added
+ * to the cwnd.
+ *
+ * NOTE(review): thresh must be non-zero — a zero threshold divides by
+ * zero below. Current callers guarantee this; confirm for new callers.
+ */
+always_inline void
+tcp_cwnd_accumulate (tcp_connection_t * tc, u32 thresh, u32 bytes)
+{
+  tc->cwnd_acc_bytes += bytes;
+  if (tc->cwnd_acc_bytes >= thresh)
+    {
+      /* One snd_mss for every full threshold's worth of acked bytes;
+       * the remainder is kept for the next round */
+      u32 inc = tc->cwnd_acc_bytes / thresh;
+      tc->cwnd_acc_bytes -= inc * thresh;
+      tc->cwnd += inc * tc->snd_mss;
+      /* Never grow cwnd beyond what the tx fifo can hold */
+      tc->cwnd = clib_min (tc->cwnd, tc->tx_fifo_size);
+    }
+}
+
always_inline u32
tcp_loss_wnd (const tcp_connection_t * tc)
{
tcp_cc_algorithm_t *tcp_cc_algo_get (tcp_cc_algorithm_type_e type);
+/** Pointer to the per-connection scratch area (cc_data, TCP_CC_DATA_SZ
+ * bytes) reserved for the congestion control algorithm's private state */
+static inline void *
+tcp_cc_data (tcp_connection_t * tc)
+{
+  return (void *) tc->cc_data;
+}
+
+/* Exported so other cc algorithms (e.g. cubic) can reuse newreno's
+ * congestion-ack handling */
+void newreno_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type);
+
/**
* Push TCP header to buffer
*
--- /dev/null
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/tcp/tcp.h>
+#include <math.h>
+
+#define beta_cubic 0.7
+#define cubic_c 0.4
+#define west_const (3 * (1 - beta_cubic) / (1 + beta_cubic))
+
+typedef struct cubic_data_
+{
+  /** time period (in seconds) needed to increase the current window
+   * size to W_max if there are no further congestion events */
+  f64 K;
+
+  /** absolute time (in sec) at which the current congestion avoidance
+   * epoch started; elapsed time is computed against cubic_time() */
+  f64 t_start;
+
+  /** Inflection point of the cubic function, in units of snd_mss
+   * segments (set from cwnd / snd_mss on congestion) */
+  u32 w_max;
+
+} __clib_packed cubic_data_t;
+
+/* Private state must fit in the connection's cc_data scratch area */
+STATIC_ASSERT (sizeof (cubic_data_t) <= TCP_CC_DATA_SZ, "cubic data len");
+
+/* Current transport time for the given thread; used as seconds
+ * throughout the cubic computations */
+static inline f64
+cubic_time (u32 thread_index)
+{
+  return transport_time_now (thread_index);
+}
+
+/**
+ * RFC 8312 Eq. 1
+ *
+ * CUBIC window increase function. Time and K need to be provided in
+ * seconds; the result is in segments, the same unit as w_max.
+ *
+ * For t >= 0 the value is at least beta_cubic * w_max (the window right
+ * after the decrease), so the f64 -> u64 conversion never sees a
+ * negative value.
+ */
+static inline u64
+W_cubic (cubic_data_t * cd, f64 t)
+{
+  f64 diff = t - cd->K;
+
+  /* W_cubic(t) = C*(t-K)^3 + W_max */
+  return cubic_c * diff * diff * diff + cd->w_max;
+}
+
+/**
+ * RFC 8312 Eq. 2
+ *
+ * Time (in seconds) the cubic function needs to grow the window back
+ * to w_max, assuming no further congestion events.
+ */
+static inline f64
+K_cubic (cubic_data_t * cd)
+{
+  /* K = cubic_root(W_max*(1-beta_cubic)/C)
+   * Use cbrt(): the dedicated C99 cube-root function, typically more
+   * accurate (and cheaper) than pow (x, 1 / 3.0) */
+  return cbrt (cd->w_max * (1 - beta_cubic) / cubic_c);
+}
+
+/**
+ * RFC 8312 Eq. 4
+ *
+ * Estimates the window size of AIMD(alpha_aimd, beta_aimd) for
+ * alpha_aimd=3*(1-beta_cubic)/(1+beta_cubic) and beta_aimd=beta_cubic.
+ * Time (t) and rtt should be provided in seconds; the result is in
+ * segments, the same unit as w_max.
+ */
+static inline u32
+W_est (cubic_data_t * cd, f64 t, f64 rtt)
+{
+  /* W_est(t) = W_max*beta_cubic+[3*(1-beta_cubic)/(1+beta_cubic)]*(t/RTT) */
+  return cd->w_max * beta_cubic + west_const * (t / rtt);
+}
+
+/**
+ * Congestion event: remember the window at which loss was detected and
+ * apply the multiplicative decrease to ssthresh.
+ */
+static void
+cubic_congestion (tcp_connection_t * tc)
+{
+  cubic_data_t *cd = (cubic_data_t *) tcp_cc_data (tc);
+
+  /* w_max is kept in segments; the cubic/AIMD formulas operate on it */
+  cd->w_max = tc->cwnd / tc->snd_mss;
+  /* ssthresh stays in bytes: cwnd scaled by beta_cubic, never less
+   * than 2 segments */
+  tc->ssthresh = clib_max (tc->cwnd * beta_cubic, 2 * tc->snd_mss);
+}
+
+/**
+ * Recovery done: start a new congestion avoidance epoch. Record the
+ * epoch start time, recompute K for the current w_max and collapse
+ * cwnd to ssthresh.
+ */
+static void
+cubic_recovered (tcp_connection_t * tc)
+{
+  cubic_data_t *cd = (cubic_data_t *) tcp_cc_data (tc);
+  cd->t_start = cubic_time (tc->c_thread_index);
+  cd->K = K_cubic (cd);
+  tc->cwnd = tc->ssthresh;
+}
+
+/**
+ * Per-ack window growth (RFC 8312).
+ *
+ * In slow start, grow by min(snd_mss, bytes_acked) per ack. In
+ * congestion avoidance, grow towards W_cubic one rtt ahead, falling
+ * back to AIMD pacing while in the TCP-friendly region.
+ */
+static void
+cubic_rcv_ack (tcp_connection_t * tc)
+{
+  cubic_data_t *cd = (cubic_data_t *) tcp_cc_data (tc);
+  u64 w_cubic, w_aimd;
+  f64 t, rtt_sec;
+  u32 thresh;
+
+  /* Constrained by tx fifo, can't grow further */
+  if (tc->cwnd >= tc->tx_fifo_size)
+    return;
+
+  if (tcp_in_slowstart (tc))
+    {
+      tc->cwnd += clib_min (tc->snd_mss, tc->bytes_acked);
+      return;
+    }
+
+  /* Elapsed time since the start of this congestion avoidance epoch */
+  t = cubic_time (tc->c_thread_index) - cd->t_start;
+  rtt_sec = clib_min (tc->mrtt_us, (f64) tc->srtt * TCP_TICK);
+
+  /* Target windows in bytes; W_cubic is evaluated one rtt ahead.
+   * Widen to 64 bits before multiplying by snd_mss to avoid u32
+   * overflow for large windows */
+  w_cubic = W_cubic (cd, t + rtt_sec) * tc->snd_mss;
+  w_aimd = (u64) W_est (cd, t, rtt_sec) * tc->snd_mss;
+  if (w_cubic < w_aimd)
+    {
+      /* TCP-friendly region: grow like AIMD, i.e., one snd_mss per
+       * cwnd of acked bytes */
+      tcp_cwnd_accumulate (tc, tc->cwnd, tc->bytes_acked);
+    }
+  else
+    {
+      if (w_cubic > tc->cwnd)
+	{
+	  /* For NewReno and slow start, we increment cwnd based on the
+	   * number of bytes acked, not the number of acks received. In
+	   * particular, for NewReno we increment the cwnd by 1 snd_mss
+	   * only after we accumulate 1 cwnd of acked bytes (RFC 3465).
+	   *
+	   * For Cubic, as per RFC 8312 we should increment cwnd by
+	   * (w_cubic - cwnd)/cwnd for each ack. Instead of using that,
+	   * we compute the number of packets that need to be acked
+	   * before adding snd_mss to cwnd and compute the threshold.
+	   *
+	   * Multiply in 64 bits: snd_mss * cwnd exceeds 32 bits for
+	   * large windows and would silently wrap in u32 arithmetic */
+	  thresh = ((u64) tc->snd_mss * tc->cwnd) / (w_cubic - tc->cwnd);
+
+	  /* Make sure we don't increase cwnd more often than every
+	   * 2 segments */
+	  thresh = clib_max (thresh, 2 * tc->snd_mss);
+	}
+      else
+	{
+	  /* Practically we can't increment so just inflate threshold.
+	   * Clamp in 64 bits so the scaling can't wrap for large cwnd */
+	  thresh = clib_min ((u64) tc->cwnd * 1000, 0xFFFFFFFFULL);
+	}
+      tcp_cwnd_accumulate (tc, thresh, tc->bytes_acked);
+    }
+}
+
+/**
+ * Per-connection initialization on connection setup.
+ */
+static void
+cubic_conn_init (tcp_connection_t * tc)
+{
+  cubic_data_t *cd = (cubic_data_t *) tcp_cc_data (tc);
+  tc->ssthresh = tc->snd_wnd;
+  tc->cwnd = tcp_initial_cwnd (tc);
+  /* No congestion epoch yet; the first congestion event and the
+   * subsequent recovery set w_max and K */
+  cd->w_max = 0;
+  cd->K = 0;
+  cd->t_start = cubic_time (tc->c_thread_index);
+}
+
+/* Virtual function table registered with the tcp stack. Congestion-ack
+ * handling is shared with newreno.
+ * Note: storage-class specifier goes first ("static const", not
+ * "const static"), per the C standard's preferred declaration order */
+static const tcp_cc_algorithm_t tcp_cubic = {
+  .congestion = cubic_congestion,
+  .recovered = cubic_recovered,
+  .rcv_ack = cubic_rcv_ack,
+  .rcv_cong_ack = newreno_rcv_cong_ack,
+  .init = cubic_conn_init
+};
+
+/**
+ * VLIB init: register cubic under TCP_CC_CUBIC so it can be selected
+ * with the "cc-algo cubic" startup config option.
+ */
+clib_error_t *
+cubic_init (vlib_main_t * vm)
+{
+  clib_error_t *error = 0;
+
+  tcp_cc_algo_register (TCP_CC_CUBIC, &tcp_cubic);
+
+  return error;
+}
+
+VLIB_INIT_FUNCTION (cubic_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
*/
/* XXX limit this only to first partial ack? */
- tcp_retransmit_timer_force_update (tc);
+ tcp_retransmit_timer_update (tc);
/* RFC6675: If the incoming ACK is a cumulative acknowledgment,
* reset dupacks to 0. Also needed if in congestion recovery */
goto drop;
}
+ new_tc0->tx_fifo_size =
+ transport_tx_fifo_size (&new_tc0->connection);
/* Update rtt with the syn-ack sample */
tcp_estimate_initial_rtt (new_tc0);
TCP_EVT_DBG (TCP_EVT_SYNACK_RCVD, new_tc0);
goto drop;
}
- tc0->rtt_ts = 0;
- tcp_init_snd_vars (tc0);
+ new_tc0->tx_fifo_size =
+ transport_tx_fifo_size (&new_tc0->connection);
+ new_tc0->rtt_ts = 0;
+ tcp_init_snd_vars (new_tc0);
tcp_send_synack (new_tc0);
error0 = TCP_ERROR_SYNS_RCVD;
goto drop;
goto drop;
}
+ child0->tx_fifo_size = transport_tx_fifo_size (&child0->connection);
tcp_send_synack (child0);
tcp_timer_set (child0, TCP_TIMER_ESTABLISH, TCP_SYN_RCVD_TIME);