/*
- * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Copyright (c) 2018-2019 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*/
#include <vnet/tcp/tcp.h>
+#include <vnet/tcp/tcp_inlines.h>
#include <math.h>
#define beta_cubic 0.7
typedef struct cubic_cfg_
{
u8 fast_convergence;
+ u32 ssthresh;
} cubic_cfg_t;
static cubic_cfg_t cubic_cfg = {
.fast_convergence = 1,
+ .ssthresh = 0x7FFFFFFFU,
};
typedef struct cubic_data_
* RFC 8312 Eq. 2
*/
static inline f64
-K_cubic (cubic_data_t * cd)
+K_cubic (cubic_data_t * cd, u32 wnd)
{
- /* K = cubic_root(W_max*(1-beta_cubic)/C) */
- return pow (cd->w_max * (1 - beta_cubic) / cubic_c, 1 / 3.0);
+ /* K = cubic_root(W_max*(1-beta_cubic)/C)
+ * Because the current window may be less than W_max * beta_cubic because
+ * of fast convergence, we pass it as parameter */
+ return pow ((f64) (cd->w_max - wnd) / cubic_c, 1 / 3.0);
}
/**
cd->w_max = w_max;
tc->ssthresh = clib_max (tc->cwnd * beta_cubic, 2 * tc->snd_mss);
+ tc->cwnd = tc->ssthresh;
+}
+
+static void
+cubic_loss (tcp_connection_t * tc)
+{
+  cubic_data_t *cd = (cubic_data_t *) tcp_cc_data (tc);
+
+  /* Loss (as opposed to fast-recovery congestion) event: collapse cwnd
+   * to the loss window and restart the cubic epoch from now.
+   * NOTE(review): presumably invoked on retransmit timeout via the
+   * .loss vtable hook -- confirm against the registration site. */
+  tc->cwnd = tcp_loss_wnd (tc);
+  cd->t_start = cubic_time (tc->c_thread_index);
+  /* K = 0 so the cubic curve starts growing immediately from w_max
+   * instead of waiting out a concave/convex phase. */
+  cd->K = 0;
+  /* w_max is tracked in segments (cwnd / mss), not bytes. */
+  cd->w_max = tc->cwnd / tc->snd_mss;
+}
static void
{
cubic_data_t *cd = (cubic_data_t *) tcp_cc_data (tc);
cd->t_start = cubic_time (tc->c_thread_index);
- cd->K = K_cubic (cd);
tc->cwnd = tc->ssthresh;
+ cd->K = K_cubic (cd, tc->cwnd / tc->snd_mss);
}
static void
-cubic_rcv_ack (tcp_connection_t * tc)
+cubic_cwnd_accumulate (tcp_connection_t * tc, u32 thresh, u32 bytes_acked)
+{
+  /* We just updated the threshold and don't know how large the previous
+   * one was. Still, optimistically increase cwnd by one segment and
+   * clear the accumulated bytes. */
+  if (tc->cwnd_acc_bytes > thresh)
+    {
+      tc->cwnd += tc->snd_mss;
+      tc->cwnd_acc_bytes = 0;
+    }
+
+  /* NOTE(review): the bytes_acked parameter is ignored; tc->bytes_acked
+   * is passed instead. Both call sites in this file pass tc->bytes_acked,
+   * so behavior matches, but the unused parameter is confusing -- confirm
+   * whether the parameter or the field access is intended. */
+  tcp_cwnd_accumulate (tc, thresh, tc->bytes_acked);
+}
+
+static void
+cubic_rcv_ack (tcp_connection_t * tc, tcp_rate_sample_t * rs)
{
cubic_data_t *cd = (cubic_data_t *) tcp_cc_data (tc);
u64 w_cubic, w_aimd;
if (tcp_in_slowstart (tc))
{
- tc->cwnd += clib_min (tc->snd_mss, tc->bytes_acked);
+ tc->cwnd += tc->bytes_acked;
return;
}
w_aimd = (u64) W_est (cd, t, rtt_sec) * tc->snd_mss;
if (w_cubic < w_aimd)
{
- tcp_cwnd_accumulate (tc, tc->cwnd, tc->bytes_acked);
+ cubic_cwnd_accumulate (tc, tc->cwnd, tc->bytes_acked);
}
else
{
*/
thresh = (tc->snd_mss * tc->cwnd) / (w_cubic - tc->cwnd);
- /* Make sure we don't increase cwnd more often than every
- * 2 segments */
- thresh = clib_max (thresh, 2 * tc->snd_mss);
+ /* Make sure we don't increase cwnd more often than every segment */
+ thresh = clib_max (thresh, tc->snd_mss);
}
else
{
/* Practically we can't increment so just inflate threshold */
thresh = 50 * tc->cwnd;
}
- tcp_cwnd_accumulate (tc, thresh, tc->bytes_acked);
+ cubic_cwnd_accumulate (tc, thresh, tc->bytes_acked);
}
}
cubic_conn_init (tcp_connection_t * tc)
{
cubic_data_t *cd = (cubic_data_t *) tcp_cc_data (tc);
- tc->ssthresh = tc->snd_wnd;
+ tc->ssthresh = cubic_cfg.ssthresh;
tc->cwnd = tcp_initial_cwnd (tc);
cd->w_max = 0;
cd->K = 0;
static uword
cubic_unformat_config (unformat_input_t * input)
{
+ u32 ssthresh = 0x7FFFFFFFU;
+
if (!input)
return 0;
{
if (unformat (input, "no-fast-convergence"))
cubic_cfg.fast_convergence = 0;
+ else if (unformat (input, "ssthresh %u", &ssthresh))
+ cubic_cfg.ssthresh = ssthresh;
else
return 0;
}
.name = "cubic",
.unformat_cfg = cubic_unformat_config,
.congestion = cubic_congestion,
+ .loss = cubic_loss,
.recovered = cubic_recovered,
.rcv_ack = cubic_rcv_ack,
.rcv_cong_ack = newreno_rcv_cong_ack,