{
tcp_main_t *tm = vnet_get_tcp_main ();
clib_spinlock_lock_if_init (&tm->half_open_lock);
- pool_put_index (tm->half_open_connections, tc->c_c_index);
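/* Poison the memory in debug images to catch use after free */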
if (CLIB_DEBUG)
clib_memset (tc, 0xFA, sizeof (*tc));
+ pool_put (tm->half_open_connections, tc);
clib_spinlock_unlock_if_init (&tm->half_open_lock);
}
tcp_cc_cleanup (tc);
vec_free (tc->snd_sacks);
vec_free (tc->snd_sacks_fl);
+ vec_free (tc->rcv_opts.sacks);
+ pool_free (tc->sack_sb.holes);
if (tc->flags & TCP_CONN_RATE_SAMPLE)
tcp_bt_cleanup (tc);
{
tcp_connection_t *tc;
tc = tcp_connection_get (conn_index, thread_index);
+ if (!tc)
+ return;
tcp_connection_set_state (tc, TCP_STATE_CLOSED);
tcp_connection_cleanup (tc);
}
return ((tmp >> 32) ^ (tmp & 0xffffffff));
}
+/**
+ * Initialize max segment size we're able to process.
+ *
+ * The value is constrained by the output interface's MTU and by the size
+ * of the IP and TCP headers (see RFC6691). It is also what we advertise
+ * to our peer.
+ */
+static void
+tcp_init_rcv_mss (tcp_connection_t * tc)
+{
+ u8 ip_hdr_len;
+
+ ip_hdr_len = tc->c_is_ip4 ? sizeof (ip4_header_t) : sizeof (ip6_header_t);
+ tc->mss = tcp_cfg.default_mtu - sizeof (tcp_header_t) - ip_hdr_len;
+}
+
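+/* Illustrative arithmetic (editor's sketch, assuming stack defaults):
+ * with tcp_cfg.default_mtu = 1500, an ip4 connection computes
+ * mss = 1500 - 20 (tcp header) - 20 (ip4 header) = 1460 bytes,
+ * while an ip6 connection computes 1500 - 20 - 40 = 1440 bytes. */
+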
+static void
+tcp_init_mss (tcp_connection_t * tc)
+{
+ u16 default_min_mss = 536;
+
+ tcp_init_rcv_mss (tc);
+
+ /* TODO consider PMTU discovery */
+ tc->snd_mss = clib_min (tc->rcv_opts.mss, tc->mss);
+
+ if (tc->snd_mss < 45)
+ {
+ /* Assume that at least the min default mss works */
+ tc->snd_mss = default_min_mss;
+ tc->rcv_opts.mss = default_min_mss;
+ }
+
+ /* We should have enough space for 40 bytes of options */
+ ASSERT (tc->snd_mss > 45);
+
+ /* If we use timestamp option, account for it */
+ if (tcp_opts_tstamp (&tc->rcv_opts))
+ tc->snd_mss -= TCP_OPTION_LEN_TIMESTAMP;
+}
+
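+/* Editor's sketch of the resulting values: a peer advertising mss 8960
+ * against our mss of 1460 yields snd_mss = min (8960, 1460) = 1460;
+ * with timestamps negotiated, TCP_OPTION_LEN_TIMESTAMP (10 bytes) is
+ * subtracted, leaving 1450 bytes of payload per segment. */
+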
/**
* Initialize connection send variables.
*/
tcp_set_time_now (tcp_get_worker (vlib_get_thread_index ()));
+ tcp_init_rcv_mss (tc);
tc->iss = tcp_generate_random_iss (tc);
tc->snd_una = tc->iss;
tc->snd_nxt = tc->iss + 1;
tc->snd_una_max = tc->snd_nxt;
- tc->srtt = 0;
+ tc->srtt = 100; /* 100 ms */
}
void
tcp_enable_pacing (tcp_connection_t * tc)
{
- u32 initial_bucket, byte_rate;
- initial_bucket = 16 * tc->snd_mss;
- byte_rate = 2 << 16;
- transport_connection_tx_pacer_init (&tc->connection, byte_rate,
- initial_bucket);
+ u32 byte_rate;
+ byte_rate = tc->cwnd / (tc->srtt * TCP_TICK);
+ transport_connection_tx_pacer_init (&tc->connection, byte_rate, tc->cwnd);
tc->mrtt_us = (u32) ~ 0;
}
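
/* Editor's sketch: with the initial srtt of 100 ticks (100 ms, given a
 * TCP_TICK of 0.001 s) and a cwnd of 10 * 1460 = 14600 bytes, the
 * initial rate is 14600 / 0.1 = 146000 bytes/s, i.e., one cwnd per
 * smoothed rtt. */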
tcp_connection_timers_init (tc);
tcp_init_mss (tc);
scoreboard_init (&tc->sack_sb);
- tcp_cc_init (tc);
if (tc->state == TCP_STATE_SYN_RCVD)
tcp_init_snd_vars (tc);
+ tcp_cc_init (tc);
+
if (!tc->c_is_ip4 && ip6_address_is_link_local_unicast (&tc->c_rmt_ip6))
tcp_add_del_adjacency (tc, 1);
s = format (s, "%U ", format_tcp_congestion_status, tc);
s = format (s, "algo %s cwnd %u ssthresh %u bytes_acked %u\n",
tc->cc_algo->name, tc->cwnd, tc->ssthresh, tc->bytes_acked);
- s = format (s, "%Ucc space %u prev_cwnd %u prev_ssthresh %u rtx_bytes %u\n",
+ s = format (s, "%Ucc space %u prev_cwnd %u prev_ssthresh %u rxt_bytes %u\n",
format_white_space, indent, tcp_available_cc_snd_space (tc),
tc->prev_cwnd, tc->prev_ssthresh, tc->snd_rxt_bytes);
s = format (s, "%Usnd_congestion %u dupack %u limited_transmit %u\n",
sack_scoreboard_hole_t *hole;
u32 indent = format_get_indent (s);
- s = format (s, "sacked_bytes %u last_sacked_bytes %u lost_bytes %u\n",
- sb->sacked_bytes, sb->last_sacked_bytes, sb->lost_bytes);
- s = format (s, "%Ulast_bytes_delivered %u high_sacked %u snd_una_adv %u\n",
+ s = format (s, "sacked %u last_sacked %u lost %u last_lost %u\n",
+ sb->sacked_bytes, sb->last_sacked_bytes, sb->lost_bytes,
+ sb->last_lost_bytes);
+ s = format (s, "%Ulast_bytes_delivered %u high_sacked %u is_reneging %u\n",
format_white_space, indent, sb->last_bytes_delivered,
- sb->high_sacked - tc->iss, sb->snd_una_adv);
+ sb->high_sacked - tc->iss, sb->is_reneging);
s = format (s, "%Ucur_rxt_hole %u high_rxt %u rescue_rxt %u",
format_white_space, indent, sb->cur_rxt_hole,
sb->high_rxt - tc->iss, sb->rescue_rxt - tc->iss);
tcp_session_get_transport (u32 conn_index, u32 thread_index)
{
tcp_connection_t *tc = tcp_connection_get (conn_index, thread_index);
+ if (PREDICT_FALSE (!tc))
+ return 0;
return &tc->connection;
}
return &tc->connection;
}
+static u16
+tcp_session_cal_goal_size (tcp_connection_t * tc)
+{
+ u16 goal_size;
+
+ /* Aim for a burst just under the max GSO size, but never more than
+  * half the send window */
+ goal_size = TCP_MAX_GSO_SZ - tc->snd_mss % TCP_MAX_GSO_SZ;
+ goal_size = clib_min (goal_size, tc->snd_wnd / 2);
+
+ return goal_size > tc->snd_mss ? goal_size : tc->snd_mss;
+}
+
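+/* Editor's sketch, assuming a TCP_MAX_GSO_SZ of 65536: with
+ * snd_mss = 1448, goal_size = 65536 - (1448 % 65536) = 64088 bytes
+ * before the snd_wnd / 2 cap and the snd_mss floor are applied. */
+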
/**
* Compute maximum segment size for session layer.
*
* the current state of the connection. */
tcp_update_burst_snd_vars (tc);
+ if (PREDICT_FALSE (tc->is_tso))
+   return tcp_session_cal_goal_size (tc);
+
return tc->snd_mss;
}
static inline u32
tcp_snd_space_inline (tcp_connection_t * tc)
{
- int snd_space, snt_limited;
+ int snd_space;
if (PREDICT_FALSE (tcp_in_fastrecovery (tc)
                   || tc->state == TCP_STATE_CLOSED))
  return 0;

snd_space = tcp_available_output_snd_space (tc);
- /* If we haven't gotten dupacks or if we did and have gotten sacked
- * bytes then we can still send as per Limited Transmit (RFC3042) */
- if (PREDICT_FALSE (tc->rcv_dupacks != 0
- && (tcp_opts_sack_permitted (tc)
- && tc->sack_sb.last_sacked_bytes == 0)))
+ /* If we got dupacks or sacked bytes but we're not yet in recovery, try
+ * to force the peer to send enough dupacks to start retransmitting as
+ * per Limited Transmit (RFC3042)
+ */
+ if (PREDICT_FALSE (tc->rcv_dupacks != 0 || tc->sack_sb.sacked_bytes))
{
- if (tc->rcv_dupacks == 1 && tc->limited_transmit != tc->snd_nxt)
+ if (tc->limited_transmit != tc->snd_nxt
+ && (seq_lt (tc->limited_transmit, tc->snd_nxt - 2 * tc->snd_mss)
+ || seq_gt (tc->limited_transmit, tc->snd_nxt)))
tc->limited_transmit = tc->snd_nxt;
+
ASSERT (seq_leq (tc->limited_transmit, tc->snd_nxt));
- snt_limited = tc->snd_nxt - tc->limited_transmit;
- snd_space = clib_max (2 * tc->snd_mss - snt_limited, 0);
+ int snt_limited = tc->snd_nxt - tc->limited_transmit;
+ snd_space = clib_max ((int) 2 * tc->snd_mss - snt_limited, 0);
}
return tcp_round_snd_space (tc, snd_space);
}
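
/* Editor's sketch: on the first dupack limited_transmit is pinned at
 * snd_nxt, so snt_limited = 0 and up to 2 * snd_mss new bytes may go
 * out; once those are sent, snd_space collapses to 0 until more acks
 * arrive or the connection enters recovery. */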
u32 start_bucket)
{
tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
- u32 byte_rate = window / ((f64) TCP_TICK * tc->srtt);
+ f64 srtt = clib_min ((f64) tc->srtt * TCP_TICK, tc->mrtt_us);
u64 last_time = wrk->vm->clib_time.last_cpu_time;
- transport_connection_tx_pacer_reset (&tc->connection, byte_rate,
+ transport_connection_tx_pacer_reset (&tc->connection, window / srtt,
start_bucket, last_time);
}
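
/* Editor's note: taking the smaller of the coarse srtt (ticks scaled
 * by TCP_TICK) and the high-precision mrtt_us estimate yields a higher
 * byte rate, so a stale, inflated srtt cannot under-pace the
 * connection after a reset. */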
tcp_cfg.max_rx_fifo = 32 << 20;
tcp_cfg.min_rx_fifo = 4 << 10;
- tcp_cfg.default_mtu = 1460;
+ tcp_cfg.default_mtu = 1500;
tcp_cfg.initial_cwnd_multiplier = 0;
tcp_cfg.enable_tx_pacing = 1;
tcp_cfg.cc_algo = TCP_CC_NEWRENO;
+ tcp_cfg.rwnd_min_update_ack = 1;
/* Time constants defined as timer tick (100ms) multiples */
tcp_cfg.delack_time = 1; /* 0.1s */
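
/* Editor's sketch of a startup.conf stanza for the parser below, with
 * purely illustrative values; times are given in seconds and converted
 * to 100 ms timer ticks:
 *
 *   tcp {
 *     mtu 1500
 *     max-rx-fifo 32m
 *     no-tx-pacing
 *     closewait-time 10
 *   }
 */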
static clib_error_t *
tcp_config_fn (vlib_main_t * vm, unformat_input_t * input)
{
+ u32 cwnd_multiplier, tmp_time;
+ uword memory_size;
+
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "preallocated-connections %d",
&tcp_cfg.preallocated_connections))
;
else if (unformat (input, "max-rx-fifo %U", unformat_memory_size,
- &tcp_cfg.max_rx_fifo))
- ;
+ &memory_size))
+ tcp_cfg.max_rx_fifo = memory_size;
else if (unformat (input, "min-rx-fifo %U", unformat_memory_size,
- &tcp_cfg.min_rx_fifo))
- ;
- else if (unformat (input, "mtu %d", &tcp_cfg.default_mtu))
+ &memory_size))
+ tcp_cfg.min_rx_fifo = memory_size;
+ else if (unformat (input, "mtu %u", &tcp_cfg.default_mtu))
;
- else if (unformat (input, "initial-cwnd-multiplier %d",
- &tcp_cfg.initial_cwnd_multiplier))
+ else if (unformat (input, "rwnd-min-update-ack %d",
+ &tcp_cfg.rwnd_min_update_ack))
;
+ else if (unformat (input, "initial-cwnd-multiplier %u",
+ &cwnd_multiplier))
+ tcp_cfg.initial_cwnd_multiplier = cwnd_multiplier;
else if (unformat (input, "no-tx-pacing"))
tcp_cfg.enable_tx_pacing = 0;
else if (unformat (input, "cc-algo %U", unformat_tcp_cc_algo,
;
else if (unformat (input, "%U", unformat_tcp_cc_algo_cfg))
;
- else if (unformat (input, "closewait-time %d", &tcp_cfg.closewait_time))
- tcp_cfg.closewait_time /= TCP_TIMER_TICK;
- else if (unformat (input, "timewait-time %d", &tcp_cfg.timewait_time))
- tcp_cfg.timewait_time /= TCP_TIMER_TICK;
- else if (unformat (input, "finwait1-time %d", &tcp_cfg.finwait1_time))
- tcp_cfg.finwait1_time /= TCP_TIMER_TICK;
- else if (unformat (input, "finwait2-time %d", &tcp_cfg.finwait2_time))
- tcp_cfg.finwait2_time /= TCP_TIMER_TICK;
- else if (unformat (input, "lastack-time %d", &tcp_cfg.lastack_time))
- tcp_cfg.lastack_time /= TCP_TIMER_TICK;
- else if (unformat (input, "closing-time %d", &tcp_cfg.closing_time))
- tcp_cfg.closing_time /= TCP_TIMER_TICK;
- else if (unformat (input, "cleanup-time %d", &tcp_cfg.cleanup_time))
- tcp_cfg.cleanup_time /= TCP_TIMER_TICK;
+ else if (unformat (input, "closewait-time %u", &tmp_time))
+ tcp_cfg.closewait_time = tmp_time / TCP_TIMER_TICK;
+ else if (unformat (input, "timewait-time %u", &tmp_time))
+ tcp_cfg.timewait_time = tmp_time / TCP_TIMER_TICK;
+ else if (unformat (input, "finwait1-time %u", &tmp_time))
+ tcp_cfg.finwait1_time = tmp_time / TCP_TIMER_TICK;
+ else if (unformat (input, "finwait2-time %u", &tmp_time))
+ tcp_cfg.finwait2_time = tmp_time / TCP_TIMER_TICK;
+ else if (unformat (input, "lastack-time %u", &tmp_time))
+ tcp_cfg.lastack_time = tmp_time / TCP_TIMER_TICK;
+ else if (unformat (input, "closing-time %u", &tmp_time))
+ tcp_cfg.closing_time = tmp_time / TCP_TIMER_TICK;
+ else if (unformat (input, "cleanup-time %u", &tmp_time))
+ tcp_cfg.cleanup_time = tmp_time / TCP_TIMER_TICK;
else
return clib_error_return (0, "unknown input `%U'",
format_unformat_error, input);
/* Push segments */
tcp_rcv_sacks (dummy_tc, next_ack);
if (has_new_ack)
- dummy_tc->snd_una = next_ack + dummy_tc->sack_sb.snd_una_adv;
+ dummy_tc->snd_una = next_ack;
if (verbose)
s = format (s, "result: %U", format_tcp_scoreboard,