tcp_update_rcv_mss (tcp_connection_t * tc)
{
/* TODO find our iface MTU */
- tc->mss = tcp_main.default_mtu - sizeof (tcp_header_t);
+ tc->mss = tcp_cfg.default_mtu - sizeof (tcp_header_t);
}
/**
tcp_update_rcv_mss (tc);
TCP_IW_N_SEGMENTS * tc->mss;
*/
- return TCP_MIN_RX_FIFO_SIZE;
+ return tcp_cfg.min_rx_fifo;
}
/**
u32
tcp_initial_window_to_advertise (tcp_connection_t * tc)
{
- tcp_main_t *tm = &tcp_main;
- u32 max_fifo;
-
- /* Initial wnd for SYN. Fifos are not allocated yet.
- * Use some predefined value. For SYN-ACK we still want the
- * scale to be computed in the same way */
- max_fifo = tm->max_rx_fifo ? tm->max_rx_fifo : TCP_MAX_RX_FIFO_SIZE;
-
/* Compute rcv wscale only if peer advertised support for it */
if (tc->state != TCP_STATE_SYN_RCVD || tcp_opts_wscale (&tc->rcv_opts))
- tc->rcv_wscale = tcp_window_compute_scale (max_fifo);
+ tc->rcv_wscale = tcp_window_compute_scale (tcp_cfg.max_rx_fifo);
tc->rcv_wnd = tcp_initial_wnd_unscaled (tc);
u8 len = 0;
opts->flags |= TCP_OPTS_FLAG_MSS;
- opts->mss = tcp_main.default_mtu; /*XXX discover that */
+ opts->mss = tcp_cfg.default_mtu; /*XXX discover that */
len += TCP_OPTION_LEN_MSS;
opts->flags |= TCP_OPTS_FLAG_WSCALE;
if (tcp_opts_tstamp (&tc->rcv_opts))
{
opts->flags |= TCP_OPTS_FLAG_TSTAMP;
- opts->tsval = tcp_time_now_w_thread (tc->c_thread_index);
+ opts->tsval = tcp_tstamp (tc);
opts->tsecr = tc->tsval_recent;
len += TCP_OPTION_LEN_TIMESTAMP;
}
tcp_enqueue_to_ip_lookup_now (wrk, b, bi, is_ip4, fib_index);
TCP_EVT_DBG (TCP_EVT_RST_SENT, tc);
+ vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4),
+ TCP_ERROR_RST_SENT, 1);
}
/**
}
tcp_enqueue_to_ip_lookup_now (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
TCP_EVT_DBG (TCP_EVT_RST_SENT, tc);
+ vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4),
+ TCP_ERROR_RST_SENT, 1);
}
static void
* Set up retransmit and establish timers before requesting a buffer
* so that we can return if we've run out.
*/
- tcp_timer_set (tc, TCP_TIMER_ESTABLISH_AO, TCP_ESTABLISH_TIME);
tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN,
tc->rto * TCP_TO_TIMER_TICK);
tc->snd_nxt += data_len;
tc->rcv_las = tc->rcv_nxt;
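+ /* Update per-connection tx counters for the data just pushed */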
+ tc->bytes_out += data_len;
+ tc->data_segs_out += 1;
+
TCP_EVT_DBG (TCP_EVT_PKTIZE, tc);
}
}
void
-tcp_program_ack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
+tcp_program_ack (tcp_connection_t * tc)
{
if (!(tc->flags & TCP_CONN_SNDACK))
{
- vec_add1 (wrk->pending_acks, tc->c_c_index);
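+ /* Request a custom tx event from the session layer; the ack is
+ * built and sent from tcp_session_custom_tx */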
+ session_add_self_custom_tx_evt (&tc->connection, 1);
tc->flags |= TCP_CONN_SNDACK;
}
}
void
-tcp_program_dupack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
+tcp_program_dupack (tcp_connection_t * tc)
{
if (!(tc->flags & TCP_CONN_SNDACK))
{
- vec_add1 (wrk->pending_acks, tc->c_c_index);
+ session_add_self_custom_tx_evt (&tc->connection, 1);
tc->flags |= TCP_CONN_SNDACK;
}
if (tc->pending_dupacks < 255)
}
void
-tcp_send_acks (tcp_worker_ctx_t * wrk)
+tcp_program_fastretransmit (tcp_connection_t * tc)
{
- u32 thread_index, *pending_acks;
- tcp_connection_t *tc;
- int i, j, n_acks;
-
- if (!vec_len (wrk->pending_acks))
- return;
-
- thread_index = wrk->vm->thread_index;
- pending_acks = wrk->pending_acks;
- for (i = 0; i < vec_len (pending_acks); i++)
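+ /* Ask the session layer for a custom tx event; only one fast
+ * retransmit request is kept outstanding per connection */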
+ if (!(tc->flags & TCP_CONN_FRXT_PENDING))
{
- tc = tcp_connection_get (pending_acks[i], thread_index);
- tc->flags &= ~TCP_CONN_SNDACK;
- if (!tc->pending_dupacks)
- {
- tcp_send_ack (tc);
- continue;
- }
-
- /* If we're supposed to send dupacks but have no ooo data
- * send only one ack */
- if (!vec_len (tc->snd_sacks))
- {
- tcp_send_ack (tc);
- continue;
- }
-
- /* Start with first sack block */
- tc->snd_sack_pos = 0;
-
- /* Generate enough dupacks to cover all sack blocks. Do not generate
- * more sacks than the number of packets received. But do generate at
- * least 3, i.e., the number needed to signal congestion, if needed. */
- n_acks = vec_len (tc->snd_sacks) / TCP_OPTS_MAX_SACK_BLOCKS;
- n_acks = clib_min (n_acks, tc->pending_dupacks);
- n_acks = clib_max (n_acks, clib_min (tc->pending_dupacks, 3));
- for (j = 0; j < n_acks; j++)
- tcp_send_ack (tc);
-
- tc->pending_dupacks = 0;
- tc->snd_sack_pos = 0;
+ session_add_self_custom_tx_evt (&tc->connection, 0);
+ tc->flags |= TCP_CONN_FRXT_PENDING;
}
- _vec_len (wrk->pending_acks) = 0;
}
/**
void
tcp_send_window_update_ack (tcp_connection_t * tc)
{
- tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
u32 win;
if (tcp_zero_rwnd_sent (tc))
if (win > 0)
{
tcp_zero_rwnd_sent_off (tc);
- tcp_program_ack (wrk, tc);
+ tcp_program_ack (tc);
}
}
}
/* Start is beyond snd_congestion */
start = tc->snd_una + offset;
if (seq_geq (start, tc->snd_congestion))
- goto done;
+ return 0;
/* Don't overshoot snd_congestion */
if (seq_gt (start + max_deq_bytes, tc->snd_congestion))
{
max_deq_bytes = tc->snd_congestion - start;
if (max_deq_bytes == 0)
- goto done;
+ return 0;
}
n_bytes = tcp_prepare_segment (wrk, tc, offset, max_deq_bytes, b);
tcp_bt_track_rxt (tc, start, start + n_bytes);
}
-done:
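+ /* Account for the retransmitted bytes and segment */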
+ tc->bytes_retrans += n_bytes;
+ tc->segs_retrans += 1;
TCP_EVT_DBG (TCP_EVT_CC_RTX, tc, offset, n_bytes);
return n_bytes;
}
tc->rcv_dupacks = 0;
tc->rtt_ts = 0;
tc->cwnd_acc_bytes = 0;
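+ /* Count timeout (rto) recovery episodes */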
+ tc->tr_occurences += 1;
tcp_connection_tx_pacer_reset (tc, tc->cwnd, 2 * tc->snd_mss);
tcp_recovery_on (tc);
}
-static inline void
-tcp_timer_retransmit_handler_i (u32 index, u8 is_syn)
+void
+tcp_timer_retransmit_handler (u32 tc_index)
{
u32 thread_index = vlib_get_thread_index ();
tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
vlib_buffer_t *b = 0;
u32 bi, n_bytes;
- if (is_syn)
- {
- tc = tcp_half_open_connection_get (index);
- /* Note: the connection may have transitioned to ESTABLISHED... */
- if (PREDICT_FALSE (tc == 0 || tc->state != TCP_STATE_SYN_SENT))
- return;
- tc->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID;
- }
- else
- {
- tc = tcp_connection_get (index, thread_index);
- /* Note: the connection may have been closed and pool_put */
- if (PREDICT_FALSE (tc == 0 || tc->state == TCP_STATE_SYN_SENT))
- return;
- tc->timers[TCP_TIMER_RETRANSMIT] = TCP_TIMER_HANDLE_INVALID;
- /* Wait-close and retransmit could pop at the same time */
- if (tc->state == TCP_STATE_CLOSED)
- return;
- }
+ tc = tcp_connection_get (tc_index, thread_index);
+
+ /* Note: the connection may have been closed and returned to the pool (pool_put) */
+ if (PREDICT_FALSE (tc == 0 || tc->state == TCP_STATE_SYN_SENT))
+ return;
+
+ tc->timers[TCP_TIMER_RETRANSMIT] = TCP_TIMER_HANDLE_INVALID;
+
+ /* Wait-close and retransmit could pop at the same time */
+ if (tc->state == TCP_STATE_CLOSED)
+ return;
if (tc->state >= TCP_STATE_ESTABLISHED)
{
tcp_connection_set_state (tc, TCP_STATE_CLOSED);
session_transport_closing_notify (&tc->connection);
tcp_connection_timers_reset (tc);
- tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME);
+ tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.closewait_time);
return;
}
/* First retransmit timeout */
if (tc->rto_boff == 1)
- tcp_cc_init_rxt_timeout (tc);
+ {
+ tcp_cc_init_rxt_timeout (tc);
+ /* Record timestamp. Eifel detection algorithm RFC3522 */
+ tc->snd_rxt_ts = tcp_tstamp (tc);
+ }
if (tc->flags & TCP_CONN_RATE_SAMPLE)
tcp_bt_flush_samples (tc);
/* If we've sent beyond snd_congestion, update it */
tc->snd_congestion = seq_max (tc->snd_nxt, tc->snd_congestion);
-
tc->snd_nxt = tc->snd_una;
- tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
- /* Send one segment. Note that n_bytes may be zero due to buffer
- * shortfall */
+ /* Send one segment. n_bytes may be zero due to buffer shortfall */
n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, tc->snd_mss, &b);
if (!n_bytes)
{
}
bi = vlib_get_buffer_index (vm, b);
-
- /* For first retransmit, record timestamp (Eifel detection RFC3522) */
- if (tc->rto_boff == 1)
- tc->snd_rxt_ts = tcp_time_now_w_thread (tc->c_thread_index);
-
tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
+
+ tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
tcp_retransmit_timer_force_update (tc);
}
- /* Retransmit for SYN */
- else if (tc->state == TCP_STATE_SYN_SENT)
+ /* Retransmit SYN-ACK */
+ else if (tc->state == TCP_STATE_SYN_RCVD)
{
- /* Half-open connection actually moved to established but we were
- * waiting for syn retransmit to pop to call cleanup from the right
- * thread. */
- if (tc->flags & TCP_CONN_HALF_OPEN_DONE)
- {
- if (tcp_half_open_connection_cleanup (tc))
- TCP_DBG ("could not remove half-open connection");
- return;
- }
-
TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 2);
- /* Try without increasing RTO a number of times. If this fails,
- * start growing RTO exponentially */
- tc->rto_boff += 1;
- if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
- tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
+ tc->rtt_ts = 0;
- tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN,
- tc->rto * TCP_TO_TIMER_TICK);
+ /* Passive open establish timeout */
+ if (tc->rto > TCP_ESTABLISH_TIME >> 1)
+ {
+ tcp_connection_set_state (tc, TCP_STATE_CLOSED);
+ tcp_connection_timers_reset (tc);
+ tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.cleanup_time);
+ return;
+ }
if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
{
- tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN, 1);
+ tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, 1);
return;
}
- b = vlib_get_buffer (vm, bi);
- tcp_init_buffer (vm, b);
- tcp_make_syn (tc, b);
-
- tc->rtt_ts = 0;
- TCP_EVT_DBG (TCP_EVT_SYN_RXT, tc, 0);
-
- /* This goes straight to ipx_lookup. Retransmit timer set already */
- tcp_push_ip_hdr (wrk, tc, b);
- tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
- }
- /* Retransmit SYN-ACK */
- else if (tc->state == TCP_STATE_SYN_RCVD)
- {
- TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 2);
-
tc->rto_boff += 1;
if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
- tc->rtt_ts = 0;
tcp_retransmit_timer_force_update (tc);
- if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
- {
- tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, 1);
- return;
- }
-
b = vlib_get_buffer (vm, bi);
tcp_init_buffer (vm, b);
tcp_make_synack (tc, b);
}
}
+/**
+ * SYN retransmit timer handler. Active open only.
+ */
void
-tcp_timer_retransmit_handler (u32 index)
+tcp_timer_retransmit_syn_handler (u32 tc_index)
{
- tcp_timer_retransmit_handler_i (index, 0);
-}
+ u32 thread_index = vlib_get_thread_index ();
+ tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
+ vlib_main_t *vm = wrk->vm;
+ tcp_connection_t *tc;
+ vlib_buffer_t *b = 0;
+ u32 bi;
-void
-tcp_timer_retransmit_syn_handler (u32 index)
-{
- tcp_timer_retransmit_handler_i (index, 1);
+ tc = tcp_half_open_connection_get (tc_index);
+
+ /* Note: the connection may have transitioned to ESTABLISHED... */
+ if (PREDICT_FALSE (tc == 0 || tc->state != TCP_STATE_SYN_SENT))
+ return;
+
+ tc->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID;
+
+ /* The half-open connection has already moved to established, but we
+ * waited for the syn retransmit timer to pop so that cleanup runs on
+ * the right thread. */
+ if (tc->flags & TCP_CONN_HALF_OPEN_DONE)
+ {
+ if (tcp_half_open_connection_cleanup (tc))
+ TCP_DBG ("could not remove half-open connection");
+ return;
+ }
+
+ TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 2);
+ tc->rtt_ts = 0;
+
+ /* Active open establish timeout */
+ if (tc->rto >= TCP_ESTABLISH_TIME >> 1)
+ {
+ session_stream_connect_notify (&tc->connection, 1 /* fail */ );
+ tcp_connection_cleanup (tc);
+ return;
+ }
+
+ if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
+ {
+ tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN, 1);
+ return;
+ }
+
+ /* Try without increasing RTO a number of times. If this fails,
+ * start growing RTO exponentially */
+ tc->rto_boff += 1;
+ if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
+ tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
+
+ b = vlib_get_buffer (vm, bi);
+ tcp_init_buffer (vm, b);
+ tcp_make_syn (tc, b);
+
+ TCP_EVT_DBG (TCP_EVT_SYN_RXT, tc, 0);
+
+ /* This goes straight to ipx_lookup */
+ tcp_push_ip_hdr (wrk, tc, b);
+ tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
+
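+ /* Rearm the SYN retransmit timer with the possibly backed-off rto */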
+ tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN,
+ tc->rto * TCP_TO_TIMER_TICK);
}
/**
tcp_fast_retransmit_unsent (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
u32 burst_size)
{
- u32 offset, n_segs = 0, n_written, bi;
+ u32 offset, n_segs = 0, n_written, bi, available_wnd;
vlib_main_t *vm = wrk->vm;
vlib_buffer_t *b = 0;
offset = tc->snd_nxt - tc->snd_una;
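+ /* Constrain the burst to what the peer's receive window still allows */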
+ available_wnd = tc->snd_wnd - offset;
+ burst_size = clib_min (burst_size, available_wnd / tc->snd_mss);
+
while (n_segs < burst_size)
{
n_written = tcp_prepare_segment (wrk, tc, offset, tc->snd_mss, &b);
snd_space = tcp_available_cc_snd_space (tc);
if (snd_space < tc->snd_mss)
{
- tcp_program_fastretransmit (wrk, tc);
+ tcp_program_fastretransmit (tc);
return 0;
}
snd_space / tc->snd_mss);
n_segs_now = tcp_fast_retransmit_unsent (wrk, tc, burst_size);
if (max_deq > n_segs_now * tc->snd_mss)
- tcp_program_fastretransmit (wrk, tc);
+ tcp_program_fastretransmit (tc);
n_segs += n_segs_now;
goto done;
}
}
if (hole)
- tcp_program_fastretransmit (wrk, tc);
+ tcp_program_fastretransmit (tc);
done:
return n_segs;
burst_size = clib_min (burst_size - n_segs, snd_space / tc->snd_mss);
n_segs_now = tcp_fast_retransmit_unsent (wrk, tc, burst_size);
if (max_deq > n_segs_now * tc->snd_mss)
- tcp_program_fastretransmit (wrk, tc);
+ tcp_program_fastretransmit (tc);
n_segs += n_segs_now;
}
else
return tcp_fast_retransmit_no_sack (wrk, tc, burst_size);
}
+
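+/**
+ * Flush a pending ack or a burst of dupacks, limited to max_burst_size.
+ * Returns the number of acks actually sent.
+ */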
+static int
+tcp_send_acks (tcp_connection_t * tc, u32 max_burst_size)
+{
+ int j, n_acks;
+
+ if (!tc->pending_dupacks)
+ {
+ tcp_send_ack (tc);
+ return 1;
+ }
+
+ /* If we're supposed to send dupacks but have no out-of-order data,
+ * send only one ack */
+ if (!vec_len (tc->snd_sacks))
+ {
+ tcp_send_ack (tc);
+ return 1;
+ }
+
+ /* Start with first sack block */
+ tc->snd_sack_pos = 0;
+
+ /* Generate enough dupacks to cover all sack blocks. Do not generate
+ * more sacks than the number of packets received. But do generate at
+ * least 3, i.e., the number needed to signal congestion, if needed. */
+ n_acks = vec_len (tc->snd_sacks) / TCP_OPTS_MAX_SACK_BLOCKS;
+ n_acks = clib_min (n_acks, tc->pending_dupacks);
+ n_acks = clib_max (n_acks, clib_min (tc->pending_dupacks, 3));
+ for (j = 0; j < clib_min (n_acks, max_burst_size); j++)
+ tcp_send_ack (tc);
+
+ if (n_acks < max_burst_size)
+ {
+ tc->pending_dupacks = 0;
+ tc->snd_sack_pos = 0;
+ tc->dupacks_out += n_acks;
+ return n_acks;
+ }
+ else
+ {
+ TCP_DBG ("constrained by burst size");
+ tc->pending_dupacks = n_acks - max_burst_size;
+ tc->dupacks_out += max_burst_size;
+ tcp_program_dupack (tc);
+ return max_burst_size;
+ }
+}
+
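+/**
+ * Ask the tx pacer how much may be sent now and do a fast retransmit
+ * burst of at most that many segments. If the pacer allows nothing,
+ * reprogram the fast retransmit and try again later.
+ */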
+static int
+tcp_do_fastretransmit (tcp_connection_t * tc, u32 max_burst_size)
+{
+ u32 n_segs = 0, burst_size, sent_bytes, burst_bytes;
+ tcp_worker_ctx_t *wrk;
+
+ wrk = tcp_get_worker (tc->c_thread_index);
+ burst_bytes = transport_connection_tx_pacer_burst (&tc->connection,
+ wrk->vm->clib_time.last_cpu_time);
+ burst_size = clib_min (max_burst_size, burst_bytes / tc->snd_mss);
+ if (!burst_size)
+ {
+ tcp_program_fastretransmit (tc);
+ return 0;
+ }
+
+ n_segs = tcp_fast_retransmit (wrk, tc, burst_size);
+ sent_bytes = clib_min (n_segs * tc->snd_mss, burst_bytes);
+ transport_connection_tx_pacer_update_bytes (&tc->connection, sent_bytes);
+ return n_segs;
+}
+
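+/**
+ * Session layer custom tx callback. Flushes pending fast retransmits
+ * first and then any pending (dup)acks, all within max_burst_size.
+ */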
+int
+tcp_session_custom_tx (void *conn, u32 max_burst_size)
+{
+ tcp_connection_t *tc = (tcp_connection_t *) conn;
+ u32 n_segs = 0;
+
+ if (tcp_in_fastrecovery (tc) && (tc->flags & TCP_CONN_FRXT_PENDING))
+ {
+ tc->flags &= ~TCP_CONN_FRXT_PENDING;
+ n_segs = tcp_do_fastretransmit (tc, max_burst_size);
+ max_burst_size -= n_segs;
+ }
+
+ if (!(tc->flags & TCP_CONN_SNDACK))
+ return n_segs;
+
+ tc->flags &= ~TCP_CONN_SNDACK;
+
+ /* We have retransmitted packets and no dupack */
+ if (n_segs && !tc->pending_dupacks)
+ return n_segs;
+
+ if (!max_burst_size)
+ {
+ tcp_program_ack (tc);
+ return max_burst_size;
+ }
+
+ n_segs += tcp_send_acks (tc, max_burst_size);
+
+ return n_segs;
+}
#endif /* CLIB_MARCH_VARIANT */
static void
if (!TCP_ALWAYS_ACK)
tcp_timer_reset (tc0, TCP_TIMER_DELACK);
+
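+ /* Account for the segment just sent */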
+ tc0->segs_out += 1;
}
always_inline uword