return tcp_initial_window_to_advertise (tc);
tcp_update_rcv_wnd (tc);
-
- if (tc->rcv_wnd == 0)
- {
- tc->flags |= TCP_CONN_SENT_RCV_WND0;
- }
- else
- {
- tc->flags &= ~TCP_CONN_SENT_RCV_WND0;
- }
-
return tc->rcv_wnd >> tc->rcv_wscale;
}
if (tcp_opts_tstamp (&tc->rcv_opts))
{
opts->flags |= TCP_OPTS_FLAG_TSTAMP;
- opts->tsval = tcp_time_now_w_thread (tc->c_thread_index);
+ opts->tsval = tcp_tstamp (tc);
opts->tsecr = tc->tsval_recent;
len += TCP_OPTION_LEN_TIMESTAMP;
}
&tc->snd_opts);
tcp_update_rcv_wnd (tc);
+
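+ /* Flag the start of a tx burst so the first packet out can grab a
+ * delivery-rate sample (see tcp_session_push_header) */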
+ if (tc->flags & TCP_CONN_RATE_SAMPLE)
+ tc->flags |= TCP_CONN_TRACK_BURST;
+
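+ /* Everything sent is acked, so this burst (re)starts transmission */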
+ if (tc->snd_una == tc->snd_nxt)
+ tcp_cc_event (tc, TCP_CC_EVT_START_TX);
}
void
tcp_options_write ((u8 *) (th + 1), snd_opts);
vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
+
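+ /* Remember whether a zero window was advertised, so a window update
+ * ack can be sent once it reopens (see tcp_send_window_update_ack) */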
+ if (wnd == 0)
+ tcp_zero_rwnd_sent_on (tc);
+ else
+ tcp_zero_rwnd_sent_off (tc);
}
/**
tcp_session_push_header (transport_connection_t * tconn, vlib_buffer_t * b)
{
tcp_connection_t *tc = (tcp_connection_t *) tconn;
+
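+ /* First packet of a tracked burst: note if we are app limited and
+ * start a delivery-rate sample for the bytes about to be sent */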
+ if (tc->flags & TCP_CONN_TRACK_BURST)
+ {
+ tcp_bt_check_app_limited (tc);
+ tcp_bt_track_tx (tc);
+ tc->flags &= ~TCP_CONN_TRACK_BURST;
+ }
+
tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0, /* burst */ 1,
/* update_snd_nxt */ 1);
+
tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
- ASSERT (seq_leq (tc->snd_una_max, tc->snd_una + tc->snd_wnd));
tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
/* If not tracking an ACK, start tracking */
if (tc->rtt_ts == 0 && !tcp_in_cong_recovery (tc))
}
void
-tcp_program_ack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
+tcp_program_ack (tcp_connection_t * tc)
{
if (!(tc->flags & TCP_CONN_SNDACK))
{
- vec_add1 (wrk->pending_acks, tc->c_c_index);
+ session_add_self_custom_tx_evt (&tc->connection, 1);
tc->flags |= TCP_CONN_SNDACK;
}
}
void
-tcp_program_dupack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
+tcp_program_dupack (tcp_connection_t * tc)
{
if (!(tc->flags & TCP_CONN_SNDACK))
{
- vec_add1 (wrk->pending_acks, tc->c_c_index);
+ session_add_self_custom_tx_evt (&tc->connection, 1);
tc->flags |= TCP_CONN_SNDACK;
}
if (tc->pending_dupacks < 255)
}
void
-tcp_send_acks (tcp_worker_ctx_t * wrk)
+tcp_program_fastretransmit (tcp_connection_t * tc)
{
- u32 thread_index, *pending_acks;
- tcp_connection_t *tc;
- int i, j, n_acks;
-
- if (!vec_len (wrk->pending_acks))
- return;
-
- thread_index = wrk->vm->thread_index;
- pending_acks = wrk->pending_acks;
- for (i = 0; i < vec_len (pending_acks); i++)
+ if (!(tc->flags & TCP_CONN_FRXT_PENDING))
{
- tc = tcp_connection_get (pending_acks[i], thread_index);
- tc->flags &= ~TCP_CONN_SNDACK;
- if (!tc->pending_dupacks)
- {
- tcp_send_ack (tc);
- continue;
- }
-
- /* If we're supposed to send dupacks but have no ooo data
- * send only one ack */
- if (!vec_len (tc->snd_sacks))
- {
- tcp_send_ack (tc);
- continue;
- }
-
- /* Start with first sack block */
- tc->snd_sack_pos = 0;
-
- /* Generate enough dupacks to cover all sack blocks. Do not generate
- * more sacks than the number of packets received. But do generate at
- * least 3, i.e., the number needed to signal congestion, if needed. */
- n_acks = vec_len (tc->snd_sacks) / TCP_OPTS_MAX_SACK_BLOCKS;
- n_acks = clib_min (n_acks, tc->pending_dupacks);
- n_acks = clib_max (n_acks, clib_min (tc->pending_dupacks, 3));
- for (j = 0; j < n_acks; j++)
- tcp_send_ack (tc);
-
- tc->pending_dupacks = 0;
- tc->snd_sack_pos = 0;
+ session_add_self_custom_tx_evt (&tc->connection, 0);
+ tc->flags |= TCP_CONN_FRXT_PENDING;
}
- _vec_len (wrk->pending_acks) = 0;
}
/**
tcp_send_ack (tc);
}
+/**
+ * Send window update ack
+ *
+ * Ensures the ack is sent only once, when the receive window becomes
+ * non-zero after a zero window was previously advertised.
+ */
+void
+tcp_send_window_update_ack (tcp_connection_t * tc)
+{
+ u32 win;
+
+ if (tcp_zero_rwnd_sent (tc))
+ {
+ win = tcp_window_to_advertise (tc, tc->state);
+ if (win > 0)
+ {
+ tcp_zero_rwnd_sent_off (tc);
+ tcp_program_ack (tc);
+ }
+ }
+}
+
/**
* Allocate a new buffer and build a new tcp segment
*
return 0;
if (tcp_in_fastrecovery (tc))
- tc->snd_rxt_bytes += n_bytes;
+ {
+ tc->snd_rxt_bytes += n_bytes;
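+ /* Track the retransmitted byte range for delivery-rate sampling */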
+ if (tc->flags & TCP_CONN_RATE_SAMPLE)
+ tcp_bt_track_rxt (tc, start, start + n_bytes);
+ }
done:
TCP_EVT_DBG (TCP_EVT_CC_RTX, tc, offset, n_bytes);
* Reset congestion control, switch cwnd to loss window and try again.
*/
static void
-tcp_rxt_timeout_cc (tcp_connection_t * tc)
+tcp_cc_init_rxt_timeout (tcp_connection_t * tc)
{
TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 6);
tc->prev_ssthresh = tc->ssthresh;
tc->prev_cwnd = tc->cwnd;
- /* Cleanly recover cc (also clears up fast retransmit) */
+ /* Clear fast recovery state if needed */
if (tcp_in_fastrecovery (tc))
- {
- /* TODO be less aggressive about this */
- scoreboard_clear (&tc->sack_sb);
- tcp_cc_fastrecovery_exit (tc);
- }
- else
- tc->rcv_dupacks = 0;
+ tcp_cc_fastrecovery_clear (tc);
+
+ /* Let cc algo decide loss cwnd and ssthresh */
+ tcp_cc_loss (tc);
/* Start again from the beginning */
- tc->cc_algo->congestion (tc);
- tc->cwnd = tcp_loss_wnd (tc);
tc->snd_congestion = tc->snd_nxt;
+ tc->rcv_dupacks = 0;
tc->rtt_ts = 0;
tc->cwnd_acc_bytes = 0;
tcp_connection_tx_pacer_reset (tc, tc->cwnd, 2 * tc->snd_mss);
tcp_update_rto (tc);
}
+ /* Peer is dead or network connectivity is lost. Close connection.
+ * RFC 1122 section 4.2.3.5 recommends a value of at least 100s. For
+ * a min rto of 0.2s we need to retry about 8 times. */
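+ /* Backoffs double each retry, so after n tries the total wait is
+ * about rto_min * (2^n - 1), i.e., ~0.2s * 511 = ~102s for n = 9 */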
+ if (tc->rto_boff >= TCP_RTO_BOFF_MAX)
+ {
+ tcp_send_reset (tc);
+ tcp_connection_set_state (tc, TCP_STATE_CLOSED);
+ session_transport_closing_notify (&tc->connection);
+ tcp_connection_timers_reset (tc);
+ tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME);
+ return;
+ }
+
/* Increment RTO backoff (also equal to number of retries) and go back
* to first un-acked byte */
tc->rto_boff += 1;
+ /* TODO be less aggressive about clearing scoreboard */
+ scoreboard_clear (&tc->sack_sb);
+
/* First retransmit timeout */
if (tc->rto_boff == 1)
- tcp_rxt_timeout_cc (tc);
- else
- scoreboard_clear (&tc->sack_sb);
+ tcp_cc_init_rxt_timeout (tc);
+
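+ /* Delivery-rate samples taken before the timeout are stale, drop them */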
+ if (tc->flags & TCP_CONN_RATE_SAMPLE)
+ tcp_bt_flush_samples (tc);
/* If we've sent beyond snd_congestion, update it */
tc->snd_congestion = seq_max (tc->snd_nxt, tc->snd_congestion);
/* For first retransmit, record timestamp (Eifel detection RFC3522) */
if (tc->rto_boff == 1)
- tc->snd_rxt_ts = tcp_time_now_w_thread (tc->c_thread_index);
+ tc->snd_rxt_ts = tcp_tstamp (tc);
tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
tcp_retransmit_timer_force_update (tc);
snd_space = tcp_available_cc_snd_space (tc);
if (snd_space < tc->snd_mss)
{
- tcp_program_fastretransmit (wrk, tc);
+ tcp_program_fastretransmit (tc);
return 0;
}
snd_space / tc->snd_mss);
n_segs_now = tcp_fast_retransmit_unsent (wrk, tc, burst_size);
if (max_deq > n_segs_now * tc->snd_mss)
- tcp_program_fastretransmit (wrk, tc);
+ tcp_program_fastretransmit (tc);
n_segs += n_segs_now;
goto done;
}
}
if (hole)
- tcp_program_fastretransmit (wrk, tc);
+ tcp_program_fastretransmit (tc);
done:
return n_segs;
ASSERT (tcp_in_fastrecovery (tc));
TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 0);
+ snd_space = tcp_available_cc_snd_space (tc);
+
if (!tcp_fastrecovery_first (tc))
goto send_unsent;
/* RFC 6582: [If a partial ack], retransmit the first unacknowledged
* segment. */
- snd_space = tc->sack_sb.last_bytes_delivered;
while (snd_space > 0 && n_segs < burst_size)
{
n_written = tcp_prepare_retransmit_segment (wrk, tc, offset,
send_unsent:
/* RFC 6582: Send a new segment if permitted by the new value of cwnd. */
- snd_space = tcp_available_cc_snd_space (tc);
if (snd_space < tc->snd_mss || tc->snd_mss == 0)
goto done;
burst_size = clib_min (burst_size - n_segs, snd_space / tc->snd_mss);
n_segs_now = tcp_fast_retransmit_unsent (wrk, tc, burst_size);
if (max_deq > n_segs_now * tc->snd_mss)
- tcp_program_fastretransmit (wrk, tc);
+ tcp_program_fastretransmit (tc);
n_segs += n_segs_now;
}
else
return tcp_fast_retransmit_no_sack (wrk, tc, burst_size);
}
+
+static int
+tcp_send_acks (tcp_connection_t * tc, u32 max_burst_size)
+{
+ int j, n_acks;
+
+ if (!tc->pending_dupacks)
+ {
+ tcp_send_ack (tc);
+ return 1;
+ }
+
+ /* If we're supposed to send dupacks but have no ooo data,
+ * send only one ack */
+ if (!vec_len (tc->snd_sacks))
+ {
+ tcp_send_ack (tc);
+ return 1;
+ }
+
+ /* Start with first sack block */
+ tc->snd_sack_pos = 0;
+
+ /* Generate enough dupacks to cover all sack blocks. Do not generate
+ * more dupacks than the number of packets received. But do generate at
+ * least 3, i.e., the number needed to signal congestion. */
+ n_acks = vec_len (tc->snd_sacks) / TCP_OPTS_MAX_SACK_BLOCKS;
+ n_acks = clib_min (n_acks, tc->pending_dupacks);
+ n_acks = clib_max (n_acks, clib_min (tc->pending_dupacks, 3));
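+ /* e.g., with TCP_OPTS_MAX_SACK_BLOCKS of 3, 7 sack blocks yield
+ * 7/3 = 2 acks, raised to 3 if at least 3 dupacks are pending */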
+ for (j = 0; j < clib_min (n_acks, max_burst_size); j++)
+ tcp_send_ack (tc);
+
+ if (n_acks < max_burst_size)
+ {
+ tc->pending_dupacks = 0;
+ tc->snd_sack_pos = 0;
+ return n_acks;
+ }
+ else
+ {
+ TCP_DBG ("constrained by burst size");
+ tc->pending_dupacks = n_acks - max_burst_size;
+ tcp_program_dupack (tc);
+ return max_burst_size;
+ }
+}
+
+static int
+tcp_do_fastretransmit (tcp_connection_t * tc, u32 max_burst_size)
+{
+ u32 n_segs = 0, burst_size, sent_bytes, burst_bytes;
+ tcp_worker_ctx_t *wrk;
+
+ wrk = tcp_get_worker (tc->c_thread_index);
+ burst_bytes = transport_connection_tx_pacer_burst (&tc->connection,
+ wrk->vm->
+ clib_time.last_cpu_time);
+ burst_size = clib_min (max_burst_size, burst_bytes / tc->snd_mss);
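+ /* No pacer budget for even one segment, try again later */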
+ if (!burst_size)
+ {
+ tcp_program_fastretransmit (tc);
+ return 0;
+ }
+
+ n_segs = tcp_fast_retransmit (wrk, tc, burst_size);
+ sent_bytes = clib_min (n_segs * tc->snd_mss, burst_bytes);
+ transport_connection_tx_pacer_update_bytes (&tc->connection, sent_bytes);
+ return n_segs;
+}
+
+int
+tcp_session_custom_tx (void *conn, u32 max_burst_size)
+{
+ tcp_connection_t *tc = (tcp_connection_t *) conn;
+ u32 n_segs = 0;
+
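+ /* Services pending fast retransmits first, then pending (dup)acks */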
+ if (tcp_in_fastrecovery (tc) && (tc->flags & TCP_CONN_FRXT_PENDING))
+ {
+ tc->flags &= ~TCP_CONN_FRXT_PENDING;
+ n_segs = tcp_do_fastretransmit (tc, max_burst_size);
+ max_burst_size -= n_segs;
+ }
+
+ if (!(tc->flags & TCP_CONN_SNDACK))
+ return n_segs;
+
+ tc->flags &= ~TCP_CONN_SNDACK;
+
+ /* If we retransmitted packets and have no dupacks pending, skip the
+ * explicit ack */
+ if (n_segs && !tc->pending_dupacks)
+ return n_segs;
+
+ if (!max_burst_size)
+ {
+ tcp_program_ack (tc);
+ return max_burst_size;
+ }
+
+ n_segs += tcp_send_acks (tc, max_burst_size);
+
+ return n_segs;
+}
#endif /* CLIB_MARCH_VARIANT */
static void
return;
}
+ /* If a next node other than drop is set, use it */
+ if (tc0->next_node_index)
+ {
+ *next0 = tc0->next_node_index;
+ vnet_buffer (b0)->tcp.next_node_opaque = tc0->next_node_opaque;
+ }
+
vnet_buffer (b0)->sw_if_index[VLIB_TX] = tc0->c_fib_index;
vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;