*data++ = TCP_OPTION_MSS;
*data++ = TCP_OPTION_LEN_MSS;
buf = clib_host_to_net_u16 (opts->mss);
- clib_memcpy (data, &buf, sizeof (opts->mss));
+ clib_memcpy_fast (data, &buf, sizeof (opts->mss));
data += sizeof (opts->mss);
opts_len += TCP_OPTION_LEN_MSS;
}
*data++ = TCP_OPTION_TIMESTAMP;
*data++ = TCP_OPTION_LEN_TIMESTAMP;
buf = clib_host_to_net_u32 (opts->tsval);
- clib_memcpy (data, &buf, sizeof (opts->tsval));
+ clib_memcpy_fast (data, &buf, sizeof (opts->tsval));
data += sizeof (opts->tsval);
buf = clib_host_to_net_u32 (opts->tsecr);
- clib_memcpy (data, &buf, sizeof (opts->tsecr));
+ clib_memcpy_fast (data, &buf, sizeof (opts->tsecr));
data += sizeof (opts->tsecr);
opts_len += TCP_OPTION_LEN_TIMESTAMP;
}
for (i = 0; i < n_sack_blocks; i++)
{
buf = clib_host_to_net_u32 (opts->sacks[i].start);
- clib_memcpy (data, &buf, seq_len);
+ clib_memcpy_fast (data, &buf, seq_len);
data += seq_len;
buf = clib_host_to_net_u32 (opts->sacks[i].end);
- clib_memcpy (data, &buf, seq_len);
+ clib_memcpy_fast (data, &buf, seq_len);
data += seq_len;
}
opts_len += 2 + n_sack_blocks * TCP_OPTION_LEN_SACK_BLOCK;
if (tcp_opts_tstamp (&tc->rcv_opts))
{
opts->flags |= TCP_OPTS_FLAG_TSTAMP;
- opts->tsval = tcp_time_now ();
+ opts->tsval = tcp_time_now_w_thread (tc->c_thread_index);
opts->tsecr = tc->tsval_recent;
len += TCP_OPTION_LEN_TIMESTAMP;
}
}
static int
-tcp_alloc_tx_buffers (tcp_main_t * tm, u8 thread_index, u16 * n_bufs,
- u32 wanted)
+tcp_alloc_tx_buffers (tcp_worker_ctx_t * wrk, u16 * n_bufs, u32 wanted)
{
- tcp_worker_ctx_t *ctx = &tm->wrk_ctx[thread_index];
vlib_main_t *vm = vlib_get_main ();
u32 n_alloc;
ASSERT (wanted > *n_bufs);
- vec_validate_aligned (ctx->tx_buffers, wanted - 1, CLIB_CACHE_LINE_BYTES);
- n_alloc = vlib_buffer_alloc (vm, &ctx->tx_buffers[*n_bufs],
+ vec_validate_aligned (wrk->tx_buffers, wanted - 1, CLIB_CACHE_LINE_BYTES);
+ n_alloc = vlib_buffer_alloc (vm, &wrk->tx_buffers[*n_bufs],
wanted - *n_bufs);
*n_bufs += n_alloc;
- _vec_len (ctx->tx_buffers) = *n_bufs;
+ _vec_len (wrk->tx_buffers) = *n_bufs;
return n_alloc;
}
always_inline int
-tcp_get_free_buffer_index (tcp_main_t * tm, u32 * bidx)
+tcp_get_free_buffer_index (tcp_worker_ctx_t * wrk, u32 * bidx)
{
- u32 thread_index = vlib_get_thread_index ();
- tcp_worker_ctx_t *ctx = &tm->wrk_ctx[thread_index];
- u16 n_bufs = vec_len (ctx->tx_buffers);
+ u16 n_bufs = vec_len (wrk->tx_buffers);
- TCP_DBG_BUFFER_ALLOC_MAYBE_FAIL (thread_index);
+ TCP_DBG_BUFFER_ALLOC_MAYBE_FAIL (wrk->vm->thread_index);
if (PREDICT_FALSE (!n_bufs))
{
- if (!tcp_alloc_tx_buffers (tm, thread_index, &n_bufs, VLIB_FRAME_SIZE))
+ if (!tcp_alloc_tx_buffers (wrk, &n_bufs, VLIB_FRAME_SIZE))
{
*bidx = ~0;
return -1;
}
}
- *bidx = ctx->tx_buffers[--n_bufs];
- _vec_len (ctx->tx_buffers) = n_bufs;
+ *bidx = wrk->tx_buffers[--n_bufs];
+ _vec_len (wrk->tx_buffers) = n_bufs;
return 0;
}
tcp_reuse_buffer (vm, b);
tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_ACK);
TCP_EVT_DBG (TCP_EVT_ACK_SENT, tc);
- vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_ACK;
tc->rcv_las = tc->rcv_nxt;
}
initial_wnd = tcp_initial_window_to_advertise (tc);
/* Make and write options */
- memset (&snd_opts, 0, sizeof (snd_opts));
+ clib_memset (&snd_opts, 0, sizeof (snd_opts));
tcp_opts_len = tcp_make_syn_options (&snd_opts, tc->rcv_wscale);
tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
tcp_header_t *th;
u16 initial_wnd;
- memset (snd_opts, 0, sizeof (*snd_opts));
+ clib_memset (snd_opts, 0, sizeof (*snd_opts));
tcp_reuse_buffer (vm, b);
initial_wnd = tcp_initial_window_to_advertise (tc);
tcp_options_write ((u8 *) (th + 1), snd_opts);
vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
- vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_ACK;
/* Init retransmit timer. Use update instead of set because of
* retransmissions */
}
always_inline void
-tcp_enqueue_to_ip_lookup_i (vlib_main_t * vm, vlib_buffer_t * b, u32 bi,
+tcp_enqueue_to_ip_lookup_i (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
u8 is_ip4, u32 fib_index, u8 flush)
{
- tcp_main_t *tm = vnet_get_tcp_main ();
- u32 thread_index = vlib_get_thread_index ();
+ vlib_main_t *vm = wrk->vm;
u32 *to_next, next_index;
vlib_frame_t *f;
next_index = is_ip4 ? ip4_lookup_node.index : ip6_lookup_node.index;
tcp_trajectory_add_start (b, 1);
- f = tm->wrk_ctx[thread_index].ip_lookup_tx_frames[!is_ip4];
+ f = wrk->ip_lookup_tx_frames[!is_ip4];
if (!f)
{
f = vlib_get_frame_to_node (vm, next_index);
ASSERT (f);
- tm->wrk_ctx[thread_index].ip_lookup_tx_frames[!is_ip4] = f;
+ wrk->ip_lookup_tx_frames[!is_ip4] = f;
}
to_next = vlib_frame_vector_args (f);
if (flush || f->n_vectors == VLIB_FRAME_SIZE)
{
vlib_put_frame_to_node (vm, next_index, f);
- tm->wrk_ctx[thread_index].ip_lookup_tx_frames[!is_ip4] = 0;
+ wrk->ip_lookup_tx_frames[!is_ip4] = 0;
}
}
static void
-tcp_enqueue_to_ip_lookup_now (vlib_main_t * vm, vlib_buffer_t * b, u32 bi,
- u8 is_ip4, u32 fib_index)
+tcp_enqueue_to_ip_lookup_now (tcp_worker_ctx_t * wrk, vlib_buffer_t * b,
+ u32 bi, u8 is_ip4, u32 fib_index)
{
- tcp_enqueue_to_ip_lookup_i (vm, b, bi, is_ip4, fib_index, 1);
+ tcp_enqueue_to_ip_lookup_i (wrk, b, bi, is_ip4, fib_index, 1);
}
static void
-tcp_enqueue_to_ip_lookup (vlib_main_t * vm, vlib_buffer_t * b, u32 bi,
+tcp_enqueue_to_ip_lookup (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
u8 is_ip4, u32 fib_index)
{
- tcp_enqueue_to_ip_lookup_i (vm, b, bi, is_ip4, fib_index, 0);
- if (vm->thread_index == 0 && vlib_num_workers ())
- session_flush_frames_main_thread (vm);
+ tcp_enqueue_to_ip_lookup_i (wrk, b, bi, is_ip4, fib_index, 0);
+ if (wrk->vm->thread_index == 0 && vlib_num_workers ())
+ session_flush_frames_main_thread (wrk->vm);
}
always_inline void
-tcp_enqueue_to_output_i (vlib_main_t * vm, vlib_buffer_t * b, u32 bi,
+tcp_enqueue_to_output_i (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
u8 is_ip4, u8 flush)
{
- tcp_main_t *tm = vnet_get_tcp_main ();
- u32 thread_index = vlib_get_thread_index ();
u32 *to_next, next_index;
vlib_frame_t *f;
tcp_trajectory_add_start (b, 2);
/* Get frame to v4/6 output node */
- f = tm->wrk_ctx[thread_index].tx_frames[!is_ip4];
+ f = wrk->tx_frames[!is_ip4];
if (!f)
{
- f = vlib_get_frame_to_node (vm, next_index);
+ f = vlib_get_frame_to_node (wrk->vm, next_index);
ASSERT (f);
- tm->wrk_ctx[thread_index].tx_frames[!is_ip4] = f;
+ wrk->tx_frames[!is_ip4] = f;
}
to_next = vlib_frame_vector_args (f);
to_next[f->n_vectors] = bi;
f->n_vectors += 1;
if (flush || f->n_vectors == VLIB_FRAME_SIZE)
{
- vlib_put_frame_to_node (vm, next_index, f);
- tm->wrk_ctx[thread_index].tx_frames[!is_ip4] = 0;
+ vlib_put_frame_to_node (wrk->vm, next_index, f);
+ wrk->tx_frames[!is_ip4] = 0;
}
}
static void
-tcp_enqueue_to_output (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, u8 is_ip4)
+tcp_enqueue_to_output (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
+ u8 is_ip4)
{
- tcp_enqueue_to_output_i (vm, b, bi, is_ip4, 0);
+ tcp_enqueue_to_output_i (wrk, b, bi, is_ip4, 0);
}
static void
-tcp_enqueue_to_output_now (vlib_main_t * vm, vlib_buffer_t * b, u32 bi,
+tcp_enqueue_to_output_now (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
u8 is_ip4)
{
- tcp_enqueue_to_output_i (vm, b, bi, is_ip4, 1);
+ tcp_enqueue_to_output_i (wrk, b, bi, is_ip4, 1);
}
static int
{
ih6 = vlib_buffer_get_current (b0);
ASSERT ((ih6->ip_version_traffic_class_and_flow_label & 0xF0) == 0x60);
- clib_memcpy (&src_ip60, &ih6->src_address, sizeof (ip6_address_t));
- clib_memcpy (&dst_ip60, &ih6->dst_address, sizeof (ip6_address_t));
+ clib_memcpy_fast (&src_ip60, &ih6->src_address, sizeof (ip6_address_t));
+ clib_memcpy_fast (&dst_ip60, &ih6->dst_address, sizeof (ip6_address_t));
}
src_port = th0->src_port;
void
tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt, u8 is_ip4)
{
+ tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
+ vlib_main_t *vm = wrk->vm;
vlib_buffer_t *b;
u32 bi, sw_if_index, fib_index;
- tcp_main_t *tm = vnet_get_tcp_main ();
- vlib_main_t *vm = vlib_get_main ();
u8 tcp_hdr_len, flags = 0;
tcp_header_t *th, *pkt_th;
u32 seq, ack;
ip6_header_t *ih6, *pkt_ih6;
fib_protocol_t fib_proto;
- if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
+ if (PREDICT_FALSE (tcp_get_free_buffer_index (wrk, &bi)))
return;
b = vlib_get_buffer (vm, bi);
{
flags = TCP_FLAG_RST;
seq = pkt_th->ack_number;
- ack = (tc && tc->state >= TCP_STATE_SYN_RCVD) ? tc->rcv_nxt : 0;
+ ack = (tc->state >= TCP_STATE_SYN_RCVD) ? tc->rcv_nxt : 0;
}
else
{
ASSERT (!bogus);
}
- tcp_enqueue_to_ip_lookup_now (vm, b, bi, is_ip4, fib_index);
+ tcp_enqueue_to_ip_lookup_now (wrk, b, bi, is_ip4, fib_index);
TCP_EVT_DBG (TCP_EVT_RST_SENT, tc);
}
void
tcp_send_reset (tcp_connection_t * tc)
{
- vlib_main_t *vm = vlib_get_main ();
- tcp_main_t *tm = vnet_get_tcp_main ();
+ tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
+ vlib_main_t *vm = wrk->vm;
vlib_buffer_t *b;
u32 bi;
tcp_header_t *th;
u16 tcp_hdr_opts_len, advertise_wnd, opts_write_len;
u8 flags;
- if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
+ if (PREDICT_FALSE (tcp_get_free_buffer_index (wrk, &bi)))
return;
b = vlib_get_buffer (vm, bi);
tcp_init_buffer (vm, b);
th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus);
ASSERT (!bogus);
}
- tcp_enqueue_to_ip_lookup_now (vm, b, bi, tc->c_is_ip4, tc->c_fib_index);
+ tcp_enqueue_to_ip_lookup_now (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
TCP_EVT_DBG (TCP_EVT_RST_SENT, tc);
}
static void
-tcp_push_ip_hdr (tcp_main_t * tm, tcp_connection_t * tc, vlib_buffer_t * b)
+tcp_push_ip_hdr (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
+ vlib_buffer_t * b)
{
tcp_header_t *th = vlib_buffer_get_current (b);
- vlib_main_t *vm = vlib_get_main ();
+ vlib_main_t *vm = wrk->vm;
if (tc->c_is_ip4)
{
ip4_header_t *ih;
void
tcp_send_syn (tcp_connection_t * tc)
{
+ tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
+ vlib_main_t *vm = wrk->vm;
vlib_buffer_t *b;
u32 bi;
- tcp_main_t *tm = vnet_get_tcp_main ();
- vlib_main_t *vm = vlib_get_main ();
/*
* Setup retransmit and establish timers before requesting buffer
tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN,
tc->rto * TCP_TO_TIMER_TICK);
- if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
+ if (PREDICT_FALSE (tcp_get_free_buffer_index (wrk, &bi)))
return;
b = vlib_get_buffer (vm, bi);
tcp_make_syn (tc, b);
/* Measure RTT with this */
- tc->rtt_ts = tcp_time_now ();
+ tc->rtt_ts = tcp_time_now_us (vlib_num_workers ()? 1 : 0);
tc->rtt_seq = tc->snd_nxt;
tc->rto_boff = 0;
- tcp_push_ip_hdr (tm, tc, b);
- tcp_enqueue_to_ip_lookup (vm, b, bi, tc->c_is_ip4, tc->c_fib_index);
+ tcp_push_ip_hdr (wrk, tc, b);
+ tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
TCP_EVT_DBG (TCP_EVT_SYN_SENT, tc);
}
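+/**
+ *  Send SYN-ACK
+ */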
+void
+tcp_send_synack (tcp_connection_t * tc)
+{
+ tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
+ vlib_main_t *vm = wrk->vm;
+ vlib_buffer_t *b;
+ u32 bi;
+
+ /* Get buffer */
+ if (PREDICT_FALSE (tcp_get_free_buffer_index (wrk, &bi)))
+ return;
+
+ b = vlib_get_buffer (vm, bi);
+ tcp_make_synack (tc, b);
+ tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
+}
+
/**
* Flush tx frame populated by retransmits and timer pops
*/
void
-tcp_flush_frame_to_output (vlib_main_t * vm, u8 thread_index, u8 is_ip4)
+tcp_flush_frame_to_output (tcp_worker_ctx_t * wrk, u8 is_ip4)
{
- if (tcp_main.wrk_ctx[thread_index].tx_frames[!is_ip4])
+ if (wrk->tx_frames[!is_ip4])
{
u32 next_index;
next_index = is_ip4 ? tcp4_output_node.index : tcp6_output_node.index;
- vlib_put_frame_to_node (vm, next_index,
- tcp_main.
- wrk_ctx[thread_index].tx_frames[!is_ip4]);
- tcp_main.wrk_ctx[thread_index].tx_frames[!is_ip4] = 0;
+ vlib_put_frame_to_node (wrk->vm, next_index, wrk->tx_frames[!is_ip4]);
+ wrk->tx_frames[!is_ip4] = 0;
}
}
* Flush ip lookup tx frames populated by timer pops
*/
static void
-tcp_flush_frame_to_ip_lookup (vlib_main_t * vm, u8 thread_index, u8 is_ip4)
+tcp_flush_frame_to_ip_lookup (tcp_worker_ctx_t * wrk, u8 is_ip4)
{
- if (tcp_main.wrk_ctx[thread_index].ip_lookup_tx_frames[!is_ip4])
+ if (wrk->ip_lookup_tx_frames[!is_ip4])
{
u32 next_index;
next_index = is_ip4 ? ip4_lookup_node.index : ip6_lookup_node.index;
- vlib_put_frame_to_node (vm, next_index,
- tcp_main.
- wrk_ctx[thread_index].ip_lookup_tx_frames
- [!is_ip4]);
- tcp_main.wrk_ctx[thread_index].ip_lookup_tx_frames[!is_ip4] = 0;
+ vlib_put_frame_to_node (wrk->vm, next_index,
+ wrk->ip_lookup_tx_frames[!is_ip4]);
+ wrk->ip_lookup_tx_frames[!is_ip4] = 0;
}
}
* Flush v4 and v6 tcp and ip-lookup tx frames for thread index
*/
void
-tcp_flush_frames_to_output (u8 thread_index)
+tcp_flush_frames_to_output (tcp_worker_ctx_t * wrk)
{
- vlib_main_t *vm = vlib_get_main ();
- tcp_flush_frame_to_output (vm, thread_index, 1);
- tcp_flush_frame_to_output (vm, thread_index, 0);
- tcp_flush_frame_to_ip_lookup (vm, thread_index, 1);
- tcp_flush_frame_to_ip_lookup (vm, thread_index, 0);
+ tcp_flush_frame_to_output (wrk, 1);
+ tcp_flush_frame_to_output (wrk, 0);
+ tcp_flush_frame_to_ip_lookup (wrk, 1);
+ tcp_flush_frame_to_ip_lookup (wrk, 0);
}
/**
void
tcp_send_fin (tcp_connection_t * tc)
{
- tcp_main_t *tm = vnet_get_tcp_main ();
- vlib_main_t *vm = vlib_get_main ();
+ tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
+ vlib_main_t *vm = wrk->vm;
vlib_buffer_t *b;
u32 bi;
u8 fin_snt = 0;
- tcp_retransmit_timer_force_update (tc);
- if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
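+  /* If a FIN was sent before, this is a retransmit: resend it from snd_una */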
+ fin_snt = tc->flags & TCP_CONN_FINSNT;
+ if (fin_snt)
+ tc->snd_nxt = tc->snd_una;
+
+ if (PREDICT_FALSE (tcp_get_free_buffer_index (wrk, &bi)))
{
/* Out of buffers so program fin retransmit ASAP */
tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, 1);
- tc->flags |= TCP_CONN_FINSNT;
- tc->snd_una_max += 1;
- tc->snd_nxt = tc->snd_una_max;
- return;
+ goto post_enqueue;
}
+ tcp_retransmit_timer_force_update (tc);
b = vlib_get_buffer (vm, bi);
tcp_init_buffer (vm, b);
- fin_snt = tc->flags & TCP_CONN_FINSNT;
- if (fin_snt)
- tc->snd_nxt = tc->snd_una;
tcp_make_fin (tc, b);
- tcp_enqueue_to_output_now (vm, b, bi, tc->c_is_ip4);
+ tcp_enqueue_to_output_now (wrk, b, bi, tc->c_is_ip4);
+ TCP_EVT_DBG (TCP_EVT_FIN_SENT, tc);
+
+post_enqueue:
if (!fin_snt)
{
tc->flags |= TCP_CONN_FINSNT;
{
tc->snd_nxt = tc->snd_una_max;
}
- TCP_EVT_DBG (TCP_EVT_FIN_SENT, tc);
}
always_inline u8
if (maybe_burst)
{
- clib_memcpy ((u8 *) (th + 1),
- tm->wrk_ctx[tc->c_thread_index].cached_opts,
- tc->snd_opts_len);
+ clib_memcpy_fast ((u8 *) (th + 1),
+ tm->wrk_ctx[tc->c_thread_index].cached_opts,
+ tc->snd_opts_len);
}
else
{
tcp_push_hdr_i (tc, b, TCP_STATE_ESTABLISHED, /* compute opts */ 0,
/* burst */ 1);
tc->snd_una_max = tc->snd_nxt;
- ASSERT (seq_leq (tc->snd_una_max, tc->snd_una + tc->snd_wnd
- + tcp_fastrecovery_sent_1_smss (tc) * tc->snd_mss));
+ ASSERT (seq_leq (tc->snd_una_max, tc->snd_una + tc->snd_wnd));
tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
/* If not tracking an ACK, start tracking */
if (tc->rtt_ts == 0 && !tcp_in_cong_recovery (tc))
{
- tc->rtt_ts = tcp_time_now ();
+ tc->rtt_ts = tcp_time_now_us (tc->c_thread_index);
tc->rtt_seq = tc->snd_nxt;
}
if (PREDICT_FALSE (!tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)))
void
tcp_send_ack (tcp_connection_t * tc)
{
- tcp_main_t *tm = vnet_get_tcp_main ();
- vlib_main_t *vm = vlib_get_main ();
-
+ tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
+ vlib_main_t *vm = wrk->vm;
vlib_buffer_t *b;
u32 bi;
/* Get buffer */
- if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
+ if (PREDICT_FALSE (tcp_get_free_buffer_index (wrk, &bi)))
return;
b = vlib_get_buffer (vm, bi);
tcp_init_buffer (vm, b);
/* Fill in the ACK */
tcp_make_ack (tc, b);
- tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);
+ tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
+}
+
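+/**
+ * Queue ack for a connection; the SNDACK flag guards against duplicate
+ * entries in the worker's pending_acks vector
+ */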
+void
+tcp_program_ack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
+{
+ if (!(tc->flags & TCP_CONN_SNDACK))
+ {
+ vec_add1 (wrk->pending_acks, tc->c_c_index);
+ tc->flags |= TCP_CONN_SNDACK;
+ }
+}
+
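+/**
+ * Queue dupack for a connection; the per-connection dupack count
+ * saturates at 255
+ */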
+void
+tcp_program_dupack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
+{
+ if (!(tc->flags & TCP_CONN_SNDACK))
+ {
+ vec_add1 (wrk->pending_acks, tc->c_c_index);
+ tc->flags |= TCP_CONN_SNDACK;
+ }
+ if (tc->pending_dupacks < 255)
+ tc->pending_dupacks += 1;
+}
+
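+/**
+ * Send all (dup)acks queued on this worker
+ */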
+void
+tcp_send_acks (tcp_worker_ctx_t * wrk)
+{
+ u32 thread_index, *pending_acks;
+ tcp_connection_t *tc;
+ int i, j, n_acks;
+
+ if (!vec_len (wrk->pending_acks))
+ return;
+
+ thread_index = wrk->vm->thread_index;
+ pending_acks = wrk->pending_acks;
+ for (i = 0; i < vec_len (pending_acks); i++)
+ {
+ tc = tcp_connection_get (pending_acks[i], thread_index);
+ tc->flags &= ~TCP_CONN_SNDACK;
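+      /* Send at least one ack; pending dupacks add one ack each */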
+ n_acks = clib_max (1, tc->pending_dupacks);
+      /* If we're supposed to send dupacks but have no ooo data,
+       * send only one ack */
+ if (tc->pending_dupacks && !vec_len (tc->snd_sacks))
+ n_acks = 1;
+ for (j = 0; j < n_acks; j++)
+ tcp_send_ack (tc);
+ tc->pending_dupacks = 0;
+ }
+ _vec_len (wrk->pending_acks) = 0;
}
/**
}
/**
- * Build a retransmit segment
+ * Allocate a new buffer and build a new tcp segment
*
- * @return the number of bytes in the segment or 0 if there's nothing to
- * retransmit
+ * @param wrk tcp worker
+ * @param tc connection for which the segment will be allocated
+ * @param offset offset of the first byte in the tx fifo
+ * @param max_deq_bytes segment size
+ * @param[out] b pointer to buffer allocated
+ *
+ * @return the number of bytes in the segment or 0 if the buffer cannot be
+ *  allocated or no data is available
*/
-static u32
-tcp_prepare_retransmit_segment (tcp_connection_t * tc, u32 offset,
- u32 max_deq_bytes, vlib_buffer_t ** b)
+static int
+tcp_prepare_segment (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
+ u32 offset, u32 max_deq_bytes, vlib_buffer_t ** b)
{
- tcp_main_t *tm = vnet_get_tcp_main ();
- vlib_main_t *vm = vlib_get_main ();
+ u32 bytes_per_buffer = vnet_get_tcp_main ()->bytes_per_buffer;
+ u32 bi, seg_size;
+ vlib_main_t *vm = wrk->vm;
int n_bytes = 0;
- u32 start, bi, available_bytes, seg_size;
u8 *data;
- ASSERT (tc->state >= TCP_STATE_ESTABLISHED);
- ASSERT (max_deq_bytes != 0);
-
- /*
- * Make sure we can retransmit something
- */
- available_bytes = session_tx_fifo_max_dequeue (&tc->connection);
- ASSERT (available_bytes >= offset);
- available_bytes -= offset;
- if (!available_bytes)
- return 0;
- max_deq_bytes = clib_min (tc->snd_mss, max_deq_bytes);
- max_deq_bytes = clib_min (available_bytes, max_deq_bytes);
-
- /* Start is beyond snd_congestion */
- start = tc->snd_una + offset;
- if (seq_geq (start, tc->snd_congestion))
- goto done;
-
- /* Don't overshoot snd_congestion */
- if (seq_gt (start + max_deq_bytes, tc->snd_congestion))
- {
- max_deq_bytes = tc->snd_congestion - start;
- if (max_deq_bytes == 0)
- goto done;
- }
-
seg_size = max_deq_bytes + MAX_HDRS_LEN;
/*
*/
/* Easy case, buffer size greater than mss */
- if (PREDICT_TRUE (seg_size <= tm->bytes_per_buffer))
+ if (PREDICT_TRUE (seg_size <= bytes_per_buffer))
{
- if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
+ if (PREDICT_FALSE (tcp_get_free_buffer_index (wrk, &bi)))
return 0;
*b = vlib_get_buffer (vm, bi);
data = tcp_init_buffer (vm, *b);
else
{
u32 chain_bi = ~0, n_bufs_per_seg;
- u32 thread_index = vlib_get_thread_index ();
u16 n_peeked, len_to_deq, available_bufs;
vlib_buffer_t *chain_b, *prev_b;
int i;
/* Make sure we have enough buffers */
- n_bufs_per_seg = ceil ((double) seg_size / tm->bytes_per_buffer);
- available_bufs = vec_len (tm->wrk_ctx[thread_index].tx_buffers);
+ n_bufs_per_seg = ceil ((double) seg_size / bytes_per_buffer);
+ available_bufs = vec_len (wrk->tx_buffers);
if (n_bufs_per_seg > available_bufs)
{
- tcp_alloc_tx_buffers (tm, thread_index, &available_bufs,
- VLIB_FRAME_SIZE);
-
+ tcp_alloc_tx_buffers (wrk, &available_bufs, VLIB_FRAME_SIZE);
if (n_bufs_per_seg > available_bufs)
{
*b = 0;
}
}
- tcp_get_free_buffer_index (tm, &bi);
+ (void) tcp_get_free_buffer_index (wrk, &bi);
ASSERT (bi != (u32) ~ 0);
*b = vlib_get_buffer (vm, bi);
data = tcp_init_buffer (vm, *b);
n_bytes = stream_session_peek_bytes (&tc->connection, data, offset,
- tm->bytes_per_buffer -
- MAX_HDRS_LEN);
+ bytes_per_buffer - MAX_HDRS_LEN);
b[0]->current_length = n_bytes;
b[0]->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
b[0]->total_length_not_including_first_buffer = 0;
for (i = 1; i < n_bufs_per_seg; i++)
{
prev_b = chain_b;
- len_to_deq = clib_min (max_deq_bytes, tm->bytes_per_buffer);
- tcp_get_free_buffer_index (tm, &chain_bi);
+ len_to_deq = clib_min (max_deq_bytes, bytes_per_buffer);
+ tcp_get_free_buffer_index (wrk, &chain_bi);
ASSERT (chain_bi != (u32) ~ 0);
chain_b = vlib_get_buffer (vm, chain_bi);
chain_b->current_data = 0;
}
ASSERT (n_bytes > 0);
- ASSERT (((*b)->current_data + (*b)->current_length) <=
- tm->bytes_per_buffer);
+ ASSERT (((*b)->current_data + (*b)->current_length) <= bytes_per_buffer);
+
+ return n_bytes;
+}
+
+/**
+ * Build a retransmit segment
+ *
+ * @return the number of bytes in the segment or 0 if there's nothing to
+ * retransmit
+ */
+static u32
+tcp_prepare_retransmit_segment (tcp_worker_ctx_t * wrk,
+ tcp_connection_t * tc, u32 offset,
+ u32 max_deq_bytes, vlib_buffer_t ** b)
+{
+ u32 start, available_bytes;
+ int n_bytes = 0;
+
+ ASSERT (tc->state >= TCP_STATE_ESTABLISHED);
+ ASSERT (max_deq_bytes != 0);
+
+ /*
+ * Make sure we can retransmit something
+ */
+ available_bytes = session_tx_fifo_max_dequeue (&tc->connection);
+ ASSERT (available_bytes >= offset);
+ available_bytes -= offset;
+ if (!available_bytes)
+ return 0;
+
+ max_deq_bytes = clib_min (tc->snd_mss, max_deq_bytes);
+ max_deq_bytes = clib_min (available_bytes, max_deq_bytes);
+
+ /* Start is beyond snd_congestion */
+ start = tc->snd_una + offset;
+ if (seq_geq (start, tc->snd_congestion))
+ goto done;
+
+ /* Don't overshoot snd_congestion */
+ if (seq_gt (start + max_deq_bytes, tc->snd_congestion))
+ {
+ max_deq_bytes = tc->snd_congestion - start;
+ if (max_deq_bytes == 0)
+ goto done;
+ }
+
+ n_bytes = tcp_prepare_segment (wrk, tc, offset, max_deq_bytes, b);
+ if (!n_bytes)
+ return 0;
if (tcp_in_fastrecovery (tc))
tc->snd_rxt_bytes += n_bytes;
/* Cleanly recover cc (also clears up fast retransmit) */
if (tcp_in_fastrecovery (tc))
- tcp_cc_fastrecovery_exit (tc);
+ {
+ /* TODO be less aggressive about this */
+ scoreboard_clear (&tc->sack_sb);
+ tcp_cc_fastrecovery_exit (tc);
+ }
/* Start again from the beginning */
tc->cc_algo->congestion (tc);
tc->snd_congestion = tc->snd_una_max;
tc->rtt_ts = 0;
tc->cwnd_acc_bytes = 0;
-
+ tcp_connection_tx_pacer_reset (tc, tc->cwnd, 2 * tc->snd_mss);
tcp_recovery_on (tc);
}
static inline void
tcp_timer_retransmit_handler_i (u32 index, u8 is_syn)
{
- tcp_main_t *tm = vnet_get_tcp_main ();
- vlib_main_t *vm = vlib_get_main ();
u32 thread_index = vlib_get_thread_index ();
+ tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
+ vlib_main_t *vm = wrk->vm;
tcp_connection_t *tc;
vlib_buffer_t *b = 0;
u32 bi, n_bytes;
tc->timers[TCP_TIMER_RETRANSMIT] = TCP_TIMER_HANDLE_INVALID;
}
- TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 1);
-
if (tc->state >= TCP_STATE_ESTABLISHED)
{
+ TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 2);
+
/* Lost FIN, retransmit and return */
if (tcp_is_lost_fin (tc))
{
/* First retransmit timeout */
if (tc->rto_boff == 1)
tcp_rxt_timeout_cc (tc);
+ else
+ scoreboard_clear (&tc->sack_sb);
/* If we've sent beyond snd_congestion, update it */
if (seq_gt (tc->snd_una_max, tc->snd_congestion))
/* Send one segment. Note that n_bytes may be zero due to buffer
* shortfall */
- n_bytes = tcp_prepare_retransmit_segment (tc, 0, tc->snd_mss, &b);
-
- /* TODO be less aggressive about this */
- scoreboard_clear (&tc->sack_sb);
+ n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, tc->snd_mss, &b);
if (n_bytes == 0)
{
/* For first retransmit, record timestamp (Eifel detection RFC3522) */
if (tc->rto_boff == 1)
- tc->snd_rxt_ts = tcp_time_now ();
+ tc->snd_rxt_ts = tcp_time_now_w_thread (tc->c_thread_index);
- tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);
+ tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
tcp_retransmit_timer_force_update (tc);
}
/* Retransmit for SYN */
return;
}
+ TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 2);
+
/* Try without increasing RTO a number of times. If this fails,
* start growing RTO exponentially */
tc->rto_boff += 1;
tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN,
tc->rto * TCP_TO_TIMER_TICK);
- if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
+ if (PREDICT_FALSE (tcp_get_free_buffer_index (wrk, &bi)))
return;
b = vlib_get_buffer (vm, bi);
TCP_EVT_DBG (TCP_EVT_SYN_RXT, tc, 0);
/* This goes straight to ipx_lookup. Retransmit timer set already */
- tcp_push_ip_hdr (tm, tc, b);
- tcp_enqueue_to_ip_lookup (vm, b, bi, tc->c_is_ip4, tc->c_fib_index);
+ tcp_push_ip_hdr (wrk, tc, b);
+ tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
}
/* Retransmit SYN-ACK */
else if (tc->state == TCP_STATE_SYN_RCVD)
{
+ TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 2);
+
tc->rto_boff += 1;
if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
tc->rtt_ts = 0;
- if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
+ if (PREDICT_FALSE (tcp_get_free_buffer_index (wrk, &bi)))
{
tcp_retransmit_timer_force_update (tc);
return;
TCP_EVT_DBG (TCP_EVT_SYN_RXT, tc, 1);
/* Retransmit timer already updated, just enqueue to output */
- tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);
+ tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
}
else
{
void
tcp_timer_persist_handler (u32 index)
{
- tcp_main_t *tm = vnet_get_tcp_main ();
- vlib_main_t *vm = vlib_get_main ();
u32 thread_index = vlib_get_thread_index ();
+ tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
+ u32 bi, max_snd_bytes, available_bytes, offset;
+ tcp_main_t *tm = vnet_get_tcp_main ();
+ vlib_main_t *vm = wrk->vm;
tcp_connection_t *tc;
vlib_buffer_t *b;
- u32 bi, max_snd_bytes, available_bytes, offset;
int n_bytes = 0;
u8 *data;
/* Problem already solved or worse */
if (tc->state == TCP_STATE_CLOSED || tc->state > TCP_STATE_ESTABLISHED
- || tc->snd_wnd > tc->snd_mss || tcp_in_recovery (tc))
+ || tc->snd_wnd > tc->snd_mss)
return;
available_bytes = session_tx_fifo_max_dequeue (&tc->connection);
/*
* Try to force the first unsent segment (or buffer)
*/
- if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
+ if (PREDICT_FALSE (tcp_get_free_buffer_index (wrk, &bi)))
{
tcp_persist_timer_set (tc);
return;
tcp_push_hdr_i (tc, b, tc->state, /* compute opts */ 0, /* burst */ 0);
tc->snd_una_max = tc->snd_nxt;
tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
- tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);
+ tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
/* Just sent new data, enable retransmit */
tcp_retransmit_timer_update (tc);
/**
* Retransmit first unacked segment
*/
-void
-tcp_retransmit_first_unacked (tcp_connection_t * tc)
+int
+tcp_retransmit_first_unacked (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
{
- vlib_main_t *vm = vlib_get_main ();
- vlib_buffer_t *b;
u32 bi, old_snd_nxt, n_bytes;
+ vlib_main_t *vm = wrk->vm;
+ vlib_buffer_t *b;
old_snd_nxt = tc->snd_nxt;
tc->snd_nxt = tc->snd_una;
- TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 2);
- n_bytes = tcp_prepare_retransmit_segment (tc, 0, tc->snd_mss, &b);
+ TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 1);
+
+ n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, tc->snd_mss, &b);
if (!n_bytes)
- return;
- bi = vlib_get_buffer_index (vm, b);
- tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);
+ return -1;
+ bi = vlib_get_buffer_index (vm, b);
+ tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
tc->snd_nxt = old_snd_nxt;
+
+ return 0;
+}
+
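+/**
+ * Send burst of previously unsent data, starting at snd_una_max
+ */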
+static int
+tcp_fast_retransmit_unsent (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
+ u32 burst_size)
+{
+ u32 offset, n_segs = 0, n_written, bi;
+ vlib_main_t *vm = wrk->vm;
+ vlib_buffer_t *b = 0;
+
+ tc->snd_nxt = tc->snd_una_max;
+ offset = tc->snd_una_max - tc->snd_una;
+ while (n_segs < burst_size)
+ {
+ n_written = tcp_prepare_segment (wrk, tc, offset, tc->snd_mss, &b);
+ if (!n_written)
+ goto done;
+
+ bi = vlib_get_buffer_index (vm, b);
+ tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
+ offset += n_written;
+ n_segs += 1;
+ }
+
+done:
+ return n_segs;
}
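+/* Rescue rxt is valid only within the [snd_una, snd_congestion] window */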
+#define scoreboard_rescue_rxt_valid(_sb, _tc) \
+ (seq_geq (_sb->rescue_rxt, _tc->snd_una) \
+ && seq_leq (_sb->rescue_rxt, _tc->snd_congestion))
+
/**
* Do fast retransmit with SACKs
*/
-void
-tcp_fast_retransmit_sack (tcp_connection_t * tc)
+int
+tcp_fast_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
+ u32 burst_size)
{
- vlib_main_t *vm = vlib_get_main ();
- u32 n_written = 0, offset, max_bytes, n_segs = 0;
- vlib_buffer_t *b = 0;
+ u32 n_written = 0, offset, max_bytes, n_segs = 0, n_segs_now;
sack_scoreboard_hole_t *hole;
+ vlib_main_t *vm = wrk->vm;
+ vlib_buffer_t *b = 0;
sack_scoreboard_t *sb;
u32 bi, old_snd_nxt;
int snd_space;
+ u32 max_deq;
u8 snd_limited = 0, can_rescue = 0;
ASSERT (tcp_in_fastrecovery (tc));
- old_snd_nxt = tc->snd_nxt;
- sb = &tc->sack_sb;
snd_space = tcp_available_cc_snd_space (tc);
-
if (snd_space < tc->snd_mss)
- goto done;
+ {
+ tcp_program_fastretransmit (wrk, tc);
+ return 0;
+ }
TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 0);
+ old_snd_nxt = tc->snd_nxt;
+ sb = &tc->sack_sb;
hole = scoreboard_get_hole (sb, sb->cur_rxt_hole);
- while (hole && snd_space > 0 && n_segs++ < VLIB_FRAME_SIZE)
+
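+  /* Unsent bytes still sitting in the tx fifo */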
+ max_deq = session_tx_fifo_max_dequeue (&tc->connection);
+ max_deq -= tc->snd_una_max - tc->snd_una;
+
+ while (snd_space > 0 && n_segs < burst_size)
{
- hole = scoreboard_next_rxt_hole (sb, hole,
- tcp_fastrecovery_sent_1_smss (tc),
- &can_rescue, &snd_limited);
+ hole = scoreboard_next_rxt_hole (sb, hole, max_deq, &can_rescue,
+ &snd_limited);
if (!hole)
{
- if (!can_rescue || !(seq_lt (sb->rescue_rxt, tc->snd_una)
- || seq_gt (sb->rescue_rxt,
- tc->snd_congestion)))
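+	  /* No holes left: send unsent data, if any, and reschedule if
+	   * more remains */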
+ if (max_deq)
+ {
+ snd_space = clib_min (max_deq, snd_space);
+ burst_size = clib_min (burst_size - n_segs,
+ snd_space / tc->snd_mss);
+ n_segs_now = tcp_fast_retransmit_unsent (wrk, tc, burst_size);
+ if (max_deq > n_segs_now * tc->snd_mss)
+ tcp_program_fastretransmit (wrk, tc);
+ n_segs += n_segs_now;
+ goto done;
+ }
+
+ if (!can_rescue || scoreboard_rescue_rxt_valid (sb, tc))
break;
/* If rescue rxt undefined or less than snd_una then one segment of
offset = tc->snd_congestion - tc->snd_una - max_bytes;
sb->rescue_rxt = tc->snd_congestion;
tc->snd_nxt = tc->snd_una + offset;
- n_written = tcp_prepare_retransmit_segment (tc, offset, max_bytes,
- &b);
+ n_written = tcp_prepare_retransmit_segment (wrk, tc, offset,
+ max_bytes, &b);
if (!n_written)
goto done;
bi = vlib_get_buffer_index (vm, b);
- tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);
+ tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
+ n_segs += 1;
break;
}
max_bytes = snd_limited ? clib_min (max_bytes, tc->snd_mss) : max_bytes;
if (max_bytes == 0)
break;
+
offset = sb->high_rxt - tc->snd_una;
tc->snd_nxt = sb->high_rxt;
- n_written = tcp_prepare_retransmit_segment (tc, offset, max_bytes, &b);
+ n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes,
+ &b);
+ ASSERT (n_written <= snd_space);
/* Nothing left to retransmit */
if (n_written == 0)
break;
bi = vlib_get_buffer_index (vm, b);
+ tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
+
sb->high_rxt += n_written;
- tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);
- ASSERT (n_written <= snd_space);
snd_space -= n_written;
+ n_segs += 1;
}
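+  /* Holes left but burst or send space exhausted: continue later */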
+ if (hole)
+ tcp_program_fastretransmit (wrk, tc);
+
done:
/* If window allows, send 1 SMSS of new data */
tc->snd_nxt = old_snd_nxt;
+ return n_segs;
}
/**
* Fast retransmit without SACK info
*/
-void
-tcp_fast_retransmit_no_sack (tcp_connection_t * tc)
+int
+tcp_fast_retransmit_no_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
+ u32 burst_size)
{
- vlib_main_t *vm = vlib_get_main ();
- u32 n_written = 0, offset = 0, bi, old_snd_nxt;
- int snd_space;
+ u32 n_written = 0, offset = 0, bi, old_snd_nxt, max_deq, n_segs_now;
+ vlib_main_t *vm = wrk->vm;
+ int snd_space, n_segs = 0;
vlib_buffer_t *b;
ASSERT (tcp_in_fastrecovery (tc));
TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 0);
-
- /* Start resending from first un-acked segment */
old_snd_nxt = tc->snd_nxt;
- tc->snd_nxt = tc->snd_una;
- snd_space = tcp_available_cc_snd_space (tc);
- while (snd_space > 0)
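+  /* Retransmit of first unacked segment requested? If not, only send
+   * unsent data */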
+ if (!tcp_fastrecovery_first (tc))
+ goto send_unsent;
+
+ /* RFC 6582: [If a partial ack], retransmit the first unacknowledged
+ * segment. */
+ snd_space = tc->sack_sb.last_bytes_delivered;
+ tc->snd_nxt = tc->snd_una;
+ while (snd_space > 0 && n_segs < burst_size)
{
- offset += n_written;
- n_written = tcp_prepare_retransmit_segment (tc, offset, snd_space, &b);
+ n_written = tcp_prepare_retransmit_segment (wrk, tc, offset,
+ tc->snd_mss, &b);
/* Nothing left to retransmit */
if (n_written == 0)
break;
bi = vlib_get_buffer_index (vm, b);
- tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);
+ tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
snd_space -= n_written;
+ offset += n_written;
+ n_segs += 1;
+ }
+
+ if (n_segs == burst_size)
+ goto done;
+
+send_unsent:
+
+ /* RFC 6582: Send a new segment if permitted by the new value of cwnd. */
+ snd_space = tcp_available_cc_snd_space (tc);
+ if (snd_space < tc->snd_mss || tc->snd_mss == 0)
+ goto done;
+
+ max_deq = session_tx_fifo_max_dequeue (&tc->connection);
+ max_deq -= tc->snd_una_max - tc->snd_una;
+ if (max_deq)
+ {
+ snd_space = clib_min (max_deq, snd_space);
+ burst_size = clib_min (burst_size - n_segs, snd_space / tc->snd_mss);
+ n_segs_now = tcp_fast_retransmit_unsent (wrk, tc, burst_size);
+ if (max_deq > n_segs_now * tc->snd_mss)
+ tcp_program_fastretransmit (wrk, tc);
+ n_segs += n_segs_now;
}
- /* Restore snd_nxt. If window allows, send 1 SMSS of new data */
+ /* Restore snd_nxt */
tc->snd_nxt = old_snd_nxt;
+
+done:
+ tcp_fastrecovery_first_off (tc);
+ return n_segs;
}
/**
* Do fast retransmit
*/
-void
-tcp_fast_retransmit (tcp_connection_t * tc)
+int
+tcp_fast_retransmit (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
+ u32 burst_size)
{
if (tcp_opts_sack_permitted (&tc->rcv_opts))
- tcp_fast_retransmit_sack (tc);
+ return tcp_fast_retransmit_sack (wrk, tc, burst_size);
else
- tcp_fast_retransmit_no_sack (tc);
-}
-
-static u32
-tcp_session_has_ooo_data (tcp_connection_t * tc)
-{
- stream_session_t *s = session_get (tc->c_s_index, tc->c_thread_index);
- return svm_fifo_has_ooo_data (s->server_rx_fifo);
+ return tcp_fast_retransmit_no_sack (wrk, tc, burst_size);
}
static void
tc = tcp_connection_get (vnet_buffer (b)->tcp.connection_index,
vm->thread_index);
t = vlib_add_trace (vm, node, b, sizeof (*t));
- clib_memcpy (&t->tcp_header, th, sizeof (t->tcp_header));
- clib_memcpy (&t->tcp_connection, tc, sizeof (t->tcp_connection));
+ clib_memcpy_fast (&t->tcp_header, th, sizeof (t->tcp_header));
+ clib_memcpy_fast (&t->tcp_connection, tc, sizeof (t->tcp_connection));
}
}
tcp_output_handle_link_local (tc0, b0, next0, error0);
}
- /* Filter out DUPACKs if there are no OOO segments left */
- if (PREDICT_FALSE (vnet_buffer (b0)->tcp.flags & TCP_BUF_FLAG_DUPACK))
- {
- /* N.B. Should not filter burst of dupacks. Two issues:
- * 1) dupacks open cwnd on remote peer when congested
- * 2) acks leaving should have the latest rcv_wnd since the
- * burst may have eaten up all of it, so only the old ones
- * could be filtered.
- */
- if (!tcp_session_has_ooo_data (tc0))
- {
- *error0 = TCP_ERROR_FILTERED_DUPACKS;
- *next0 = TCP_OUTPUT_NEXT_DROP;
- return;
- }
- }
-
- /* Stop DELACK timer and fix flags */
- tc0->flags &= ~(TCP_CONN_SNDACK);
if (!TCP_ALWAYS_ACK)
tcp_timer_reset (tc0, TCP_TIMER_DELACK);
}
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
- tcp_set_time_now (thread_index);
+ tcp_set_time_now (tcp_get_worker (thread_index));
if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
tcp46_output_trace_frame (vm, node, from, n_left_from);
else
th0 = ip6_next_header ((ip6_header_t *) th0);
t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
- clib_memcpy (&t0->tcp_header, th0, sizeof (t0->tcp_header));
+ clib_memcpy_fast (&t0->tcp_header, th0,
+ sizeof (t0->tcp_header));
}
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,