/*
- * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Copyright (c) 2016-2019 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*/
#include <vnet/tcp/tcp.h>
-#include <vnet/lisp-cp/packets.h>
#include <math.h>
-vlib_node_registration_t tcp4_output_node;
-vlib_node_registration_t tcp6_output_node;
-
typedef enum _tcp_output_next
{
TCP_OUTPUT_NEXT_DROP,
tcp_connection_t tcp_connection;
} tcp_tx_trace_t;
-u16 dummy_mtu = 1460;
-
-u8 *
+static u8 *
format_tcp_tx_trace (u8 * s, va_list * args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
return s;
}
+#ifndef CLIB_MARCH_VARIANT
static u8
tcp_window_compute_scale (u32 window)
{
return wnd_scale;
}
-/**
- * Update max segment size we're able to process.
- *
- * The value is constrained by our interface's MTU and IP options. It is
- * also what we advertise to our peer.
- */
-void
-tcp_update_rcv_mss (tcp_connection_t * tc)
-{
- /* TODO find our iface MTU */
- tc->mss = dummy_mtu - sizeof (tcp_header_t);
-}
-
/**
* TCP's initial window
*/
tcp_update_rcv_mss (tc);
TCP_IW_N_SEGMENTS * tc->mss;
*/
- return TCP_MIN_RX_FIFO_SIZE;
+ return tcp_cfg.min_rx_fifo;
}
/**
u32
tcp_initial_window_to_advertise (tcp_connection_t * tc)
{
- tcp_main_t *tm = &tcp_main;
- u32 max_fifo;
+ /* Compute rcv wscale only if peer advertised support for it */
+ if (tc->state != TCP_STATE_SYN_RCVD || tcp_opts_wscale (&tc->rcv_opts))
+ tc->rcv_wscale = tcp_window_compute_scale (tcp_cfg.max_rx_fifo);
- /* Initial wnd for SYN. Fifos are not allocated yet.
- * Use some predefined value. For SYN-ACK we still want the
- * scale to be computed in the same way */
- max_fifo = tm->max_rx_fifo ? tm->max_rx_fifo : TCP_MAX_RX_FIFO_SIZE;
-
- tc->rcv_wscale = tcp_window_compute_scale (max_fifo);
tc->rcv_wnd = tcp_initial_wnd_unscaled (tc);
return clib_min (tc->rcv_wnd, TCP_WND_MAX);
}
-static void
+static inline void
tcp_update_rcv_wnd (tcp_connection_t * tc)
{
+ u32 available_space, wnd;
i32 observed_wnd;
- u32 available_space, max_fifo, wnd;
+
+ ASSERT (tc->rcv_opts.mss < transport_rx_fifo_size (&tc->connection));
/*
* Figure out how much space we have available
*/
available_space = transport_max_rx_enqueue (&tc->connection);
- max_fifo = transport_rx_fifo_size (&tc->connection);
-
- ASSERT (tc->rcv_opts.mss < max_fifo);
- if (available_space < tc->rcv_opts.mss && available_space < max_fifo >> 3)
- available_space = 0;
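+  /* Advertise a zero window if we can't fit at least one full segment */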
+ if (PREDICT_FALSE (available_space < tc->rcv_opts.mss))
+ {
+ tc->rcv_wnd = 0;
+ return;
+ }
/*
* Use the above and what we know about what we've previously advertised
* to compute the new window
*/
observed_wnd = (i32) tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las);
- if (observed_wnd < 0)
- observed_wnd = 0;
/* Bad. Thou shalt not shrink */
- if (available_space < observed_wnd)
+ if (PREDICT_FALSE ((i32) available_space < observed_wnd))
{
- wnd = observed_wnd;
- TCP_EVT_DBG (TCP_EVT_RCV_WND_SHRUNK, tc, observed_wnd, available_space);
+ wnd = clib_max (observed_wnd, 0);
+ TCP_EVT (TCP_EVT_RCV_WND_SHRUNK, tc, observed_wnd, available_space);
}
else
{
/* Make sure we have a multiple of rcv_wscale */
if (wnd && tc->rcv_wscale)
{
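+	  /* Round wnd down to a multiple of 1 << rcv_wscale (e.g., a
+	   * multiple of 1024 for wscale 10) */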
- wnd &= ~(1 << tc->rcv_wscale);
+ wnd &= ~((1 << tc->rcv_wscale) - 1);
if (wnd == 0)
wnd = 1 << tc->rcv_wscale;
}
/**
* Compute and return window to advertise, scaled as per RFC1323
*/
-static u32
+static inline u32
tcp_window_to_advertise (tcp_connection_t * tc, tcp_state_t state)
{
if (state < TCP_STATE_ESTABLISHED)
return tcp_initial_window_to_advertise (tc);
tcp_update_rcv_wnd (tc);
-
- if (tc->rcv_wnd == 0)
- {
- tc->flags |= TCP_CONN_SENT_RCV_WND0;
- }
- else
- {
- tc->flags &= ~TCP_CONN_SENT_RCV_WND0;
- }
-
return tc->rcv_wnd >> tc->rcv_wscale;
}
*data++ = TCP_OPTION_MSS;
*data++ = TCP_OPTION_LEN_MSS;
buf = clib_host_to_net_u16 (opts->mss);
- clib_memcpy (data, &buf, sizeof (opts->mss));
+ clib_memcpy_fast (data, &buf, sizeof (opts->mss));
data += sizeof (opts->mss);
opts_len += TCP_OPTION_LEN_MSS;
}
*data++ = TCP_OPTION_TIMESTAMP;
*data++ = TCP_OPTION_LEN_TIMESTAMP;
buf = clib_host_to_net_u32 (opts->tsval);
- clib_memcpy (data, &buf, sizeof (opts->tsval));
+ clib_memcpy_fast (data, &buf, sizeof (opts->tsval));
data += sizeof (opts->tsval);
buf = clib_host_to_net_u32 (opts->tsecr);
- clib_memcpy (data, &buf, sizeof (opts->tsecr));
+ clib_memcpy_fast (data, &buf, sizeof (opts->tsecr));
data += sizeof (opts->tsecr);
opts_len += TCP_OPTION_LEN_TIMESTAMP;
}
if (tcp_opts_sack (opts))
{
int i;
- u32 n_sack_blocks = clib_min (vec_len (opts->sacks),
- TCP_OPTS_MAX_SACK_BLOCKS);
- if (n_sack_blocks != 0)
+ if (opts->n_sack_blocks != 0)
{
*data++ = TCP_OPTION_SACK_BLOCK;
- *data++ = 2 + n_sack_blocks * TCP_OPTION_LEN_SACK_BLOCK;
- for (i = 0; i < n_sack_blocks; i++)
+ *data++ = 2 + opts->n_sack_blocks * TCP_OPTION_LEN_SACK_BLOCK;
+ for (i = 0; i < opts->n_sack_blocks; i++)
{
buf = clib_host_to_net_u32 (opts->sacks[i].start);
- clib_memcpy (data, &buf, seq_len);
+ clib_memcpy_fast (data, &buf, seq_len);
data += seq_len;
buf = clib_host_to_net_u32 (opts->sacks[i].end);
- clib_memcpy (data, &buf, seq_len);
+ clib_memcpy_fast (data, &buf, seq_len);
data += seq_len;
}
- opts_len += 2 + n_sack_blocks * TCP_OPTION_LEN_SACK_BLOCK;
+ opts_len += 2 + opts->n_sack_blocks * TCP_OPTION_LEN_SACK_BLOCK;
}
}
}
static int
-tcp_make_syn_options (tcp_options_t * opts, u8 wnd_scale)
+tcp_make_syn_options (tcp_connection_t * tc, tcp_options_t * opts)
{
u8 len = 0;
opts->flags |= TCP_OPTS_FLAG_MSS;
- opts->mss = dummy_mtu; /*XXX discover that */
+ opts->mss = tc->mss;
len += TCP_OPTION_LEN_MSS;
opts->flags |= TCP_OPTS_FLAG_WSCALE;
- opts->wscale = wnd_scale;
+ opts->wscale = tc->rcv_wscale;
len += TCP_OPTION_LEN_WINDOW_SCALE;
opts->flags |= TCP_OPTS_FLAG_TSTAMP;
if (tcp_opts_tstamp (&tc->rcv_opts))
{
opts->flags |= TCP_OPTS_FLAG_TSTAMP;
- opts->tsval = tcp_time_now_w_thread (tc->c_thread_index);
+ opts->tsval = tcp_tstamp (tc);
opts->tsecr = tc->tsval_recent;
len += TCP_OPTION_LEN_TIMESTAMP;
}
if (vec_len (tc->snd_sacks))
{
opts->flags |= TCP_OPTS_FLAG_SACK;
- opts->sacks = tc->snd_sacks;
- opts->n_sack_blocks = clib_min (vec_len (tc->snd_sacks),
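+      /* Rotate through the sack blocks so that successive (dup)acks
+       * eventually advertise all of them */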
+ if (tc->snd_sack_pos >= vec_len (tc->snd_sacks))
+ tc->snd_sack_pos = 0;
+ opts->sacks = &tc->snd_sacks[tc->snd_sack_pos];
+ opts->n_sack_blocks = vec_len (tc->snd_sacks) - tc->snd_sack_pos;
+ opts->n_sack_blocks = clib_min (opts->n_sack_blocks,
TCP_OPTS_MAX_SACK_BLOCKS);
+ tc->snd_sack_pos += opts->n_sack_blocks;
len += 2 + TCP_OPTION_LEN_SACK_BLOCK * opts->n_sack_blocks;
}
}
switch (state)
{
case TCP_STATE_ESTABLISHED:
+ case TCP_STATE_CLOSE_WAIT:
case TCP_STATE_FIN_WAIT_1:
+ case TCP_STATE_LAST_ACK:
+ case TCP_STATE_CLOSING:
+ case TCP_STATE_FIN_WAIT_2:
+ case TCP_STATE_TIME_WAIT:
case TCP_STATE_CLOSED:
- case TCP_STATE_CLOSE_WAIT:
return tcp_make_established_options (tc, opts);
case TCP_STATE_SYN_RCVD:
return tcp_make_synack_options (tc, opts);
case TCP_STATE_SYN_SENT:
- return tcp_make_syn_options (opts, tc->rcv_wscale);
+ return tcp_make_syn_options (tc, opts);
default:
clib_warning ("State not handled! %d", state);
return 0;
&tc->snd_opts);
tcp_update_rcv_wnd (tc);
-}
-
-void
-tcp_init_mss (tcp_connection_t * tc)
-{
- u16 default_min_mss = 536;
- tcp_update_rcv_mss (tc);
-
- /* TODO cache mss and consider PMTU discovery */
- tc->snd_mss = clib_min (tc->rcv_opts.mss, tc->mss);
-
- if (tc->snd_mss < 45)
- {
- clib_warning ("snd mss is 0");
- /* Assume that at least the min default mss works */
- tc->snd_mss = default_min_mss;
- tc->rcv_opts.mss = default_min_mss;
- }
-
- /* We should have enough space for 40 bytes of options */
- ASSERT (tc->snd_mss > 45);
-
- /* If we use timestamp option, account for it */
- if (tcp_opts_tstamp (&tc->rcv_opts))
- tc->snd_mss -= TCP_OPTION_LEN_TIMESTAMP;
-}
-static int
-tcp_alloc_tx_buffers (tcp_worker_ctx_t * wrk, u16 * n_bufs, u32 wanted)
-{
- vlib_main_t *vm = vlib_get_main ();
- u32 n_alloc;
+ if (tc->flags & TCP_CONN_RATE_SAMPLE)
+ tc->flags |= TCP_CONN_TRACK_BURST;
- ASSERT (wanted > *n_bufs);
- vec_validate_aligned (wrk->tx_buffers, wanted - 1, CLIB_CACHE_LINE_BYTES);
- n_alloc = vlib_buffer_alloc (vm, &wrk->tx_buffers[*n_bufs],
- wanted - *n_bufs);
- *n_bufs += n_alloc;
- _vec_len (wrk->tx_buffers) = *n_bufs;
- return n_alloc;
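+  /* Nothing in flight, so tx is (re)starting; let the cc algo know */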
+ if (tc->snd_una == tc->snd_nxt)
+ tcp_cc_event (tc, TCP_CC_EVT_START_TX);
}
-always_inline int
-tcp_get_free_buffer_index (tcp_worker_ctx_t * wrk, u32 * bidx)
-{
- u16 n_bufs = vec_len (wrk->tx_buffers);
-
- TCP_DBG_BUFFER_ALLOC_MAYBE_FAIL (wrk->vm->thread_index);
-
- if (PREDICT_FALSE (!n_bufs))
- {
- if (!tcp_alloc_tx_buffers (wrk, &n_bufs, VLIB_FRAME_SIZE))
- {
- *bidx = ~0;
- return -1;
- }
- }
- *bidx = wrk->tx_buffers[--n_bufs];
- _vec_len (wrk->tx_buffers) = n_bufs;
- return 0;
-}
+#endif /* CLIB_MARCH_VARIANT */
static void *
tcp_reuse_buffer (vlib_main_t * vm, vlib_buffer_t * b)
vnet_buffer (b)->tcp.flags = 0;
/* Leave enough space for headers */
- return vlib_buffer_make_headroom (b, MAX_HDRS_LEN);
+ return vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
}
+#ifndef CLIB_MARCH_VARIANT
static void *
tcp_init_buffer (vlib_main_t * vm, vlib_buffer_t * b)
{
ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
- b->flags &= VLIB_BUFFER_NON_DEFAULT_FREELIST;
b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
b->total_length_not_including_first_buffer = 0;
b->current_data = 0;
vnet_buffer (b)->tcp.flags = 0;
VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);
/* Leave enough space for headers */
- return vlib_buffer_make_headroom (b, MAX_HDRS_LEN);
+ return vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
}
/**
* Prepare ACK
*/
-static void
+static inline void
tcp_make_ack_i (tcp_connection_t * tc, vlib_buffer_t * b, tcp_state_t state,
u8 flags)
{
tcp_options_write ((u8 *) (th + 1), snd_opts);
vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
+
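+  /* Track zero-window advertisements so a window update ack can be
+   * sent later, once space frees up */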
+ if (wnd == 0)
+ tcp_zero_rwnd_sent_on (tc);
+ else
+ tcp_zero_rwnd_sent_off (tc);
}
/**
* Convert buffer to ACK
*/
-void
+static inline void
tcp_make_ack (tcp_connection_t * tc, vlib_buffer_t * b)
{
- vlib_main_t *vm = vlib_get_main ();
-
- tcp_reuse_buffer (vm, b);
tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_ACK);
- TCP_EVT_DBG (TCP_EVT_ACK_SENT, tc);
- vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_ACK;
+ TCP_EVT (TCP_EVT_ACK_SENT, tc);
tc->rcv_las = tc->rcv_nxt;
}
void
tcp_make_fin (tcp_connection_t * tc, vlib_buffer_t * b)
{
- vlib_main_t *vm = vlib_get_main ();
- u8 flags = 0;
-
- tcp_reuse_buffer (vm, b);
-
- flags = TCP_FLAG_FIN | TCP_FLAG_ACK;
- tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, flags);
-
- /* Reset flags, make sure ack is sent */
- vnet_buffer (b)->tcp.flags &= ~TCP_BUF_FLAG_DUPACK;
+ tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK);
}
/**
/* Make and write options */
clib_memset (&snd_opts, 0, sizeof (snd_opts));
- tcp_opts_len = tcp_make_syn_options (&snd_opts, tc->rcv_wscale);
+ tcp_opts_len = tcp_make_syn_options (tc, &snd_opts);
tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss,
void
tcp_make_synack (tcp_connection_t * tc, vlib_buffer_t * b)
{
- vlib_main_t *vm = vlib_get_main ();
tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
u8 tcp_opts_len, tcp_hdr_opts_len;
tcp_header_t *th;
u16 initial_wnd;
clib_memset (snd_opts, 0, sizeof (*snd_opts));
- tcp_reuse_buffer (vm, b);
-
initial_wnd = tcp_initial_window_to_advertise (tc);
tcp_opts_len = tcp_make_synack_options (tc, snd_opts);
tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
tcp_options_write ((u8 *) (th + 1), snd_opts);
vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
- vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_ACK;
-
- /* Init retransmit timer. Use update instead of set because of
- * retransmissions */
- tcp_retransmit_timer_force_update (tc);
- TCP_EVT_DBG (TCP_EVT_SYNACK_SENT, tc);
}
always_inline void
{
tcp_enqueue_to_output_i (wrk, b, bi, is_ip4, 1);
}
+#endif /* CLIB_MARCH_VARIANT */
static int
tcp_make_reset_in_place (vlib_main_t * vm, vlib_buffer_t * b0,
{
ih6 = vlib_buffer_get_current (b0);
ASSERT ((ih6->ip_version_traffic_class_and_flow_label & 0xF0) == 0x60);
- clib_memcpy (&src_ip60, &ih6->src_address, sizeof (ip6_address_t));
- clib_memcpy (&dst_ip60, &ih6->dst_address, sizeof (ip6_address_t));
+ clib_memcpy_fast (&src_ip60, &ih6->src_address, sizeof (ip6_address_t));
+ clib_memcpy_fast (&dst_ip60, &ih6->dst_address, sizeof (ip6_address_t));
}
src_port = th0->src_port;
return 0;
}
+#ifndef CLIB_MARCH_VARIANT
/**
* Send reset without reusing existing buffer
*
* It extracts connection info out of original packet
*/
void
-tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt, u8 is_ip4)
+tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt,
+ u32 thread_index, u8 is_ip4)
{
- tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
+ tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
vlib_main_t *vm = wrk->vm;
vlib_buffer_t *b;
u32 bi, sw_if_index, fib_index;
ip6_header_t *ih6, *pkt_ih6;
fib_protocol_t fib_proto;
- if (PREDICT_FALSE (tcp_get_free_buffer_index (wrk, &bi)))
+ if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
return;
b = vlib_get_buffer (vm, bi);
{
flags = TCP_FLAG_RST;
seq = pkt_th->ack_number;
- ack = (tc && tc->state >= TCP_STATE_SYN_RCVD) ? tc->rcv_nxt : 0;
+ ack = (tc->state >= TCP_STATE_SYN_RCVD) ? tc->rcv_nxt : 0;
}
else
{
}
tcp_enqueue_to_ip_lookup_now (wrk, b, bi, is_ip4, fib_index);
- TCP_EVT_DBG (TCP_EVT_RST_SENT, tc);
+ TCP_EVT (TCP_EVT_RST_SENT, tc);
+ vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4),
+ TCP_ERROR_RST_SENT, 1);
}
/**
u16 tcp_hdr_opts_len, advertise_wnd, opts_write_len;
u8 flags;
- if (PREDICT_FALSE (tcp_get_free_buffer_index (wrk, &bi)))
+ if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
return;
b = vlib_get_buffer (vm, bi);
tcp_init_buffer (vm, b);
opts_write_len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
ASSERT (opts_write_len == tc->snd_opts_len);
vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
- if (tc->c_is_ip4)
- {
- ip4_header_t *ih4;
- ih4 = vlib_buffer_push_ip4 (vm, b, &tc->c_lcl_ip.ip4,
- &tc->c_rmt_ip.ip4, IP_PROTOCOL_TCP, 0);
- th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4);
- }
- else
- {
- int bogus = ~0;
- ip6_header_t *ih6;
- ih6 = vlib_buffer_push_ip6 (vm, b, &tc->c_lcl_ip.ip6,
- &tc->c_rmt_ip.ip6, IP_PROTOCOL_TCP);
- th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus);
- ASSERT (!bogus);
- }
- tcp_enqueue_to_ip_lookup_now (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
- TCP_EVT_DBG (TCP_EVT_RST_SENT, tc);
+ tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
+ TCP_EVT (TCP_EVT_RST_SENT, tc);
+ vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4),
+ TCP_ERROR_RST_SENT, 1);
}
static void
* Setup retransmit and establish timers before requesting buffer
* such that we can return if we've run out.
*/
- tcp_timer_set (tc, TCP_TIMER_ESTABLISH, TCP_ESTABLISH_TIME);
tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN,
tc->rto * TCP_TO_TIMER_TICK);
- if (PREDICT_FALSE (tcp_get_free_buffer_index (wrk, &bi)))
- return;
+ if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
+ {
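+      /* No free buffers: retry the SYN on the next timer tick */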
+ tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN, 1);
+ return;
+ }
b = vlib_get_buffer (vm, bi);
tcp_init_buffer (vm, b);
tcp_make_syn (tc, b);
/* Measure RTT with this */
- tc->rtt_ts = tcp_time_now ();
+ tc->rtt_ts = tcp_time_now_us (vlib_num_workers ()? 1 : 0);
tc->rtt_seq = tc->snd_nxt;
tc->rto_boff = 0;
tcp_push_ip_hdr (wrk, tc, b);
tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
- TCP_EVT_DBG (TCP_EVT_SYN_SENT, tc);
+ TCP_EVT (TCP_EVT_SYN_SENT, tc);
+}
+
+void
+tcp_send_synack (tcp_connection_t * tc)
+{
+ tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
+ vlib_main_t *vm = wrk->vm;
+ vlib_buffer_t *b;
+ u32 bi;
+
+ tcp_retransmit_timer_force_update (tc);
+
+ if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
+ {
+ tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, 1);
+ return;
+ }
+
+ tc->rtt_ts = tcp_time_now_us (tc->c_thread_index);
+ b = vlib_get_buffer (vm, bi);
+ tcp_init_buffer (vm, b);
+ tcp_make_synack (tc, b);
+ tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
+ TCP_EVT (TCP_EVT_SYNACK_SENT, tc);
}
/**
fin_snt = tc->flags & TCP_CONN_FINSNT;
if (fin_snt)
- tc->snd_nxt = tc->snd_una;
+ tc->snd_nxt -= 1;
- if (PREDICT_FALSE (tcp_get_free_buffer_index (wrk, &bi)))
+ if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
{
/* Out of buffers so program fin retransmit ASAP */
tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, 1);
- goto post_enqueue;
+ if (fin_snt)
+ tc->snd_nxt += 1;
+ else
+	/* Make sure the retransmit retries a fin, not data */
+ tc->flags |= TCP_CONN_FINSNT;
+ return;
}
tcp_retransmit_timer_force_update (tc);
tcp_init_buffer (vm, b);
tcp_make_fin (tc, b);
tcp_enqueue_to_output_now (wrk, b, bi, tc->c_is_ip4);
- TCP_EVT_DBG (TCP_EVT_FIN_SENT, tc);
-
-post_enqueue:
+ TCP_EVT (TCP_EVT_FIN_SENT, tc);
+ /* Account for the FIN */
+ tc->snd_nxt += 1;
if (!fin_snt)
{
tc->flags |= TCP_CONN_FINSNT;
tc->flags &= ~TCP_CONN_FINPNDG;
- /* Account for the FIN */
- tc->snd_una_max += 1;
- tc->snd_nxt = tc->snd_una_max;
- }
- else
- {
- tc->snd_nxt = tc->snd_una_max;
- }
-}
-
-always_inline u8
-tcp_make_state_flags (tcp_connection_t * tc, tcp_state_t next_state)
-{
- switch (next_state)
- {
- case TCP_STATE_ESTABLISHED:
- case TCP_STATE_CLOSE_WAIT:
- return TCP_FLAG_ACK;
- case TCP_STATE_SYN_RCVD:
- return TCP_FLAG_SYN | TCP_FLAG_ACK;
- case TCP_STATE_SYN_SENT:
- return TCP_FLAG_SYN;
- case TCP_STATE_LAST_ACK:
- case TCP_STATE_FIN_WAIT_1:
- if (tc->snd_nxt + 1 < tc->snd_una_max)
- return TCP_FLAG_ACK;
- else
- return TCP_FLAG_FIN;
- default:
- clib_warning ("Shouldn't be here!");
+ tc->snd_una_max = seq_max (tc->snd_una_max, tc->snd_nxt);
}
- return 0;
}
/**
- * Push TCP header and update connection variables
+ * Push TCP header and update connection variables. Should only be called
+ * for segments with data, not for 'control' packets.
*/
always_inline void
-tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b,
- tcp_state_t next_state, u8 compute_opts, u8 maybe_burst)
+tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b, u32 snd_nxt,
+ u8 compute_opts, u8 maybe_burst, u8 update_snd_nxt)
{
+ u8 tcp_hdr_opts_len, flags = TCP_FLAG_ACK;
u32 advertise_wnd, data_len;
- u8 tcp_hdr_opts_len, flags;
tcp_main_t *tm = &tcp_main;
tcp_header_t *th;
if (maybe_burst)
advertise_wnd = tc->rcv_wnd >> tc->rcv_wscale;
else
- advertise_wnd = tcp_window_to_advertise (tc, next_state);
-
- flags = tcp_make_state_flags (tc, next_state);
+ advertise_wnd = tcp_window_to_advertise (tc, TCP_STATE_ESTABLISHED);
- th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
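+  /* Set PSH if the sequence marked for push falls within this segment */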
+ if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
+ {
+ if (seq_geq (tc->psh_seq, snd_nxt)
+ && seq_lt (tc->psh_seq, snd_nxt + data_len))
+ flags |= TCP_FLAG_PSH;
+ }
+ th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, snd_nxt,
tc->rcv_nxt, tcp_hdr_opts_len, flags,
advertise_wnd);
if (maybe_burst)
{
- clib_memcpy ((u8 *) (th + 1),
- tm->wrk_ctx[tc->c_thread_index].cached_opts,
- tc->snd_opts_len);
+ clib_memcpy_fast ((u8 *) (th + 1),
+ tm->wrk_ctx[tc->c_thread_index].cached_opts,
+ tc->snd_opts_len);
}
else
{
* Update connection variables
*/
- tc->snd_nxt += data_len;
+ if (update_snd_nxt)
+ tc->snd_nxt += data_len;
tc->rcv_las = tc->rcv_nxt;
- TCP_EVT_DBG (TCP_EVT_PKTIZE, tc);
+ tc->bytes_out += data_len;
+ tc->data_segs_out += 1;
+
+ TCP_EVT (TCP_EVT_PKTIZE, tc);
}
u32
-tcp_push_header (tcp_connection_t * tc, vlib_buffer_t * b)
+tcp_session_push_header (transport_connection_t * tconn, vlib_buffer_t * b)
{
- tcp_push_hdr_i (tc, b, TCP_STATE_ESTABLISHED, /* compute opts */ 0,
- /* burst */ 1);
- tc->snd_una_max = tc->snd_nxt;
- ASSERT (seq_leq (tc->snd_una_max, tc->snd_una + tc->snd_wnd
- + tcp_fastrecovery_sent_1_smss (tc) * tc->snd_mss));
+ tcp_connection_t *tc = (tcp_connection_t *) tconn;
+
+ if (tc->flags & TCP_CONN_TRACK_BURST)
+ {
+ tcp_bt_check_app_limited (tc);
+ tcp_bt_track_tx (tc);
+ tc->flags &= ~TCP_CONN_TRACK_BURST;
+ }
+
+ tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0, /* burst */ 1,
+ /* update_snd_nxt */ 1);
+
+ tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
/* If not tracking an ACK, start tracking */
if (tc->rtt_ts == 0 && !tcp_in_cong_recovery (tc))
vlib_buffer_t *b;
u32 bi;
- /* Get buffer */
- if (PREDICT_FALSE (tcp_get_free_buffer_index (wrk, &bi)))
- return;
+ if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
+ {
+ tcp_update_rcv_wnd (tc);
+ return;
+ }
b = vlib_get_buffer (vm, bi);
tcp_init_buffer (vm, b);
-
- /* Fill in the ACK */
tcp_make_ack (tc, b);
tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
}
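+/**
+ * Request an ack through the session layer's custom tx path. Repeated
+ * requests are coalesced, i.e., only one tx event is queued.
+ */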
+void
+tcp_program_ack (tcp_connection_t * tc)
+{
+ if (!(tc->flags & TCP_CONN_SNDACK))
+ {
+ session_add_self_custom_tx_evt (&tc->connection, 1);
+ tc->flags |= TCP_CONN_SNDACK;
+ }
+}
+
+void
+tcp_program_dupack (tcp_connection_t * tc)
+{
+ if (!(tc->flags & TCP_CONN_SNDACK))
+ {
+ session_add_self_custom_tx_evt (&tc->connection, 1);
+ tc->flags |= TCP_CONN_SNDACK;
+ }
+ if (tc->pending_dupacks < 255)
+ tc->pending_dupacks += 1;
+}
+
+void
+tcp_program_retransmit (tcp_connection_t * tc)
+{
+ if (!(tc->flags & TCP_CONN_RXT_PENDING))
+ {
+ session_add_self_custom_tx_evt (&tc->connection, 0);
+ tc->flags |= TCP_CONN_RXT_PENDING;
+ }
+}
+
/**
* Delayed ack timer handler
*
}
/**
- * Build a retransmit segment
+ * Send window update ack
*
- * @return the number of bytes in the segment or 0 if there's nothing to
- * retransmit
+ * Ensures that it will be sent only once, after a zero rwnd has been
+ * advertised in a previous ack, and only if rwnd has grown beyond a
+ * configurable value.
*/
-static u32
-tcp_prepare_retransmit_segment (tcp_worker_ctx_t * wrk,
- tcp_connection_t * tc, u32 offset,
- u32 max_deq_bytes, vlib_buffer_t ** b)
+void
+tcp_send_window_update_ack (tcp_connection_t * tc)
+{
+ if (tcp_zero_rwnd_sent (tc))
+ {
+ tcp_update_rcv_wnd (tc);
+ if (tc->rcv_wnd >= tcp_cfg.rwnd_min_update_ack * tc->snd_mss)
+ {
+ tcp_zero_rwnd_sent_off (tc);
+ tcp_program_ack (tc);
+ }
+ }
+}
+
+/**
+ * Allocate a new buffer and build a new tcp segment
+ *
+ * @param wrk tcp worker
+ * @param tc connection for which the segment will be allocated
+ * @param offset offset of the first byte in the tx fifo
+ * @param max_deq_bytes segment size
+ * @param[out] b pointer to buffer allocated
+ *
+ * @return the number of bytes in the segment or 0 if buffer cannot be
+ * allocated or no data available
+ */
+static int
+tcp_prepare_segment (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
+ u32 offset, u32 max_deq_bytes, vlib_buffer_t ** b)
{
u32 bytes_per_buffer = vnet_get_tcp_main ()->bytes_per_buffer;
vlib_main_t *vm = wrk->vm;
+ u32 bi, seg_size;
int n_bytes = 0;
- u32 start, bi, available_bytes, seg_size;
u8 *data;
- ASSERT (tc->state >= TCP_STATE_ESTABLISHED);
- ASSERT (max_deq_bytes != 0);
-
- /*
- * Make sure we can retransmit something
- */
- available_bytes = session_tx_fifo_max_dequeue (&tc->connection);
- ASSERT (available_bytes >= offset);
- available_bytes -= offset;
- if (!available_bytes)
- return 0;
- max_deq_bytes = clib_min (tc->snd_mss, max_deq_bytes);
- max_deq_bytes = clib_min (available_bytes, max_deq_bytes);
-
- /* Start is beyond snd_congestion */
- start = tc->snd_una + offset;
- if (seq_geq (start, tc->snd_congestion))
- goto done;
-
- /* Don't overshoot snd_congestion */
- if (seq_gt (start + max_deq_bytes, tc->snd_congestion))
- {
- max_deq_bytes = tc->snd_congestion - start;
- if (max_deq_bytes == 0)
- goto done;
- }
-
- seg_size = max_deq_bytes + MAX_HDRS_LEN;
+ seg_size = max_deq_bytes + TRANSPORT_MAX_HDRS_LEN;
/*
* Prepare options
/* Easy case, buffer size greater than mss */
if (PREDICT_TRUE (seg_size <= bytes_per_buffer))
{
- if (PREDICT_FALSE (tcp_get_free_buffer_index (wrk, &bi)))
+ if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
return 0;
*b = vlib_get_buffer (vm, bi);
data = tcp_init_buffer (vm, *b);
- n_bytes = stream_session_peek_bytes (&tc->connection, data, offset,
- max_deq_bytes);
+ n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
+ max_deq_bytes);
ASSERT (n_bytes == max_deq_bytes);
b[0]->current_length = n_bytes;
- tcp_push_hdr_i (tc, *b, tc->state, /* compute opts */ 0, /* burst */ 0);
- if (seq_gt (tc->snd_nxt, tc->snd_una_max))
- tc->snd_una_max = tc->snd_nxt;
+ tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
+ /* burst */ 0, /* update_snd_nxt */ 0);
}
/* Split mss into multiple buffers */
else
{
- u32 chain_bi = ~0, n_bufs_per_seg;
- u16 n_peeked, len_to_deq, available_bufs;
+ u32 chain_bi = ~0, n_bufs_per_seg, n_bufs;
+ u16 n_peeked, len_to_deq;
vlib_buffer_t *chain_b, *prev_b;
int i;
/* Make sure we have enough buffers */
n_bufs_per_seg = ceil ((double) seg_size / bytes_per_buffer);
- available_bufs = vec_len (wrk->tx_buffers);
- if (n_bufs_per_seg > available_bufs)
+ vec_validate_aligned (wrk->tx_buffers, n_bufs_per_seg - 1,
+ CLIB_CACHE_LINE_BYTES);
+ n_bufs = vlib_buffer_alloc (vm, wrk->tx_buffers, n_bufs_per_seg);
+ if (PREDICT_FALSE (n_bufs != n_bufs_per_seg))
{
- tcp_alloc_tx_buffers (wrk, &available_bufs, VLIB_FRAME_SIZE);
- if (n_bufs_per_seg > available_bufs)
- {
- *b = 0;
- return 0;
- }
+ if (n_bufs)
+ vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
+ return 0;
}
- tcp_get_free_buffer_index (wrk, &bi);
- ASSERT (bi != (u32) ~ 0);
- *b = vlib_get_buffer (vm, bi);
+ *b = vlib_get_buffer (vm, wrk->tx_buffers[--n_bufs]);
data = tcp_init_buffer (vm, *b);
- n_bytes = stream_session_peek_bytes (&tc->connection, data, offset,
- bytes_per_buffer - MAX_HDRS_LEN);
+ n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
+ bytes_per_buffer -
+ TRANSPORT_MAX_HDRS_LEN);
b[0]->current_length = n_bytes;
b[0]->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
b[0]->total_length_not_including_first_buffer = 0;
{
prev_b = chain_b;
len_to_deq = clib_min (max_deq_bytes, bytes_per_buffer);
- tcp_get_free_buffer_index (wrk, &chain_bi);
- ASSERT (chain_bi != (u32) ~ 0);
+ chain_bi = wrk->tx_buffers[--n_bufs];
chain_b = vlib_get_buffer (vm, chain_bi);
chain_b->current_data = 0;
data = vlib_buffer_get_current (chain_b);
- n_peeked = stream_session_peek_bytes (&tc->connection, data,
- offset + n_bytes, len_to_deq);
+ n_peeked = session_tx_fifo_peek_bytes (&tc->connection, data,
+ offset + n_bytes,
+ len_to_deq);
ASSERT (n_peeked == len_to_deq);
n_bytes += n_peeked;
chain_b->current_length = n_peeked;
b[0]->total_length_not_including_first_buffer += n_peeked;
}
- tcp_push_hdr_i (tc, *b, tc->state, /* compute opts */ 0, /* burst */ 0);
- if (seq_gt (tc->snd_nxt, tc->snd_una_max))
- tc->snd_una_max = tc->snd_nxt;
+ tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
+ /* burst */ 0, /* update_snd_nxt */ 0);
+
+ if (PREDICT_FALSE (n_bufs))
+ {
+ clib_warning ("not all buffers consumed");
+ vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
+ }
}
ASSERT (n_bytes > 0);
ASSERT (((*b)->current_data + (*b)->current_length) <= bytes_per_buffer);
- if (tcp_in_fastrecovery (tc))
- tc->snd_rxt_bytes += n_bytes;
-
-done:
- TCP_EVT_DBG (TCP_EVT_CC_RTX, tc, offset, n_bytes);
return n_bytes;
}
/**
- * Reset congestion control, switch cwnd to loss window and try again.
+ * Build a retransmit segment
+ *
+ * @return the number of bytes in the segment or 0 if there's nothing to
+ * retransmit
*/
-static void
-tcp_rxt_timeout_cc (tcp_connection_t * tc)
+static u32
+tcp_prepare_retransmit_segment (tcp_worker_ctx_t * wrk,
+ tcp_connection_t * tc, u32 offset,
+ u32 max_deq_bytes, vlib_buffer_t ** b)
{
- TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 6);
- tc->prev_ssthresh = tc->ssthresh;
- tc->prev_cwnd = tc->cwnd;
+ u32 start, available_bytes;
+ int n_bytes = 0;
+
+ ASSERT (tc->state >= TCP_STATE_ESTABLISHED);
+ ASSERT (max_deq_bytes != 0);
+
+ /*
+ * Make sure we can retransmit something
+ */
+ available_bytes = transport_max_tx_dequeue (&tc->connection);
+ ASSERT (available_bytes >= offset);
+ available_bytes -= offset;
+ if (!available_bytes)
+ return 0;
+
+ max_deq_bytes = clib_min (tc->snd_mss, max_deq_bytes);
+ max_deq_bytes = clib_min (available_bytes, max_deq_bytes);
+
+ /* Start is beyond snd_congestion */
+ start = tc->snd_una + offset;
+ if (seq_geq (start, tc->snd_congestion))
+ return 0;
- /* Cleanly recover cc (also clears up fast retransmit) */
- if (tcp_in_fastrecovery (tc))
+ /* Don't overshoot snd_congestion */
+ if (seq_gt (start + max_deq_bytes, tc->snd_congestion))
{
- /* TODO be less aggressive about this */
- scoreboard_clear (&tc->sack_sb);
- tcp_cc_fastrecovery_exit (tc);
+ max_deq_bytes = tc->snd_congestion - start;
+ if (max_deq_bytes == 0)
+ return 0;
}
- /* Start again from the beginning */
- tc->cc_algo->congestion (tc);
- tc->cwnd = tcp_loss_wnd (tc);
- tc->snd_congestion = tc->snd_una_max;
- tc->rtt_ts = 0;
- tc->cwnd_acc_bytes = 0;
+ n_bytes = tcp_prepare_segment (wrk, tc, offset, max_deq_bytes, b);
+ if (!n_bytes)
+ return 0;
- tcp_recovery_on (tc);
+ tc->snd_rxt_bytes += n_bytes;
+
+ if (tc->flags & TCP_CONN_RATE_SAMPLE)
+ tcp_bt_track_rxt (tc, start, start + n_bytes);
+
+ tc->bytes_retrans += n_bytes;
+ tc->segs_retrans += 1;
+ TCP_EVT (TCP_EVT_CC_RTX, tc, offset, n_bytes);
+
+ return n_bytes;
}
-static inline void
-tcp_timer_retransmit_handler_i (u32 index, u8 is_syn)
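+/**
+ * Reset the sack scoreboard if the peer appears to have reneged on
+ * previously sacked data
+ */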
+static void
+tcp_check_sack_reneging (tcp_connection_t * tc)
+{
+ sack_scoreboard_t *sb = &tc->sack_sb;
+ sack_scoreboard_hole_t *hole;
+
+ hole = scoreboard_first_hole (sb);
+ if (!sb->is_reneging && (!hole || hole->start == tc->snd_una))
+ return;
+
+ scoreboard_clear_reneging (sb, tc->snd_una, tc->snd_nxt);
+}
+
+/**
+ * Reset congestion control, switch cwnd to loss window and try again.
+ */
+static void
+tcp_cc_init_rxt_timeout (tcp_connection_t * tc)
+{
+ TCP_EVT (TCP_EVT_CC_EVT, tc, 6);
+
+ tc->prev_ssthresh = tc->ssthresh;
+ tc->prev_cwnd = tc->cwnd;
+
+  /* If we entered loss without fast recovery, notify cc algo of the
+ * congestion event such that it can update ssthresh and its state */
+ if (!tcp_in_fastrecovery (tc))
+ tcp_cc_congestion (tc);
+
+ /* Let cc algo decide loss cwnd and ssthresh post unrecovered loss */
+ tcp_cc_loss (tc);
+
+ tc->rtt_ts = 0;
+ tc->cwnd_acc_bytes = 0;
+ tc->tr_occurences += 1;
+ tcp_recovery_on (tc);
+}
+
+void
+tcp_timer_retransmit_handler (u32 tc_index)
{
u32 thread_index = vlib_get_thread_index ();
tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
vlib_buffer_t *b = 0;
u32 bi, n_bytes;
- if (is_syn)
- {
- tc = tcp_half_open_connection_get (index);
- /* Note: the connection may have transitioned to ESTABLISHED... */
- if (PREDICT_FALSE (tc == 0))
- return;
- tc->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID;
- }
- else
- {
- tc = tcp_connection_get (index, thread_index);
- /* Note: the connection may have been closed and pool_put */
- if (PREDICT_FALSE (tc == 0))
- return;
- tc->timers[TCP_TIMER_RETRANSMIT] = TCP_TIMER_HANDLE_INVALID;
- }
+ tc = tcp_connection_get (tc_index, thread_index);
+
+ /* Note: the connection may have been closed and pool_put */
+ if (PREDICT_FALSE (tc == 0 || tc->state == TCP_STATE_SYN_SENT))
+ return;
+
+ tc->timers[TCP_TIMER_RETRANSMIT] = TCP_TIMER_HANDLE_INVALID;
+
+ /* Wait-close and retransmit could pop at the same time */
+ if (tc->state == TCP_STATE_CLOSED)
+ return;
if (tc->state >= TCP_STATE_ESTABLISHED)
{
- TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 2);
+ TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
/* Lost FIN, retransmit and return */
- if (tcp_is_lost_fin (tc))
+ if (tc->flags & TCP_CONN_FINSNT)
{
tcp_send_fin (tc);
tc->rto_boff += 1;
/* Shouldn't be here. This condition is tricky because it has to take
* into account boff > 0 due to persist timeout. */
- if ((tc->rto_boff == 0 && tc->snd_una == tc->snd_una_max)
+ if ((tc->rto_boff == 0 && tc->snd_una == tc->snd_nxt)
|| (tc->rto_boff > 0 && seq_geq (tc->snd_una, tc->snd_congestion)
&& !tcp_flight_size (tc)))
{
tcp_update_rto (tc);
}
- /* Increment RTO backoff (also equal to number of retries) and go back
- * to first un-acked byte */
- tc->rto_boff += 1;
-
- /* First retransmit timeout */
- if (tc->rto_boff == 1)
- tcp_rxt_timeout_cc (tc);
- else
- scoreboard_clear (&tc->sack_sb);
+ /* Peer is dead or network connectivity is lost. Close connection.
+ * RFC 1122 section 4.2.3.5 recommends a value of at least 100s. For
+ * a min rto of 0.2s we need to retry about 8 times. */
+ if (tc->rto_boff >= TCP_RTO_BOFF_MAX)
+ {
+ tcp_send_reset (tc);
+ tcp_connection_set_state (tc, TCP_STATE_CLOSED);
+ session_transport_closing_notify (&tc->connection);
+ tcp_connection_timers_reset (tc);
+ tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.closewait_time);
+ return;
+ }
- /* If we've sent beyond snd_congestion, update it */
- if (seq_gt (tc->snd_una_max, tc->snd_congestion))
- tc->snd_congestion = tc->snd_una_max;
+ if (tcp_opts_sack_permitted (&tc->rcv_opts))
+ tcp_check_sack_reneging (tc);
- tc->snd_una_max = tc->snd_nxt = tc->snd_una;
- tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
+ /* Update send congestion to make sure that rxt has data to send */
+ tc->snd_congestion = tc->snd_nxt;
- /* Send one segment. Note that n_bytes may be zero due to buffer
- * shortfall */
+ /* Send the first unacked segment. If we're short on buffers, return
+ * as soon as possible */
n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, tc->snd_mss, &b);
-
- if (n_bytes == 0)
+ if (!n_bytes)
{
- tcp_retransmit_timer_force_update (tc);
+ tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, 1);
return;
}
bi = vlib_get_buffer_index (vm, b);
-
- /* For first retransmit, record timestamp (Eifel detection RFC3522) */
- if (tc->rto_boff == 1)
- tc->snd_rxt_ts = tcp_time_now_w_thread (tc->c_thread_index);
-
tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
- tcp_retransmit_timer_force_update (tc);
- }
- /* Retransmit for SYN */
- else if (tc->state == TCP_STATE_SYN_SENT)
- {
- /* Half-open connection actually moved to established but we were
- * waiting for syn retransmit to pop to call cleanup from the right
- * thread. */
- if (tc->flags & TCP_CONN_HALF_OPEN_DONE)
- {
- if (tcp_half_open_connection_cleanup (tc))
- {
- clib_warning ("could not remove half-open connection");
- ASSERT (0);
- }
- return;
- }
- TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 2);
+ tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
+ tcp_retransmit_timer_force_update (tc);
- /* Try without increasing RTO a number of times. If this fails,
- * start growing RTO exponentially */
tc->rto_boff += 1;
- if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
- tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
-
- tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN,
- tc->rto * TCP_TO_TIMER_TICK);
-
- if (PREDICT_FALSE (tcp_get_free_buffer_index (wrk, &bi)))
- return;
-
- b = vlib_get_buffer (vm, bi);
- tcp_init_buffer (vm, b);
- tcp_make_syn (tc, b);
+ if (tc->rto_boff == 1)
+ {
+ tcp_cc_init_rxt_timeout (tc);
+ /* Record timestamp. Eifel detection algorithm RFC3522 */
+ tc->snd_rxt_ts = tcp_tstamp (tc);
+ }
- tc->rtt_ts = 0;
- TCP_EVT_DBG (TCP_EVT_SYN_RXT, tc, 0);
+ if (tcp_opts_sack_permitted (&tc->rcv_opts))
+ scoreboard_init_high_rxt (&tc->sack_sb, tc->snd_una + tc->snd_mss);
- /* This goes straight to ipx_lookup. Retransmit timer set already */
- tcp_push_ip_hdr (wrk, tc, b);
- tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
+ tcp_program_retransmit (tc);
}
/* Retransmit SYN-ACK */
else if (tc->state == TCP_STATE_SYN_RCVD)
{
- TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 2);
+ TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
- tc->rto_boff += 1;
- if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
- tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
tc->rtt_ts = 0;
- if (PREDICT_FALSE (tcp_get_free_buffer_index (wrk, &bi)))
+ /* Passive open establish timeout */
+ if (tc->rto > TCP_ESTABLISH_TIME >> 1)
+ {
+ tcp_connection_set_state (tc, TCP_STATE_CLOSED);
+ tcp_connection_timers_reset (tc);
+ tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.cleanup_time);
+ return;
+ }
+
+ if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
{
- tcp_retransmit_timer_force_update (tc);
+ tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, 1);
return;
}
+ tc->rto_boff += 1;
+ if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
+ tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
+
+ tcp_retransmit_timer_force_update (tc);
+
b = vlib_get_buffer (vm, bi);
tcp_init_buffer (vm, b);
tcp_make_synack (tc, b);
- TCP_EVT_DBG (TCP_EVT_SYN_RXT, tc, 1);
+ TCP_EVT (TCP_EVT_SYN_RXT, tc, 1);
/* Retransmit timer already updated, just enqueue to output */
tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
}
}
+/**
+ * SYN retransmit timer handler. Active open only.
+ */
void
-tcp_timer_retransmit_handler (u32 index)
+tcp_timer_retransmit_syn_handler (u32 tc_index)
{
- tcp_timer_retransmit_handler_i (index, 0);
-}
+ u32 thread_index = vlib_get_thread_index ();
+ tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
+ vlib_main_t *vm = wrk->vm;
+ tcp_connection_t *tc;
+ vlib_buffer_t *b = 0;
+ u32 bi;
-void
-tcp_timer_retransmit_syn_handler (u32 index)
-{
- tcp_timer_retransmit_handler_i (index, 1);
+ tc = tcp_half_open_connection_get (tc_index);
+
+ /* Note: the connection may have transitioned to ESTABLISHED... */
+ if (PREDICT_FALSE (tc == 0 || tc->state != TCP_STATE_SYN_SENT))
+ return;
+
+ tc->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID;
+
+  /* Half-open connection actually moved to established, but we were
+   * waiting for the syn retransmit timer to pop so cleanup could run
+   * from the right thread. */
+ if (tc->flags & TCP_CONN_HALF_OPEN_DONE)
+ {
+ if (tcp_half_open_connection_cleanup (tc))
+ TCP_DBG ("could not remove half-open connection");
+ return;
+ }
+
+ TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
+ tc->rtt_ts = 0;
+
+ /* Active open establish timeout */
+ if (tc->rto >= TCP_ESTABLISH_TIME >> 1)
+ {
+ session_stream_connect_notify (&tc->connection, 1 /* fail */ );
+ tcp_connection_cleanup (tc);
+ return;
+ }
+
+ if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
+ {
+ tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN, 1);
+ return;
+ }
+
+ /* Try without increasing RTO a number of times. If this fails,
+ * start growing RTO exponentially */
+ tc->rto_boff += 1;
+ if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
+ tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
+
+ b = vlib_get_buffer (vm, bi);
+ tcp_init_buffer (vm, b);
+ tcp_make_syn (tc, b);
+
+ TCP_EVT (TCP_EVT_SYN_RXT, tc, 0);
+
+ /* This goes straight to ipx_lookup */
+ tcp_push_ip_hdr (wrk, tc, b);
+ tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
+
+ tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN,
+ tc->rto * TCP_TO_TIMER_TICK);
}
/**
u8 *data;
tc = tcp_connection_get_if_valid (index, thread_index);
-
if (!tc)
return;
tc->timers[TCP_TIMER_PERSIST] = TCP_TIMER_HANDLE_INVALID;
/* Problem already solved or worse */
- if (tc->state == TCP_STATE_CLOSED || tc->state > TCP_STATE_ESTABLISHED
- || tc->snd_wnd > tc->snd_mss || tcp_in_recovery (tc))
+ if (tc->state == TCP_STATE_CLOSED || tc->snd_wnd > tc->snd_mss
+ || (tc->flags & TCP_CONN_FINSNT))
return;
- available_bytes = session_tx_fifo_max_dequeue (&tc->connection);
- offset = tc->snd_una_max - tc->snd_una;
+ available_bytes = transport_max_tx_dequeue (&tc->connection);
+ offset = tc->snd_nxt - tc->snd_una;
/* Reprogram persist if no new bytes available to send. We may have data
* next time */
/*
* Try to force the first unsent segment (or buffer)
*/
- if (PREDICT_FALSE (tcp_get_free_buffer_index (wrk, &bi)))
+ if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
{
tcp_persist_timer_set (tc);
return;
tcp_validate_txf_size (tc, offset);
tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
- max_snd_bytes = clib_min (tc->snd_mss, tm->bytes_per_buffer - MAX_HDRS_LEN);
- n_bytes = stream_session_peek_bytes (&tc->connection, data, offset,
- max_snd_bytes);
+ max_snd_bytes = clib_min (tc->snd_mss,
+ tm->bytes_per_buffer - TRANSPORT_MAX_HDRS_LEN);
+ n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
+ max_snd_bytes);
b->current_length = n_bytes;
ASSERT (n_bytes != 0 && (tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)
|| tc->snd_nxt == tc->snd_una_max
|| tc->rto_boff > 1));
- tcp_push_hdr_i (tc, b, tc->state, /* compute opts */ 0, /* burst */ 0);
- tc->snd_una_max = tc->snd_nxt;
+ if (tc->flags & TCP_CONN_RATE_SAMPLE)
+ {
+ tcp_bt_check_app_limited (tc);
+ tcp_bt_track_tx (tc);
+ }
+
+ tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0,
+ /* burst */ 0, /* update_snd_nxt */ 1);
+ tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
int
tcp_retransmit_first_unacked (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
{
- u32 bi, old_snd_nxt, n_bytes;
vlib_main_t *vm = wrk->vm;
vlib_buffer_t *b;
+ u32 bi, n_bytes;
- old_snd_nxt = tc->snd_nxt;
- tc->snd_nxt = tc->snd_una;
-
- TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 1);
+ TCP_EVT (TCP_EVT_CC_EVT, tc, 1);
n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, tc->snd_mss, &b);
if (!n_bytes)
bi = vlib_get_buffer_index (vm, b);
tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
- tc->snd_nxt = old_snd_nxt;
return 0;
}
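+/**
+ * Transmit new (previously unsent) data, up to burst_size segments,
+ * constrained by the peer's receive window.
+ */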
+static int
+tcp_transmit_unsent (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
+ u32 burst_size)
+{
+ u32 offset, n_segs = 0, n_written, bi, available_wnd;
+ vlib_main_t *vm = wrk->vm;
+ vlib_buffer_t *b = 0;
+
+ offset = tc->snd_nxt - tc->snd_una;
+ available_wnd = tc->snd_wnd - offset;
+ burst_size = clib_min (burst_size, available_wnd / tc->snd_mss);
+
+ while (n_segs < burst_size)
+ {
+ n_written = tcp_prepare_segment (wrk, tc, offset, tc->snd_mss, &b);
+ if (!n_written)
+ goto done;
+
+ bi = vlib_get_buffer_index (vm, b);
+ tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
+ offset += n_written;
+ n_segs += 1;
+
+ tc->snd_nxt += n_written;
+ tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
+ }
+
+done:
+ return n_segs;
+}
+
/**
- * Do fast retransmit with SACKs
+ * Estimate send space using proportional rate reduction (RFC6937)
*/
-int
-tcp_fast_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
- u32 burst_size)
+static int
+tcp_fastrecovery_prr_snd_space (tcp_connection_t * tc)
+{
+ u32 pipe, prr_out;
+ int space;
+
+ pipe = tcp_flight_size (tc);
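+  /* prr_out: bytes sent while in recovery, i.e., retransmissions plus
+   * any new data sent past snd_congestion */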
+ prr_out = tc->snd_rxt_bytes + (tc->snd_nxt - tc->snd_congestion);
+
+ if (pipe > tc->ssthresh)
+ {
+ space = ((int) tc->prr_delivered * ((f64) tc->ssthresh / tc->prev_cwnd))
+ - prr_out;
+ }
+ else
+ {
+ int limit = tc->prr_delivered - prr_out + tc->snd_mss;
+ space = clib_min (tc->ssthresh - pipe, limit);
+ }
+ space = clib_max (space, prr_out ? 0 : tc->snd_mss);
+ return space;
+}
+
+#define scoreboard_rescue_rxt_valid(_sb, _tc) \
+ (seq_geq (_sb->rescue_rxt, _tc->snd_una) \
+ && seq_leq (_sb->rescue_rxt, _tc->snd_congestion))
+
+/**
+ * Do retransmit with SACKs
+ */
+static int
+tcp_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
+ u32 burst_size)
{
- vlib_main_t *vm = wrk->vm;
u32 n_written = 0, offset, max_bytes, n_segs = 0;
- vlib_buffer_t *b = 0;
sack_scoreboard_hole_t *hole;
+ vlib_main_t *vm = wrk->vm;
+ vlib_buffer_t *b = 0;
sack_scoreboard_t *sb;
- u32 bi, old_snd_nxt;
+ u32 bi, max_deq;
int snd_space;
u8 snd_limited = 0, can_rescue = 0;
- ASSERT (tcp_in_fastrecovery (tc));
+ ASSERT (tcp_in_cong_recovery (tc));
- old_snd_nxt = tc->snd_nxt;
- sb = &tc->sack_sb;
- snd_space = tcp_available_cc_snd_space (tc);
- hole = scoreboard_get_hole (sb, sb->cur_rxt_hole);
+ if (tcp_in_recovery (tc))
+ snd_space = tcp_available_cc_snd_space (tc);
+ else
+ snd_space = tcp_fastrecovery_prr_snd_space (tc);
if (snd_space < tc->snd_mss)
{
- tcp_program_fastretransmit (wrk, tc);
- goto done;
+ /* We're cc constrained so don't accumulate tokens */
+ transport_connection_tx_pacer_reset_bucket (&tc->connection,
+ vm->
+ clib_time.last_cpu_time);
+ return 0;
}
- TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 0);
+ TCP_EVT (TCP_EVT_CC_EVT, tc, 0);
+ sb = &tc->sack_sb;
+ hole = scoreboard_get_hole (sb, sb->cur_rxt_hole);
+
+ max_deq = transport_max_tx_dequeue (&tc->connection);
+ max_deq -= tc->snd_nxt - tc->snd_una;
+
while (snd_space > 0 && n_segs < burst_size)
{
- hole = scoreboard_next_rxt_hole (sb, hole,
- tcp_fastrecovery_sent_1_smss (tc),
- &can_rescue, &snd_limited);
+ hole = scoreboard_next_rxt_hole (sb, hole, max_deq, &can_rescue,
+ &snd_limited);
if (!hole)
{
- if (!can_rescue || !(seq_lt (sb->rescue_rxt, tc->snd_una)
- || seq_gt (sb->rescue_rxt,
- tc->snd_congestion)))
+ /* We are out of lost holes to retransmit so send some new data. */
+ if (max_deq)
{
- if (tcp_fastrecovery_first (tc))
- break;
-
- /* We tend to lose the first segment. Try re-resending
- * it but only once and after we've tried everything */
- hole = scoreboard_first_hole (sb);
- if (hole && hole->start == tc->snd_una)
- {
- tcp_retransmit_first_unacked (wrk, tc);
- tcp_fastrecovery_first_on (tc);
- n_segs += 1;
- }
- break;
+ u32 n_segs_new, av_window;
+ av_window = tc->snd_wnd - (tc->snd_nxt - tc->snd_una);
+ snd_space = clib_min (snd_space, av_window);
+ snd_space = clib_min (max_deq, snd_space);
+ burst_size = clib_min (burst_size - n_segs,
+ snd_space / tc->snd_mss);
+ burst_size = clib_min (burst_size, TCP_RXT_MAX_BURST);
+ n_segs_new = tcp_transmit_unsent (wrk, tc, burst_size);
+ if (max_deq > n_segs_new * tc->snd_mss)
+ tcp_program_retransmit (tc);
+
+ n_segs += n_segs_new;
+ goto done;
}
+ if (tcp_in_recovery (tc) || !can_rescue
+ || scoreboard_rescue_rxt_valid (sb, tc))
+ break;
+
/* If rescue rxt undefined or less than snd_una then one segment of
* up to SMSS octets that MUST include the highest outstanding
* unSACKed sequence number SHOULD be returned, and RescueRxt set to
max_bytes = clib_min (max_bytes, snd_space);
offset = tc->snd_congestion - tc->snd_una - max_bytes;
sb->rescue_rxt = tc->snd_congestion;
- tc->snd_nxt = tc->snd_una + offset;
n_written = tcp_prepare_retransmit_segment (wrk, tc, offset,
max_bytes, &b);
if (!n_written)
max_bytes = snd_limited ? clib_min (max_bytes, tc->snd_mss) : max_bytes;
if (max_bytes == 0)
break;
+
offset = sb->high_rxt - tc->snd_una;
- tc->snd_nxt = sb->high_rxt;
n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes,
&b);
+ ASSERT (n_written <= snd_space);
/* Nothing left to retransmit */
if (n_written == 0)
break;
bi = vlib_get_buffer_index (vm, b);
- sb->high_rxt += n_written;
tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
- ASSERT (n_written <= snd_space);
+
+ sb->high_rxt += n_written;
snd_space -= n_written;
n_segs += 1;
}
if (hole)
- tcp_program_fastretransmit (wrk, tc);
+ tcp_program_retransmit (tc);
done:
- /* If window allows, send 1 SMSS of new data */
- tc->snd_nxt = old_snd_nxt;
+
return n_segs;
}
/**
* Fast retransmit without SACK info
*/
-int
-tcp_fast_retransmit_no_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
- u32 burst_size)
+static int
+tcp_retransmit_no_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
+ u32 burst_size)
{
- u32 n_written = 0, offset = 0, bi, old_snd_nxt;
+ u32 n_written = 0, offset = 0, bi, max_deq, n_segs_now;
vlib_main_t *vm = wrk->vm;
int snd_space, n_segs = 0;
vlib_buffer_t *b;
ASSERT (tcp_in_fastrecovery (tc));
- TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 0);
+ TCP_EVT (TCP_EVT_CC_EVT, tc, 0);
- /* Start resending from first un-acked segment */
- old_snd_nxt = tc->snd_nxt;
- tc->snd_nxt = tc->snd_una;
snd_space = tcp_available_cc_snd_space (tc);
+ if (!tcp_fastrecovery_first (tc))
+ goto send_unsent;
+
+ /* RFC 6582: [If a partial ack], retransmit the first unacknowledged
+ * segment. */
while (snd_space > 0 && n_segs < burst_size)
{
- offset += n_written;
- n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, snd_space,
- &b);
+ n_written = tcp_prepare_retransmit_segment (wrk, tc, offset,
+ tc->snd_mss, &b);
/* Nothing left to retransmit */
if (n_written == 0)
bi = vlib_get_buffer_index (vm, b);
tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
snd_space -= n_written;
+ offset += n_written;
n_segs += 1;
}
- /* More data to resend */
- if (seq_lt (tc->snd_nxt, tc->snd_congestion))
- tcp_program_fastretransmit (wrk, tc);
+ if (n_segs == burst_size)
+ goto done;
- /* Restore snd_nxt. If window allows, send 1 SMSS of new data */
- tc->snd_nxt = old_snd_nxt;
+send_unsent:
+ /* RFC 6582: Send a new segment if permitted by the new value of cwnd. */
+ if (snd_space < tc->snd_mss || tc->snd_mss == 0)
+ goto done;
+
+ max_deq = transport_max_tx_dequeue (&tc->connection);
+ max_deq -= tc->snd_nxt - tc->snd_una;
+ if (max_deq)
+ {
+ snd_space = clib_min (max_deq, snd_space);
+ burst_size = clib_min (burst_size - n_segs, snd_space / tc->snd_mss);
+ n_segs_now = tcp_transmit_unsent (wrk, tc, burst_size);
+ if (max_deq > n_segs_now * tc->snd_mss)
+ tcp_program_retransmit (tc);
+ n_segs += n_segs_now;
+ }
+
+done:
+ tcp_fastrecovery_first_off (tc);
return n_segs;
}
/**
* Do fast retransmit
*/
-int
-tcp_fast_retransmit (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
- u32 burst_size)
+static int
+tcp_retransmit (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, u32 burst_size)
{
if (tcp_opts_sack_permitted (&tc->rcv_opts))
- return tcp_fast_retransmit_sack (wrk, tc, burst_size);
+ return tcp_retransmit_sack (wrk, tc, burst_size);
else
- return tcp_fast_retransmit_no_sack (wrk, tc, burst_size);
+ return tcp_retransmit_no_sack (wrk, tc, burst_size);
}
-static u32
-tcp_session_has_ooo_data (tcp_connection_t * tc)
+static int
+tcp_send_acks (tcp_connection_t * tc, u32 max_burst_size)
+{
+ int j, n_acks;
+
+ if (!tc->pending_dupacks)
+ {
+ tcp_send_ack (tc);
+ return 1;
+ }
+
+  /* If we're supposed to send dupacks but have no ooo data,
+   * send only one ack */
+ if (!vec_len (tc->snd_sacks))
+ {
+ tcp_send_ack (tc);
+ return 1;
+ }
+
+ /* Start with first sack block */
+ tc->snd_sack_pos = 0;
+
+ /* Generate enough dupacks to cover all sack blocks. Do not generate
+   * more dupacks than the number of packets received. But do generate at
+ * least 3, i.e., the number needed to signal congestion, if needed. */
+ n_acks = vec_len (tc->snd_sacks) / TCP_OPTS_MAX_SACK_BLOCKS;
+ n_acks = clib_min (n_acks, tc->pending_dupacks);
+ n_acks = clib_max (n_acks, clib_min (tc->pending_dupacks, 3));
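+  /* E.g., with 3 blocks per ack, 5 sack blocks and 4 pending dupacks:
+   * 5 / 3 = 1, raised to clib_min (4, 3) = 3 acks */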
+ for (j = 0; j < clib_min (n_acks, max_burst_size); j++)
+ tcp_send_ack (tc);
+
+ if (n_acks < max_burst_size)
+ {
+ tc->pending_dupacks = 0;
+ tc->snd_sack_pos = 0;
+ tc->dupacks_out += n_acks;
+ return n_acks;
+ }
+ else
+ {
+ TCP_DBG ("constrained by burst size");
+ tc->pending_dupacks = n_acks - max_burst_size;
+ tc->dupacks_out += max_burst_size;
+ tcp_program_dupack (tc);
+ return max_burst_size;
+ }
+}
+
+static int
+tcp_do_retransmit (tcp_connection_t * tc, u32 max_burst_size)
+{
+ u32 n_segs = 0, burst_size, sent_bytes, burst_bytes;
+ tcp_worker_ctx_t *wrk;
+
+ wrk = tcp_get_worker (tc->c_thread_index);
+ burst_bytes = transport_connection_tx_pacer_burst (&tc->connection,
+ wrk->vm->
+ clib_time.last_cpu_time);
+ burst_size = clib_min (max_burst_size, burst_bytes / tc->snd_mss);
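+  /* If the pacer's byte budget doesn't cover a full segment, try again
+   * later */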
+ if (!burst_size)
+ {
+ tcp_program_retransmit (tc);
+ return 0;
+ }
+
+ n_segs = tcp_retransmit (wrk, tc, burst_size);
+ sent_bytes = clib_min (n_segs * tc->snd_mss, burst_bytes);
+ transport_connection_tx_pacer_update_bytes (&tc->connection, sent_bytes);
+ return n_segs;
+}
+
+int
+tcp_session_custom_tx (void *conn, u32 max_burst_size)
{
- stream_session_t *s = session_get (tc->c_s_index, tc->c_thread_index);
- return svm_fifo_has_ooo_data (s->server_rx_fifo);
+ tcp_connection_t *tc = (tcp_connection_t *) conn;
+ u32 n_segs = 0;
+
+ if (tcp_in_cong_recovery (tc) && (tc->flags & TCP_CONN_RXT_PENDING))
+ {
+ tc->flags &= ~TCP_CONN_RXT_PENDING;
+ n_segs = tcp_do_retransmit (tc, max_burst_size);
+ max_burst_size -= n_segs;
+ }
+
+ if (!(tc->flags & TCP_CONN_SNDACK))
+ return n_segs;
+
+ tc->flags &= ~TCP_CONN_SNDACK;
+
+ /* We have retransmitted packets and no dupack */
+ if (n_segs && !tc->pending_dupacks)
+ return n_segs;
+
+ if (!max_burst_size)
+ {
+ tcp_program_ack (tc);
+ return max_burst_size;
+ }
+
+ n_segs += tcp_send_acks (tc, max_burst_size);
+
+ return n_segs;
}
+#endif /* CLIB_MARCH_VARIANT */
static void
tcp_output_handle_link_local (tcp_connection_t * tc0, vlib_buffer_t * b0,
tc = tcp_connection_get (vnet_buffer (b)->tcp.connection_index,
vm->thread_index);
t = vlib_add_trace (vm, node, b, sizeof (*t));
- clib_memcpy (&t->tcp_header, th, sizeof (t->tcp_header));
- clib_memcpy (&t->tcp_connection, tc, sizeof (t->tcp_connection));
+ clib_memcpy_fast (&t->tcp_header, th, sizeof (t->tcp_header));
+ clib_memcpy_fast (&t->tcp_connection, tc, sizeof (t->tcp_connection));
}
}
tcp_header_t *th0 = 0;
th0 = vlib_buffer_get_current (b0);
- TCP_EVT_DBG (TCP_EVT_OUTPUT, tc0, th0->flags, b0->current_length);
+ TCP_EVT (TCP_EVT_OUTPUT, tc0, th0->flags, b0->current_length);
if (is_ip4)
{
vlib_buffer_push_ip4 (vm, b0, &tc0->c_lcl_ip4, &tc0->c_rmt_ip4,
b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
vnet_buffer (b0)->l3_hdr_offset = (u8 *) ih0 - b0->data;
vnet_buffer (b0)->l4_hdr_offset = (u8 *) th0 - b0->data;
+ b0->flags |=
+ VNET_BUFFER_F_L3_HDR_OFFSET_VALID | VNET_BUFFER_F_L4_HDR_OFFSET_VALID;
th0->checksum = 0;
}
}
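+/**
+ * Flag chained buffers whose payload exceeds snd_mss for GSO so they
+ * can be segmented further down the tx path.
+ */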
always_inline void
-tcp_output_handle_packet (tcp_connection_t * tc0, vlib_buffer_t * b0,
- u32 * error0, u16 * next0, u8 is_ip4)
+tcp_check_if_gso (tcp_connection_t * tc, vlib_buffer_t * b)
{
+ if (PREDICT_TRUE (!(b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID)))
+ return;
+ u16 data_len =
+ b->current_length + b->total_length_not_including_first_buffer -
+ sizeof (tcp_header_t) - tc->snd_opts_len;
- if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
+ if (data_len > tc->snd_mss)
{
- *error0 = TCP_ERROR_INVALID_CONNECTION;
- *next0 = TCP_OUTPUT_NEXT_DROP;
- return;
+ ASSERT ((b->flags & VNET_BUFFER_F_L3_HDR_OFFSET_VALID) != 0);
+ ASSERT ((b->flags & VNET_BUFFER_F_L4_HDR_OFFSET_VALID) != 0);
+ b->flags |= VNET_BUFFER_F_GSO;
+ vnet_buffer2 (b)->gso_l4_hdr_sz =
+ sizeof (tcp_header_t) + tc->snd_opts_len;
+ vnet_buffer2 (b)->gso_size = tc->snd_mss;
+ }
+}
+
+always_inline void
+tcp_output_handle_packet (tcp_connection_t * tc0, vlib_buffer_t * b0,
+ vlib_node_runtime_t * error_node, u16 * next0,
+ u8 is_ip4)
+{
+  /* If an overriding next node is set, use it instead of ip-lookup */
+ if (tc0->next_node_index)
+ {
+ *next0 = tc0->next_node_index;
+ vnet_buffer (b0)->tcp.next_node_opaque = tc0->next_node_opaque;
+ }
+ else
+ {
+ *next0 = TCP_OUTPUT_NEXT_IP_LOOKUP;
}
vnet_buffer (b0)->sw_if_index[VLIB_TX] = tc0->c_fib_index;
if (!is_ip4)
{
+ u32 error0 = 0;
+
if (PREDICT_FALSE (ip6_address_is_link_local_unicast (&tc0->c_rmt_ip6)))
- tcp_output_handle_link_local (tc0, b0, next0, error0);
- }
+ tcp_output_handle_link_local (tc0, b0, next0, &error0);
- /* Filter out DUPACKs if there are no OOO segments left */
- if (PREDICT_FALSE (vnet_buffer (b0)->tcp.flags & TCP_BUF_FLAG_DUPACK))
- {
- /* N.B. Should not filter burst of dupacks. Two issues:
- * 1) dupacks open cwnd on remote peer when congested
- * 2) acks leaving should have the latest rcv_wnd since the
- * burst may have eaten up all of it, so only the old ones
- * could be filtered.
- */
- if (!tcp_session_has_ooo_data (tc0))
+ if (PREDICT_FALSE (error0))
{
- *error0 = TCP_ERROR_FILTERED_DUPACKS;
- *next0 = TCP_OUTPUT_NEXT_DROP;
+ b0->error = error_node->errors[error0];
return;
}
}
- /* Stop DELACK timer and fix flags */
- tc0->flags &= ~(TCP_CONN_SNDACK);
if (!TCP_ALWAYS_ACK)
tcp_timer_reset (tc0, TCP_TIMER_DELACK);
+
+ tc0->segs_out += 1;
}
always_inline uword
u32 n_left_from, *from, thread_index = vm->thread_index;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
u16 nexts[VLIB_FRAME_SIZE], *next;
+ vlib_node_runtime_t *error_node;
+
+ error_node = vlib_node_get_runtime (vm, tcp_node_index (output, is_ip4));
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
while (n_left_from >= 4)
{
- u32 error0 = TCP_ERROR_PKTS_SENT, error1 = TCP_ERROR_PKTS_SENT;
tcp_connection_t *tc0, *tc1;
{
CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
}
- next[0] = next[1] = TCP_OUTPUT_NEXT_IP_LOOKUP;
-
tc0 = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
thread_index);
tc1 = tcp_connection_get (vnet_buffer (b[1])->tcp.connection_index,
thread_index);
- tcp_output_push_ip (vm, b[0], tc0, is_ip4);
- tcp_output_push_ip (vm, b[1], tc1, is_ip4);
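+      /* Fast path: both connections valid, since !tcX is 1 only for a
+       * NULL pointer */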
+ if (PREDICT_TRUE (!tc0 + !tc1 == 0))
+ {
+ tcp_output_push_ip (vm, b[0], tc0, is_ip4);
+ tcp_output_push_ip (vm, b[1], tc1, is_ip4);
- tcp_output_handle_packet (tc0, b[0], &error0, &next[0], is_ip4);
- tcp_output_handle_packet (tc1, b[1], &error1, &next[1], is_ip4);
+ tcp_check_if_gso (tc0, b[0]);
+ tcp_check_if_gso (tc1, b[1]);
+
+ tcp_output_handle_packet (tc0, b[0], error_node, &next[0], is_ip4);
+ tcp_output_handle_packet (tc1, b[1], error_node, &next[1], is_ip4);
+ }
+ else
+ {
+ if (tc0 != 0)
+ {
+ tcp_output_push_ip (vm, b[0], tc0, is_ip4);
+ tcp_check_if_gso (tc0, b[0]);
+ tcp_output_handle_packet (tc0, b[0], error_node, &next[0],
+ is_ip4);
+ }
+ else
+ {
+ b[0]->error = error_node->errors[TCP_ERROR_INVALID_CONNECTION];
+ next[0] = TCP_OUTPUT_NEXT_DROP;
+ }
+ if (tc1 != 0)
+ {
+ tcp_output_push_ip (vm, b[1], tc1, is_ip4);
+ tcp_check_if_gso (tc1, b[1]);
+ tcp_output_handle_packet (tc1, b[1], error_node, &next[1],
+ is_ip4);
+ }
+ else
+ {
+ b[1]->error = error_node->errors[TCP_ERROR_INVALID_CONNECTION];
+ next[1] = TCP_OUTPUT_NEXT_DROP;
+ }
+ }
b += 2;
next += 2;
}
while (n_left_from > 0)
{
- u32 error0 = TCP_ERROR_PKTS_SENT;
tcp_connection_t *tc0;
if (n_left_from > 1)
CLIB_PREFETCH (b[1]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
}
- next[0] = TCP_OUTPUT_NEXT_IP_LOOKUP;
tc0 = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
thread_index);
- tcp_output_push_ip (vm, b[0], tc0, is_ip4);
- tcp_output_handle_packet (tc0, b[0], &error0, &next[0], is_ip4);
+ if (PREDICT_TRUE (tc0 != 0))
+ {
+ tcp_output_push_ip (vm, b[0], tc0, is_ip4);
+ tcp_check_if_gso (tc0, b[0]);
+ tcp_output_handle_packet (tc0, b[0], error_node, &next[0], is_ip4);
+ }
+ else
+ {
+ b[0]->error = error_node->errors[TCP_ERROR_INVALID_CONNECTION];
+ next[0] = TCP_OUTPUT_NEXT_DROP;
+ }
b += 1;
next += 1;
}
vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
+ vlib_node_increment_counter (vm, tcp_node_index (output, is_ip4),
+ TCP_ERROR_PKTS_SENT, frame->n_vectors);
return frame->n_vectors;
}
-static uword
-tcp4_output (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
+VLIB_NODE_FN (tcp4_output_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
{
return tcp46_output_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}
-static uword
-tcp6_output (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
+VLIB_NODE_FN (tcp6_output_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
{
return tcp46_output_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp4_output_node) =
{
- .function = tcp4_output,.name = "tcp4-output",
- /* Takes a vector of packets. */
- .vector_size = sizeof (u32),
- .n_errors = TCP_N_ERROR,
- .error_strings = tcp_error_strings,
- .n_next_nodes = TCP_OUTPUT_N_NEXT,
- .next_nodes = {
+ .name = "tcp4-output",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+ .n_errors = TCP_N_ERROR,
+ .protocol_hint = VLIB_NODE_PROTO_HINT_TCP,
+ .error_strings = tcp_error_strings,
+ .n_next_nodes = TCP_OUTPUT_N_NEXT,
+ .next_nodes = {
#define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
foreach_tcp4_output_next
#undef _
- },
- .format_buffer = format_tcp_header,
- .format_trace = format_tcp_tx_trace,
+ },
+ .format_buffer = format_tcp_header,
+ .format_trace = format_tcp_tx_trace,
};
/* *INDENT-ON* */
-VLIB_NODE_FUNCTION_MULTIARCH (tcp4_output_node, tcp4_output);
-
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp6_output_node) =
{
- .function = tcp6_output,
.name = "tcp6-output",
/* Takes a vector of packets. */
.vector_size = sizeof (u32),
.n_errors = TCP_N_ERROR,
+ .protocol_hint = VLIB_NODE_PROTO_HINT_TCP,
.error_strings = tcp_error_strings,
.n_next_nodes = TCP_OUTPUT_N_NEXT,
.next_nodes = {
};
/* *INDENT-ON* */
-VLIB_NODE_FUNCTION_MULTIARCH (tcp6_output_node, tcp6_output);
-
typedef enum _tcp_reset_next
{
TCP_RESET_NEXT_DROP,
else
th0 = ip6_next_header ((ip6_header_t *) th0);
t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
- clib_memcpy (&t0->tcp_header, th0, sizeof (t0->tcp_header));
+ clib_memcpy_fast (&t0->tcp_header, th0,
+ sizeof (t0->tcp_header));
}
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
return from_frame->n_vectors;
}
-static uword
-tcp4_send_reset (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
+VLIB_NODE_FN (tcp4_reset_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
{
return tcp46_send_reset_inline (vm, node, from_frame, 1);
}
-static uword
-tcp6_send_reset (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
+VLIB_NODE_FN (tcp6_reset_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
{
return tcp46_send_reset_inline (vm, node, from_frame, 0);
}
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp4_reset_node) = {
- .function = tcp4_send_reset,
.name = "tcp4-reset",
.vector_size = sizeof (u32),
.n_errors = TCP_N_ERROR,
};
/* *INDENT-ON* */
-VLIB_NODE_FUNCTION_MULTIARCH (tcp4_reset_node, tcp4_send_reset);
-
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp6_reset_node) = {
- .function = tcp6_send_reset,
.name = "tcp6-reset",
.vector_size = sizeof (u32),
.n_errors = TCP_N_ERROR,
};
/* *INDENT-ON* */
-VLIB_NODE_FUNCTION_MULTIARCH (tcp6_reset_node, tcp6_send_reset);
-
/*
* fd.io coding-style-patch-verification: ON
*