* @param to TCP options data structure to be populated
* @return -1 if parsing failed
*/
-int
+static int
tcp_options_parse (tcp_header_t * th, tcp_options_t * to)
{
const u8 *data;
/* Karn's rule, part 1. Don't use retransmitted segments to estimate
* RTT because they're ambiguous. */
if (tcp_in_cong_recovery (tc) || tc->sack_sb.sacked_bytes)
- goto done;
+ {
+ if (tcp_in_recovery (tc))
+ return 0;
+ goto done;
+ }
if (tc->rtt_ts && seq_geq (ack, tc->rtt_seq))
{
tc->rtt_ts = 0;
/* If we got here something must've been ACKed so make sure boff is 0,
- * even if mrrt is not valid since we update the rto lower */
+ * even if mrtt is not valid since we update the rto below */
tc->rto_boff = 0;
tcp_update_rto (tc);
return ((*is_dack || tcp_in_cong_recovery (tc)) && !tcp_is_lost_fin (tc));
}
-void
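+/** Pool index of a hole within the scoreboard */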
+static u32
+scoreboard_hole_index (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
+{
+ ASSERT (!pool_is_free_index (sb->holes, hole - sb->holes));
+ return hole - sb->holes;
+}
+
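+/** Number of bytes covered by a hole */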
+static u32
+scoreboard_hole_bytes (sack_scoreboard_hole_t * hole)
+{
+ return hole->end - hole->start;
+}
+
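+/** Get hole at given pool index, 0 if index is invalid */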
+sack_scoreboard_hole_t *
+scoreboard_get_hole (sack_scoreboard_t * sb, u32 index)
+{
+ if (index != TCP_INVALID_SACK_HOLE_INDEX)
+ return pool_elt_at_index (sb->holes, index);
+ return 0;
+}
+
+sack_scoreboard_hole_t *
+scoreboard_next_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
+{
+ if (hole->next != TCP_INVALID_SACK_HOLE_INDEX)
+ return pool_elt_at_index (sb->holes, hole->next);
+ return 0;
+}
+
+sack_scoreboard_hole_t *
+scoreboard_prev_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
+{
+ if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX)
+ return pool_elt_at_index (sb->holes, hole->prev);
+ return 0;
+}
+
+sack_scoreboard_hole_t *
+scoreboard_first_hole (sack_scoreboard_t * sb)
+{
+ if (sb->head != TCP_INVALID_SACK_HOLE_INDEX)
+ return pool_elt_at_index (sb->holes, sb->head);
+ return 0;
+}
+
+sack_scoreboard_hole_t *
+scoreboard_last_hole (sack_scoreboard_t * sb)
+{
+ if (sb->tail != TCP_INVALID_SACK_HOLE_INDEX)
+ return pool_elt_at_index (sb->holes, sb->tail);
+ return 0;
+}
+
+static void
scoreboard_remove_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
{
sack_scoreboard_hole_t *next, *prev;
pool_put (sb->holes, hole);
}
-sack_scoreboard_hole_t *
+static sack_scoreboard_hole_t *
scoreboard_insert_hole (sack_scoreboard_t * sb, u32 prev_index,
u32 start, u32 end)
{
return hole;
}
-void
+static void
scoreboard_update_bytes (tcp_connection_t * tc, sack_scoreboard_t * sb)
{
sack_scoreboard_hole_t *hole, *prev;
return hole;
}
-void
+static void
scoreboard_init_high_rxt (sack_scoreboard_t * sb, u32 seq)
{
sack_scoreboard_hole_t *hole;
sb->high_rxt = seq;
}
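+/** Initialize scoreboard to an empty hole list */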
+void
+scoreboard_init (sack_scoreboard_t * sb)
+{
+ sb->head = TCP_INVALID_SACK_HOLE_INDEX;
+ sb->tail = TCP_INVALID_SACK_HOLE_INDEX;
+ sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
+}
+
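+/** Free all holes and reset scoreboard counters */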
+void
+scoreboard_clear (sack_scoreboard_t * sb)
+{
+ sack_scoreboard_hole_t *hole;
+ while ((hole = scoreboard_first_hole (sb)))
+ {
+ scoreboard_remove_hole (sb, hole);
+ }
+ ASSERT (sb->head == sb->tail && sb->head == TCP_INVALID_SACK_HOLE_INDEX);
+ ASSERT (pool_elts (sb->holes) == 0);
+ sb->sacked_bytes = 0;
+ sb->last_sacked_bytes = 0;
+ sb->last_bytes_delivered = 0;
+ sb->snd_una_adv = 0;
+ sb->high_sacked = 0;
+ sb->high_rxt = 0;
+ sb->lost_bytes = 0;
+ sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
+}
+
/**
* Test that scoreboard is sane after recovery
*
* Returns 1 if the scoreboard is empty or if the first hole is beyond
* snd_una.
*/
-u8
+static u8
tcp_scoreboard_is_sane_post_recovery (tcp_connection_t * tc)
{
sack_scoreboard_hole_t *hole;
int i, j;
sb->last_sacked_bytes = 0;
- sb->snd_una_adv = 0;
- old_sacked_bytes = sb->sacked_bytes;
sb->last_bytes_delivered = 0;
+ sb->snd_una_adv = 0;
if (!tcp_opts_sack (&tc->rcv_opts)
&& sb->head == TCP_INVALID_SACK_HOLE_INDEX)
return;
+ old_sacked_bytes = sb->sacked_bytes;
+
/* Remove invalid blocks */
blk = tc->rcv_opts.sacks;
while (blk < vec_end (tc->rcv_opts.sacks))
&& timestamp_lt (tc->rcv_opts.tsecr, tc->snd_rxt_ts));
}
-int
+static int
tcp_cc_recover (tcp_connection_t * tc)
{
ASSERT (tcp_in_cong_recovery (tc));
* Legitimate ACK. 1) See if we can exit recovery
*/
/* XXX limit this only to first partial ack? */
- tcp_retransmit_timer_update (tc);
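+ /* While still below snd_congestion, force the update so the retransmit
+  * timer stays armed */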
+ if (seq_lt (tc->snd_una, tc->snd_congestion))
+ tcp_retransmit_timer_force_update (tc);
+ else
+ tcp_retransmit_timer_update (tc);
if (seq_geq (tc->snd_una, tc->snd_congestion))
{
{
tc->cc_algo->rcv_ack (tc);
tc->tsecr_last_ack = tc->rcv_opts.tsecr;
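+ /* Make sure a tx event is scheduled for this connection */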
+ transport_add_tx_event (&tc->connection);
return;
}
tcp_fast_retransmit (tc);
}
-void
-tcp_cc_init (tcp_connection_t * tc)
-{
- tc->cc_algo = tcp_cc_algo_get (TCP_CC_NEWRENO);
- tc->cc_algo->init (tc);
-}
-
/**
* Process incoming ACK
*/
/* When we entered recovery, we reset snd_nxt to snd_una. The peer
* apparently still has the data, so accept the ack */
if (tcp_in_recovery (tc)
- && seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_congestion)
- && seq_geq (vnet_buffer (b)->tcp.ack_number, tc->snd_una))
+ && seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_congestion))
{
- tc->snd_una_max = tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
+ tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
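+ /* Never move snd_una_max backwards */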
+ if (seq_gt (tc->snd_nxt, tc->snd_una_max))
+ tc->snd_una_max = tc->snd_nxt;
goto process_ack;
}
}
/** Enqueue data for delivery to application */
-always_inline int
+static int
tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b,
u16 data_len)
{
}
/** Enqueue out-of-order data */
-always_inline int
+static int
tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b,
u16 data_len)
{
tcp_connection_t tcp_connection;
} tcp_rx_trace_t;
-u8 *
+static u8 *
format_tcp_rx_trace (u8 * s, va_list * args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
return s;
}
-u8 *
+static u8 *
format_tcp_rx_trace_short (u8 * s, va_list * args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
s = format (s, "%d -> %d (%U)",
- clib_net_to_host_u16 (t->tcp_header.src_port),
- clib_net_to_host_u16 (t->tcp_header.dst_port), format_tcp_state,
+ clib_net_to_host_u16 (t->tcp_header.dst_port),
+ clib_net_to_host_u16 (t->tcp_header.src_port), format_tcp_state,
t->tcp_connection.state);
return s;
}
-void
+static void
tcp_set_rx_trace_data (tcp_rx_trace_t * t0, tcp_connection_t * tc0,
tcp_header_t * th0, vlib_buffer_t * b0, u8 is_ip4)
{
clib_memcpy (&t0->tcp_header, th0, sizeof (t0->tcp_header));
}
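+/** Add rx traces for all traced buffers in the frame */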
+static void
+tcp_established_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame, u8 is_ip4)
+{
+ u32 *from, n_left;
+
+ n_left = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+
+ while (n_left >= 1)
+ {
+ tcp_connection_t *tc0;
+ tcp_rx_trace_t *t0;
+ tcp_header_t *th0;
+ vlib_buffer_t *b0;
+ u32 bi0;
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
+ tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
+ vm->thread_index);
+ th0 = tcp_buffer_hdr (b0);
+ tcp_set_rx_trace_data (t0, tc0, th0, b0, is_ip4);
+ }
+
+ from += 1;
+ n_left -= 1;
+ }
+}
+
always_inline void
tcp_node_inc_counter_i (vlib_main_t * vm, u32 tcp4_node, u32 tcp6_node,
u8 is_ip4, u32 evt, u32 val)
always_inline uword
tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * from_frame, int is_ip4)
+ vlib_frame_t * frame, int is_ip4)
{
- u32 my_thread_index = vm->thread_index, errors = 0;
+ u32 thread_index = vm->thread_index, errors = 0;
u32 n_left_from, next_index, *from, *to_next;
u16 err_counters[TCP_N_ERROR] = { 0 };
u8 is_fin = 0;
- from = vlib_frame_vector_args (from_frame);
- n_left_from = from_frame->n_vectors;
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ tcp_established_trace_frame (vm, node, frame, is_ip4);
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
while (n_left_from > 0)
b0 = vlib_get_buffer (vm, bi0);
tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
- my_thread_index);
+ thread_index);
if (PREDICT_FALSE (tc0 == 0))
{
done:
b0->error = node->errors[error0];
- if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
- {
- tcp_rx_trace_t *t0 = vlib_add_trace (vm, node, b0,
- sizeof (*t0));
- tcp_set_rx_trace_data (t0, tc0, th0, b0, is_ip4);
- }
-
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
n_left_to_next, bi0, next0);
}
}
errors = session_manager_flush_enqueue_events (TRANSPORT_PROTO_TCP,
- my_thread_index);
+ thread_index);
err_counters[TCP_ERROR_EVENT_FIFO_FULL] = errors;
tcp_store_err_counters (established, err_counters);
- tcp_flush_frame_to_output (vm, my_thread_index, is_ip4);
- return from_frame->n_vectors;
+ tcp_flush_frame_to_output (vm, thread_index, is_ip4);
+
+ return frame->n_vectors;
}
static uword
if (tcp_opts_wscale (&new_tc0->rcv_opts))
new_tc0->snd_wscale = new_tc0->rcv_opts.wscale;
- /* RFC1323: SYN and SYN-ACK wnd not scaled */
- new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window);
+ new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
+ << new_tc0->snd_wscale;
new_tc0->snd_wl1 = seq0;
new_tc0->snd_wl2 = ack0;
tcp_connection_timers_reset (tc0);
tcp_make_fin (tc0, b0);
tc0->snd_nxt += 1;
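+ /* Account for the FIN sequence number and make sure it is
+  * retransmitted if lost */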
+ tc0->snd_una_max = tc0->snd_nxt;
+ tcp_retransmit_timer_set (tc0);
next0 = tcp_next_output (tc0->c_is_ip4);
stream_session_disconnect_notify (&tc0->connection);
tc0->state = TCP_STATE_CLOSE_WAIT;
static void
tcp_input_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_buffer_t ** bufs, u32 n_bufs, u8 is_ip4)
+ vlib_buffer_t ** bs, u32 n_bufs, u8 is_ip4)
{
tcp_connection_t *tc;
tcp_header_t *tcp;
tcp_rx_trace_t *t;
- u32 n_trace;
int i;
- n_trace = vlib_get_trace_count (vm, node);
- for (i = 0; i < clib_min (n_trace, n_bufs); i++)
+ for (i = 0; i < n_bufs; i++)
{
- t = vlib_add_trace (vm, node, bufs[i], sizeof (*t));
- tc = tcp_connection_get (vnet_buffer (bufs[i])->tcp.connection_index,
- vm->thread_index);
- tcp = vlib_buffer_get_current (bufs[i]);
- tcp_set_rx_trace_data (t, tc, tcp, bufs[i], is_ip4);
+ if (bs[i]->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ t = vlib_add_trace (vm, node, bs[i], sizeof (*t));
+ tc = tcp_connection_get (vnet_buffer (bs[i])->tcp.connection_index,
+ vm->thread_index);
+ tcp = vlib_buffer_get_current (bs[i]);
+ tcp_set_rx_trace_data (t, tc, tcp, bs[i], is_ip4);
+ }
}
}
/* ACK for a SYN-ACK -> tcp-rcv-process. */
_(SYN_RCVD, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
_(SYN_RCVD, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
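+ /* RST-ACK for a SYN-ACK -> tcp-rcv-process. */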
+ _(SYN_RCVD, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
+ TCP_ERROR_NONE);
_(SYN_RCVD, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
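+ /* FIN-ACK for a SYN-ACK -> tcp-rcv-process. */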
+ _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
+ TCP_ERROR_NONE);
/* SYN-ACK for a SYN */
_(SYN_SENT, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
TCP_ERROR_NONE);
#undef _
}
-clib_error_t *
+static clib_error_t *
tcp_input_init (vlib_main_t * vm)
{
clib_error_t *error = 0;