}
static void
-preallocate_fifo_pairs (svm_fifo_segment_header_t * fsh,
+preallocate_fifo_pairs (svm_fifo_segment_private_t * s,
svm_fifo_segment_create_args_t * a)
{
- u32 rx_fifo_size, tx_fifo_size;
- u32 rx_rounded_data_size, tx_rounded_data_size;
+ svm_fifo_segment_header_t *fsh = s->h;
+ u32 rx_fifo_size, tx_fifo_size, pairs_to_allocate;
+ u32 rx_rounded_data_size, tx_rounded_data_size, pair_size;
svm_fifo_t *f;
u8 *rx_fifo_space, *tx_fifo_space;
int rx_freelist_index, tx_freelist_index;
- max_log2 (FIFO_SEGMENT_MIN_FIFO_SIZE);
/* Calculate space requirements */
- rx_fifo_size = (sizeof (*f) + rx_rounded_data_size)
- * a->preallocated_fifo_pairs;
- tx_fifo_size = (sizeof (*f) + tx_rounded_data_size)
- * a->preallocated_fifo_pairs;
+ pair_size = 2 * sizeof (*f) + rx_rounded_data_size + tx_rounded_data_size;
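+ /* Don't try to carve out more pairs than the segment can hold */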
+ pairs_to_allocate = clib_min (s->ssvm.ssvm_size / pair_size,
+ a->preallocated_fifo_pairs);
+ rx_fifo_size = (sizeof (*f) + rx_rounded_data_size) * pairs_to_allocate;
+ tx_fifo_size = (sizeof (*f) + tx_rounded_data_size) * pairs_to_allocate;
vec_validate_init_empty (fsh->free_fifos,
clib_max (rx_freelist_index, tx_freelist_index),
/* Carve rx fifo space */
f = (svm_fifo_t *) rx_fifo_space;
- for (i = 0; i < a->preallocated_fifo_pairs; i++)
+ for (i = 0; i < pairs_to_allocate; i++)
{
f->freelist_index = rx_freelist_index;
f->next = fsh->free_fifos[rx_freelist_index];
}
/* Carve tx fifo space */
f = (svm_fifo_t *) tx_fifo_space;
- for (i = 0; i < a->preallocated_fifo_pairs; i++)
+ for (i = 0; i < pairs_to_allocate; i++)
{
f->freelist_index = tx_freelist_index;
f->next = fsh->free_fifos[tx_freelist_index];
tx_fifo_space += sizeof (*f) + tx_rounded_data_size;
f = (svm_fifo_t *) tx_fifo_space;
}
+
+ /* Account for the pairs allocated */
+ a->preallocated_fifo_pairs -= pairs_to_allocate;
}
/** (master) create an svm fifo segment */
sh->opaque[0] = fsh;
s->h = fsh;
fsh->segment_name = format (0, "%s%c", a->segment_name, 0);
- preallocate_fifo_pairs (fsh, a);
+ preallocate_fifo_pairs (s, a);
ssvm_pop_heap (oldheap);
segment_count = a->private_segment_count;
}
- /* Spread preallocated fifo pairs across segments */
- a->preallocated_fifo_pairs =
- (a->preallocated_fifo_pairs + segment_count - 1) / segment_count;
-
/* Allocate segments */
for (i = 0; i < segment_count; i++)
{
if (a->private_segment_count)
{
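+ /* Flag all but the first segment as preallocated; such segments
+ * are not deleted while the app is still attached */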
+ if (i != 0)
+ fsh->flags |= FIFO_SEGMENT_F_IS_PREALLOCATED;
oldheap = clib_mem_get_heap ();
clib_mem_set_heap (sh->heap);
- preallocate_fifo_pairs (fsh, a);
+ preallocate_fifo_pairs (s, a);
clib_mem_set_heap (oldheap);
}
sh->ready = 1;
#define FIFO_SEGMENT_F_IS_PRIVATE 1 << 0 /* Private segment */
#define FIFO_SEGMENT_F_IS_MAIN_HEAP 1 << 1 /* Segment is main heap */
+#define FIFO_SEGMENT_F_IS_PREALLOCATED 1 << 2 /* Segment is preallocated */
typedef struct
{
hash_foreach (handle, index, app->listeners_table,
({
vec_add1 (handles, handle);
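+ /* Detach the segment manager from the app; it can be cleaned up
+ * once all of its fifos are gone */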
+ sm = segment_manager_get (index);
+ sm->app_index = SEGMENT_MANAGER_INVALID_APP_INDEX;
}));
/* *INDENT-ON* */
segment_manager_has_fifos (segment_manager_t * sm)
{
svm_fifo_segment_private_t *segment;
- /* Weird, but handle it */
- if (vec_len (sm->segment_indices) == 0)
- return 0;
- if (vec_len (sm->segment_indices) == 1)
- {
- segment = svm_fifo_segment_get_segment (sm->segment_indices[0]);
- if (svm_fifo_segment_num_fifos (segment) == 0)
- return 0;
- }
- if (CLIB_DEBUG)
+ int i;
+
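+ /* Return 1 if any segment holds fifos. Non-preallocated segments
+ * should be deleted once emptied, so warn if one lingers (debug only) */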
+ for (i = 0; i < vec_len (sm->segment_indices); i++)
{
- svm_fifo_segment_private_t *segment;
- int i;
- for (i = 1; i < vec_len (sm->segment_indices); i++)
- {
- segment = svm_fifo_segment_get_segment (sm->segment_indices[i]);
- if (!svm_fifo_segment_has_fifos (segment))
- clib_warning ("segment has no fifos!");
- }
+ segment = svm_fifo_segment_get_segment (sm->segment_indices[i]);
+ if (CLIB_DEBUG && i && !svm_fifo_segment_has_fifos (segment)
+ && !(segment->h->flags & FIFO_SEGMENT_F_IS_PREALLOCATED))
+ clib_warning ("segment %d has no fifos!", sm->segment_indices[i]);
+ if (svm_fifo_segment_has_fifos (segment))
+ return 1;
}
- return 1;
+ return 0;
+}
+
+static u8
+segment_manager_app_detached (segment_manager_t * sm)
+{
+ return (sm->app_index == SEGMENT_MANAGER_INVALID_APP_INDEX);
}
static void
clib_spinlock_lock (&sm->lockp);
svm_segment_index = sm->segment_indices[segment_index];
fifo_segment = svm_fifo_segment_get_segment (svm_segment_index);
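+ /* Don't delete preallocated segments while the app is attached */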
+ if (!fifo_segment
+ || ((fifo_segment->h->flags & FIFO_SEGMENT_F_IS_PREALLOCATED)
+ && !segment_manager_app_detached (sm)))
+ {
+ clib_spinlock_unlock (&sm->lockp);
+ return;
+ }
svm_fifo_segment_delete (fifo_segment);
vec_del1 (sm->segment_indices, segment_index);
clib_spinlock_unlock (&sm->lockp);
*
* Since the fifos allocated in the segment keep backpointers to the sessions,
* we call session disconnect prior to removing the segment. This
- * subsequently propages into transport.
+ * subsequently propagates into transport.
*/
void
segment_manager_del (segment_manager_t * sm)
{
+ int i;
- ASSERT (vec_len (sm->segment_indices) <= 1);
- if (vec_len (sm->segment_indices))
+ ASSERT (!segment_manager_has_fifos (sm)
+ && segment_manager_app_detached (sm));
+
+ /* Remove any empty preallocated segments that are still around. Also,
+ * the first segment in the first segment manager is not removed when
+ * all fifos are removed; it can only be removed when the manager is
+ * explicitly deleted/detached by the app, as is the case here. */
+ for (i = vec_len (sm->segment_indices) - 1; i >= 0; i--)
{
- /* The first segment in the first segment manager is not removed when
- * all fifos are removed. It can only be removed when the manager is
- * explicitly deleted/detached by the app. */
if (CLIB_DEBUG)
{
- svm_fifo_segment_private_t *fifo_segment;
- fifo_segment =
- svm_fifo_segment_get_segment (sm->segment_indices[0]);
- ASSERT (!svm_fifo_segment_has_fifos (fifo_segment));
+ svm_fifo_segment_private_t *segment;
+ segment = svm_fifo_segment_get_segment (sm->segment_indices[i]);
+ ASSERT (!svm_fifo_segment_has_fifos (segment));
}
- segment_manager_del_segment (sm, 0);
+ segment_manager_del_segment (sm, i);
}
clib_spinlock_free (&sm->lockp);
if (CLIB_DEBUG)
segment_manager_del_sessions (sm);
else
{
- ASSERT (!sm->first_is_protected
- || sm->app_index == SEGMENT_MANAGER_INVALID_APP_INDEX);
+ ASSERT (!sm->first_is_protected || segment_manager_app_detached (sm));
segment_manager_del (sm);
}
}
}
/* Remove segment manager if no sessions and detached from app */
- if (sm->app_index == SEGMENT_MANAGER_INVALID_APP_INDEX && is_first)
+ if (segment_manager_app_detached (sm)
+ && !segment_manager_has_fifos (sm))
segment_manager_del (sm);
}
}
st);
if (handle == HALF_OPEN_LOOKUP_INVALID_VALUE)
{
- clib_warning ("half-open was removed!");
+ TCP_DBG ("half-open was removed!");
return -1;
}
u32 thread_index)
{
static u16 serial_number = 0;
+ u32 tries = 0;
session_fifo_event_t evt;
unix_shared_memory_queue_t *q;
evt.event_id = serial_number++;
q = session_manager_get_vpp_event_queue (thread_index);
-
- /* Based on request block (or not) for lack of space */
- if (PREDICT_TRUE (q->cursize < q->maxsize))
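+ /* Retry the non-blocking enqueue a few times if the queue is full,
+ * then give up and drop the event */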
+ while (unix_shared_memory_queue_add (q, (u8 *) & evt, 1))
{
- if (unix_shared_memory_queue_add (q, (u8 *) & evt,
- 0 /* do wait for mutex */ ))
+ if (tries++ == 3)
{
- clib_warning ("failed to enqueue evt");
+ TCP_DBG ("failed to enqueue evt");
+ break;
}
}
- else
- {
- clib_warning ("queue full");
- return;
- }
}
/**
return 0;
}
+ /* Check how much we can pull. */
+ max_dequeue0 = svm_fifo_max_dequeue (s0->server_tx_fifo);
+
if (peek_data)
{
- /* Offset in rx fifo from where to peek data */
+ /* Offset in rx fifo from where to peek data */
tx_offset = transport_vft->tx_fifo_offset (tc0);
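+ /* Bytes up to tx_offset are still in flight and can't be dequeued */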
+ if (PREDICT_FALSE (tx_offset >= max_dequeue0))
+ max_dequeue0 = 0;
+ else
+ max_dequeue0 -= tx_offset;
}
- /* Check how much we can pull. If buffering, subtract the offset */
- max_dequeue0 = svm_fifo_max_dequeue (s0->server_tx_fifo) - tx_offset;
-
/* Nothing to read, return */
if (max_dequeue0 == 0)
{
{
n_bytes_read = svm_fifo_peek (s0->server_tx_fifo, tx_offset,
len_to_deq0, data0);
+ if (n_bytes_read <= 0)
+ goto dequeue_fail;
/* Keep track of progress locally; the transport is also supposed to
* increment it independently when pushing the header */
tx_offset += n_bytes_read;
{
n_bytes_read = svm_fifo_dequeue_nowait (s0->server_tx_fifo,
len_to_deq0, data0);
+ if (n_bytes_read <= 0)
+ goto dequeue_fail;
}
- if (n_bytes_read <= 0)
- goto dequeue_fail;
-
b0->current_length = n_bytes_read;
left_to_snd0 -= n_bytes_read;
case FIFO_EVENT_APP_TX:
s0 = session_event_get_session (e0, my_thread_index);
- if (CLIB_DEBUG && !s0)
+ if (PREDICT_FALSE (!s0))
{
clib_warning ("It's dead, Jim!");
continue;
if ((i % 4) == 0)
vlib_process_suspend (vm, 10e-6);
ASSERT (i + 1 >= tm->ready_connections);
- while (i + 1 - tm->ready_connections > 8000)
+ while (i + 1 - tm->ready_connections > 1000)
{
vlib_process_suspend (vm, 100e-6);
}
/* If not snd_wnd constrained and we can't write at least a segment,
* don't try at all */
if (PREDICT_FALSE (snd_space < tc->snd_mss))
- return 0;
+ return snd_space < tc->cwnd ? 0 : snd_space;
/* round down to mss multiple */
return snd_space - (snd_space % tc->snd_mss);
{
ASSERT (tc->state == TCP_STATE_SYN_SENT);
stream_session_connect_notify (&tc->connection, 1 /* fail */ );
+ TCP_DBG ("establish pop: %U", format_tcp_connection, tc, 2);
}
else
{
/* note: the connection may have already disappeared */
if (PREDICT_FALSE (tc == 0))
return;
-
+ TCP_DBG ("establish pop: %U", format_tcp_connection, tc, 2);
ASSERT (tc->state == TCP_STATE_SYN_RCVD);
/* Start cleanup. App wasn't notified yet so use delete notify as
* opposed to delete to cleanup session layer state. */
vec_validate (tm->tx_frames[0], num_threads - 1);
vec_validate (tm->tx_frames[1], num_threads - 1);
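+ /* Per-worker frames towards the ip4/ip6 lookup nodes, filled and
+ * flushed like the tcp output frames above */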
+ vec_validate (tm->ip_lookup_tx_frames[0], num_threads - 1);
+ vec_validate (tm->ip_lookup_tx_frames[1], num_threads - 1);
tm->bytes_per_buffer = vlib_buffer_free_list_buffer_size
(vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
#define TCP_ESTABLISH_TIME 750 /* 75s */
#define TCP_SYN_RCVD_TIME 600 /* 60s */
#define TCP_2MSL_TIME 300 /* 30s */
-#define TCP_CLOSEWAIT_TIME 20 /* 0.1s */
-#define TCP_CLEANUP_TIME 5 /* 0.5s Time to wait before cleanup */
+#define TCP_CLOSEWAIT_TIME 20 /* 2s */
+#define TCP_TIMEWAIT_TIME 20 /* 2s */
+#define TCP_CLEANUP_TIME 10 /* 1s Time to wait before cleanup */
#define TCP_TIMER_PERSIST_MIN 2 /* 0.2s */
#define TCP_RTO_MAX 60 * THZ /* Min max RTO (60s) as per RFC6298 */
/** per-worker tx buffer free lists */
u32 **tx_buffers;
- /** per-worker tx frames to 4/6 output nodes */
+ /** per-worker tx frames to tcp 4/6 output nodes */
vlib_frame_t **tx_frames[2];
+ /** per-worker tx frames to ip 4/6 lookup nodes */
+ vlib_frame_t **ip_lookup_tx_frames[2];
/* Per worker-thread timer wheel for connections timers */
tw_timer_wheel_16t_2w_512sl_t *timer_wheels;
* Infra and evt track setup
*/
-#define TCP_DBG(_tc, _evt, _args...) \
-{ \
- u8 *_tmp = 0; \
- _tmp = format(_tmp, "%U", format_tcp_connection_verbose, _tc); \
- clib_warning("%s", _tmp); \
- vec_free(_tmp); \
-}
+#define TCP_DBG(_fmt, _args...) clib_warning (_fmt, ##_args)
#define DECLARE_ETD(_tc, _e, _size) \
struct \
#define TCP_EVT_DBG(_evt, _args...) CC(_evt, _HANDLER)(_args)
#else
#define TCP_EVT_DBG(_evt, _args...)
+#define TCP_DBG(_fmt, _args...)
#endif
/*
if (tcp_syn (th0))
{
/* TODO implement RFC 5961 */
- if (tc0->state != TCP_STATE_SYN_RCVD)
- tcp_make_ack (tc0, b0);
+ if (tc0->state == TCP_STATE_SYN_RCVD)
+ {
+ tcp_make_synack (tc0, b0);
+ TCP_EVT_DBG (TCP_EVT_SYN_RCVD, tc0, 0);
+ }
else
- tcp_make_synack (tc0, b0);
+ {
+ tcp_make_ack (tc0, b0);
+ TCP_EVT_DBG (TCP_EVT_SYNACK_RCVD, tc0);
+ }
*next0 = tcp_next_output (tc0->c_is_ip4);
- TCP_EVT_DBG (TCP_EVT_SYN_RCVD, tc0, 0);
return -1;
}
/* 8: check the FIN bit */
if (PREDICT_FALSE (is_fin))
{
- /* Enter CLOSE-WAIT and notify session. Don't send ACK, instead
- * wait for session to call close. To avoid lingering
+ /* Enter CLOSE-WAIT and notify session. To avoid lingering
* in CLOSE-WAIT, set timer (reuse WAITCLOSE). */
- tc0->state = TCP_STATE_CLOSE_WAIT;
- TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc0);
+ /* Account for the FIN if nothing else was received */
if (vnet_buffer (b0)->tcp.data_len == 0)
- {
- tc0->rcv_nxt += 1;
- next0 = TCP_ESTABLISHED_NEXT_DROP;
- }
+ tc0->rcv_nxt += 1;
+ tcp_make_ack (tc0, b0);
+ next0 = tcp_next_output (tc0->c_is_ip4);
+ tc0->state = TCP_STATE_CLOSE_WAIT;
stream_session_disconnect_notify (&tc0->connection);
tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME);
+ TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc0);
}
done:
seq0 = vnet_buffer (b0)->tcp.seq_number;
tcp0 = tcp_buffer_hdr (b0);
+ /* Crude check for a connection/packet mismatch: most likely the
+ * connection just switched to established */
+ if (PREDICT_FALSE (tcp0->dst_port != tc0->c_lcl_port
+ || tcp0->src_port != tc0->c_rmt_port))
+ goto drop;
+
if (PREDICT_FALSE
(!tcp_ack (tcp0) && !tcp_rst (tcp0) && !tcp_syn (tcp0)))
goto drop;
tcp_header_t *tcp0 = 0;
tcp_connection_t *tc0;
u32 next0 = TCP_RCV_PROCESS_NEXT_DROP, error0 = TCP_ERROR_ENQUEUED;
+ u8 is_fin0;
bi0 = from[0];
to_next[0] = bi0;
}
tcp0 = tcp_buffer_hdr (b0);
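+ /* Cache the FIN flag: the buffer may be rewritten for the reply
+ * (e.g. by tcp_make_ack) before the flag is checked below */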
+ is_fin0 = tcp_is_fin (tcp0);
/* SYNs, FINs and data consume sequence numbers */
vnet_buffer (b0)->tcp.seq_end = vnet_buffer (b0)->tcp.seq_number
- + tcp_is_syn (tcp0) + tcp_is_fin (tcp0)
- + vnet_buffer (b0)->tcp.data_len;
+ + tcp_is_syn (tcp0) + is_fin0 + vnet_buffer (b0)->tcp.data_len;
if (CLIB_DEBUG)
{
/* If FIN is ACKed */
else if (tc0->snd_una == tc0->snd_una_max)
{
- tc0->rcv_nxt += 1;
tc0->state = TCP_STATE_FIN_WAIT_2;
TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
- if (tcp_fin (tcp0))
- {
- /* Stop all timers, 2MSL will be set lower */
- tcp_connection_timers_reset (tc0);
- }
- else
- {
- /* Wait for peer to finish sending its data */
- tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE,
- TCP_2MSL_TIME);
- }
+ /* Stop all retransmit timers because we have nothing more
+ * to send. Enable waitclose though because we're willing to
+ * wait for peer's FIN but not indefinitely. */
+ tcp_connection_timers_reset (tc0);
+ tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
}
break;
case TCP_STATE_FIN_WAIT_2:
if (!tcp_rcv_ack_is_acceptable (tc0, b0))
goto drop;
+ tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
/* Apparently our FIN was lost */
- if (tcp_fin (tcp0))
+ if (is_fin0)
{
- /* Don't "make" fin since that increments snd_nxt */
tcp_send_fin (tc0);
goto drop;
}
* particular, this makes sure that we won't have dead sessions
* when processing events on the tx path */
tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_CLEANUP_TIME);
-
- /* Stop retransmit */
tcp_retransmit_timer_reset (tc0);
goto drop;
goto drop;
tcp_make_ack (tc0, b0);
- tcp_timer_reset (tc0, TCP_TIMER_WAITCLOSE);
- tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
+ tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
goto drop;
case TCP_STATE_FIN_WAIT_2:
if (vnet_buffer (b0)->tcp.data_len)
error0 = tcp_segment_rcv (tm, tc0, b0, &next0);
+ else if (is_fin0)
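+ /* Nothing but the FIN: account for its sequence number */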
+ tc0->rcv_nxt += 1;
break;
case TCP_STATE_CLOSE_WAIT:
case TCP_STATE_CLOSING:
}
/* 8: check the FIN bit */
- if (!tcp_fin (tcp0))
+ if (!is_fin0)
goto drop;
switch (tc0->state)
tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
break;
case TCP_STATE_FIN_WAIT_2:
- /* Got FIN, send ACK! */
+ /* Got FIN, send ACK! Be more aggressive with resource cleanup */
tc0->state = TCP_STATE_TIME_WAIT;
tcp_connection_timers_reset (tc0);
- tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME);
+ tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_TIMEWAIT_TIME);
tcp_make_ack (tc0, b0);
next0 = tcp_next_output (is_ip4);
TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
break;
case TCP_STATE_TIME_WAIT:
- /* Remain in the TIME-WAIT state. Restart the 2 MSL time-wait
+ /* Remain in the TIME-WAIT state. Restart the time-wait
* timeout.
*/
- tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
+ tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_TIMEWAIT_TIME);
break;
}
TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc0);
TCP_ERROR_NONE);
_(TIME_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
_(TIME_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
- _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_CONNECTION_CLOSED);
+ _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED);
_(CLOSED, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED);
- _(CLOSED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
+ _(CLOSED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
TCP_ERROR_CONNECTION_CLOSED);
#undef _
}
}
always_inline void
-tcp_enqueue_to_ip_lookup (vlib_main_t * vm, vlib_buffer_t * b, u32 bi,
- u8 is_ip4)
+tcp_enqueue_to_ip_lookup_i (vlib_main_t * vm, vlib_buffer_t * b, u32 bi,
+ u8 is_ip4, u8 flush)
{
+ tcp_main_t *tm = vnet_get_tcp_main ();
+ u32 thread_index = vlib_get_thread_index ();
u32 *to_next, next_index;
vlib_frame_t *f;
/* Send to IP lookup */
next_index = is_ip4 ? ip4_lookup_node.index : ip6_lookup_node.index;
- f = vlib_get_frame_to_node (vm, next_index);
+ if (VLIB_BUFFER_TRACE_TRAJECTORY > 0)
+ {
+ b->pre_data[0] = 2;
+ b->pre_data[1] = next_index;
+ }
+
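+ /* Reuse the thread's pending frame to the lookup node, or allocate
+ * a fresh one */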
+ f = tm->ip_lookup_tx_frames[!is_ip4][thread_index];
+ if (!f)
+ {
+ f = vlib_get_frame_to_node (vm, next_index);
+ ASSERT (f);
+ tm->ip_lookup_tx_frames[!is_ip4][thread_index] = f;
+ }
- /* Enqueue the packet */
to_next = vlib_frame_vector_args (f);
- to_next[0] = bi;
- f->n_vectors = 1;
- vlib_put_frame_to_node (vm, next_index, f);
+ to_next[f->n_vectors] = bi;
+ f->n_vectors += 1;
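+ /* Ship the frame when it fills up, or right away when flushing */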
+ if (flush || f->n_vectors == VLIB_FRAME_SIZE)
+ {
+ vlib_put_frame_to_node (vm, next_index, f);
+ tm->ip_lookup_tx_frames[!is_ip4][thread_index] = 0;
+ }
+}
+
+always_inline void
+tcp_enqueue_to_ip_lookup_now (vlib_main_t * vm, vlib_buffer_t * b, u32 bi,
+ u8 is_ip4)
+{
+ tcp_enqueue_to_ip_lookup_i (vm, b, bi, is_ip4, 1);
+}
+
+always_inline void
+tcp_enqueue_to_ip_lookup (vlib_main_t * vm, vlib_buffer_t * b, u32 bi,
+ u8 is_ip4)
+{
+ tcp_enqueue_to_ip_lookup_i (vm, b, bi, is_ip4, 0);
}
always_inline void
/* Decide where to send the packet */
next_index = is_ip4 ? tcp4_output_node.index : tcp6_output_node.index;
-
- /* Initialize the trajectory trace, if configured */
if (VLIB_BUFFER_TRACE_TRAJECTORY > 0)
{
b->pre_data[0] = 1;
ASSERT (!bogus);
}
- tcp_enqueue_to_ip_lookup (vm, b, bi, is_ip4);
+ tcp_enqueue_to_ip_lookup_now (vm, b, bi, is_ip4);
TCP_EVT_DBG (TCP_EVT_RST_SENT, tc);
}
}
/**
- * Flush both v4 and v6 tx frames for thread index
+ * Flush ip lookup tx frames populated by timer pops
+ */
+always_inline void
+tcp_flush_frame_to_ip_lookup (vlib_main_t * vm, u8 thread_index, u8 is_ip4)
+{
+ if (tcp_main.ip_lookup_tx_frames[!is_ip4][thread_index])
+ {
+ u32 next_index;
+ next_index = is_ip4 ? ip4_lookup_node.index : ip6_lookup_node.index;
+ vlib_put_frame_to_node (vm, next_index,
+ tcp_main.ip_lookup_tx_frames[!is_ip4]
+ [thread_index]);
+ tcp_main.ip_lookup_tx_frames[!is_ip4][thread_index] = 0;
+ }
+}
+
+/**
+ * Flush v4 and v6 tcp and ip-lookup tx frames for thread index
*/
void
tcp_flush_frames_to_output (u8 thread_index)
vlib_main_t *vm = vlib_get_main ();
tcp_flush_frame_to_output (vm, thread_index, 1);
tcp_flush_frame_to_output (vm, thread_index, 0);
+ tcp_flush_frame_to_ip_lookup (vm, thread_index, 1);
+ tcp_flush_frame_to_ip_lookup (vm, thread_index, 0);
}
/**
void
tcp_send_fin (tcp_connection_t * tc)
{
- vlib_buffer_t *b;
- u32 bi;
tcp_main_t *tm = vnet_get_tcp_main ();
vlib_main_t *vm = vlib_get_main ();
+ vlib_buffer_t *b;
+ u32 bi;
+ u8 fin_snt = 0;
+
if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
return;
b = vlib_get_buffer (vm, bi);
- /* buffer will be initialized by in tcp_make_fin */
+ fin_snt = tc->flags & TCP_CONN_FINSNT;
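+ /* If a FIN was already sent, this is a retransmit: rewind snd_nxt so
+ * the FIN reuses its original sequence number */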
+ if (fin_snt)
+ tc->snd_nxt = tc->snd_una;
tcp_make_fin (tc, b);
tcp_enqueue_to_output_now (vm, b, bi, tc->c_is_ip4);
- if (!(tc->flags & TCP_CONN_FINSNT))
+ if (!fin_snt)
{
tc->flags |= TCP_CONN_FINSNT;
tc->flags &= ~TCP_CONN_FINPNDG;
- tc->snd_nxt += 1;
+ /* Account for the FIN */
+ tc->snd_una_max += 1;
+ tc->snd_nxt = tc->snd_una_max;
}
tcp_retransmit_timer_force_update (tc);
TCP_EVT_DBG (TCP_EVT_FIN_SENT, tc);
else if (tc->state == TCP_STATE_SYN_RCVD)
{
tc->rto_boff += 1;
- tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
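+ /* Keep the initial rto for the first few SYN-ACK retries before
+ * backing off */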
+ if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
+ tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
tc->rtt_ts = 0;
if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
else
{
ASSERT (tc->state == TCP_STATE_CLOSED);
- clib_warning ("connection closed ...");
+ TCP_DBG ("connection state: %d", tc->state);
return;
}
}