u8 n_bufs_per_seg;
CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
session_dgram_hdr_t hdr;
+
+ /** Vector of tx buffer indices */
+ u32 *tx_buffers;
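+ /** Buffers pending tx, handed to the transport in bulk */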
+ vlib_buffer_t **transport_pending_bufs;
} session_tx_context_t;
typedef struct session_evt_elt
/** Context for session tx */
session_tx_context_t ctx;
- /** Vector of tx buffer free lists */
- u32 *tx_buffers;
-
/** Pool of session event list elements */
session_evt_elt_t *event_elts;
/** Head of list of pending events */
clib_llist_index_t old_head;
- /** Peekers rw lock */
- clib_rwlock_t peekers_rw_locks;
-
/** Vector of buffers to be sent */
u32 *pending_tx_buffers;
/** Clib file for timerfd. Used only if adaptive mode is on */
uword timerfd_file;
+ /** List of pending connects for first worker */
+ clib_llist_index_t pending_connects;
+
+ /** Number of pending connects signaled by main thread */
+ u32 n_pending_connects;
+
+ /** Number of main thread loops in poll mode without a connect */
+ u32 no_connect_loops;
+
+ /** Head of list of first worker events pending handling on main */
+ clib_llist_index_t evts_pending_main;
+
#if SESSION_DEBUG
/** last event poll time by thread */
clib_time_type_t last_event_poll;
u8 session_node_lookup_fifo_event (svm_fifo_t * f, session_event_t * e);
+typedef void (*session_update_time_fn) (f64 time_now, u8 thread_index);
+
typedef struct session_main_
{
/** Worker contexts */
session_worker_t *wrk;
+ /** Vector of transport update time functions */
+ session_update_time_fn *update_time_fns;
+
/** Event queues memfd segment */
- fifo_segment_t evt_qs_segment;
+ fifo_segment_t wrk_mqs_segment;
/** Unique segment name counter */
u32 unique_segment_name_counter;
* Trade memory for speed, for now */
u32 *session_type_to_next;
+ /** Thread for connectionless (cl) sessions and half-opens (ho) that rely on cl allocs */
+ u32 transport_cl_thread;
+
transport_proto_t last_transport_proto_type;
+ /** Number of workers at pool realloc barrier */
+ u32 pool_realloc_at_barrier;
+
+ /** Lock to synchronize parallel forced reallocs */
+ clib_spinlock_t pool_realloc_lock;
+
/*
* Config parameters
*/
u8 no_adaptive;
/** vpp fifo event queue configured length */
- u32 configured_event_queue_length;
+ u32 configured_wrk_mq_length;
/** Session ssvm segment configs*/
- uword session_baseva;
- uword session_va_space_size;
- uword evt_qs_segment_size;
+ uword wrk_mqs_segment_size;
/** Session table size parameters */
u32 configured_v4_session_table_buckets;
/** Preallocate session config parameter */
u32 preallocated_sessions;
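+ /** Session layer api message id base */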
+ u16 msg_id_base;
} session_main_t;
extern session_main_t session_main;
#define TRANSPORT_PROTO_INVALID (session_main.last_transport_proto_type + 1)
#define TRANSPORT_N_PROTOS (session_main.last_transport_proto_type + 1)
-static inline session_evt_elt_t *
-session_evt_elt_alloc (session_worker_t * wrk)
-{
- session_evt_elt_t *elt;
- pool_get (wrk->event_elts, elt);
- return elt;
-}
-
-static inline void
-session_evt_elt_free (session_worker_t * wrk, session_evt_elt_t * elt)
-{
- pool_put (wrk->event_elts, elt);
-}
-
static inline void
session_evt_add_old (session_worker_t * wrk, session_evt_elt_t * elt)
{
clib_llist_add_tail (wrk->event_elts, evt_list, elt,
- pool_elt_at_index (wrk->event_elts, wrk->old_head));
+ clib_llist_elt (wrk->event_elts, wrk->old_head));
}
static inline void
session_evt_add_head_old (session_worker_t * wrk, session_evt_elt_t * elt)
{
clib_llist_add (wrk->event_elts, evt_list, elt,
- pool_elt_at_index (wrk->event_elts, wrk->old_head));
+ clib_llist_elt (wrk->event_elts, wrk->old_head));
}
session_evt_alloc_ctrl (session_worker_t * wrk)
{
session_evt_elt_t *elt;
- elt = session_evt_elt_alloc (wrk);
+ clib_llist_get (wrk->event_elts, elt);
clib_llist_add_tail (wrk->event_elts, evt_list, elt,
- pool_elt_at_index (wrk->event_elts, wrk->ctrl_head));
+ clib_llist_elt (wrk->event_elts, wrk->ctrl_head));
return elt;
}
session_evt_alloc_new (session_worker_t * wrk)
{
session_evt_elt_t *elt;
- elt = session_evt_elt_alloc (wrk);
+ clib_llist_get (wrk->event_elts, elt);
clib_llist_add_tail (wrk->event_elts, evt_list, elt,
- pool_elt_at_index (wrk->event_elts, wrk->new_head));
+ clib_llist_elt (wrk->event_elts, wrk->new_head));
return elt;
}
session_evt_alloc_old (session_worker_t * wrk)
{
session_evt_elt_t *elt;
- elt = session_evt_elt_alloc (wrk);
+ clib_llist_get (wrk->event_elts, elt);
clib_llist_add_tail (wrk->event_elts, evt_list, elt,
- pool_elt_at_index (wrk->event_elts, wrk->old_head));
+ clib_llist_elt (wrk->event_elts, wrk->old_head));
return elt;
}
u64 session_segment_handle (session_t * s);
/**
- * Acquires a lock that blocks a session pool from expanding.
- *
- * This is typically used for safely peeking into other threads'
- * pools in order to clone elements. Lock should be dropped as soon
- * as possible by calling @ref session_pool_remove_peeker.
+ * Get session from handle and avoid pool validation if not on same thread
*
- * NOTE: Avoid using pool_elt_at_index while the lock is held because
- * it may lead to free elt bitmap expansion/contraction!
- */
-always_inline void
-session_pool_add_peeker (u32 thread_index)
-{
- session_worker_t *wrk = &session_main.wrk[thread_index];
- if (thread_index == vlib_get_thread_index ())
- return;
- clib_rwlock_reader_lock (&wrk->peekers_rw_locks);
-}
-
-always_inline void
-session_pool_remove_peeker (u32 thread_index)
-{
- session_worker_t *wrk = &session_main.wrk[thread_index];
- if (thread_index == vlib_get_thread_index ())
- return;
- clib_rwlock_reader_unlock (&wrk->peekers_rw_locks);
-}
-
-/**
- * Get session from handle and 'lock' pool resize if not in same thread
- *
- * Caller should drop the peek 'lock' as soon as possible.
+ * Peekers are fine because pool grows with barrier (see @ref session_alloc)
*/
always_inline session_t *
session_get_from_handle_safe (u64 handle)
}
else
{
- session_pool_add_peeker (thread_index);
- /* Don't use pool_elt_at index. See @ref session_pool_add_peeker */
+ /* Don't use pool_elt_at_index to avoid pool bitmap reallocs */
return wrk->sessions + session_index_from_handle (handle);
}
}
-always_inline u32
-session_get_index (session_t * s)
-{
- return (s - session_main.wrk[s->thread_index].sessions);
-}
-
always_inline session_t *
session_clone_safe (u32 session_index, u32 thread_index)
{
+ u32 current_thread_index = vlib_get_thread_index (), new_index;
session_t *old_s, *new_s;
- u32 current_thread_index = vlib_get_thread_index ();
- /* If during the memcpy pool is reallocated AND the memory allocator
- * decides to give the old chunk of memory to somebody in a hurry to
- * scribble something on it, we have a problem. So add this thread as
- * a session pool peeker.
- */
- session_pool_add_peeker (thread_index);
new_s = session_alloc (current_thread_index);
+ new_index = new_s->session_index;
+ /* Session pools are reallocated with barrier (see @ref session_alloc) */
old_s = session_main.wrk[thread_index].sessions + session_index;
clib_memcpy_fast (new_s, old_s, sizeof (*new_s));
- session_pool_remove_peeker (thread_index);
new_s->thread_index = current_thread_index;
- new_s->session_index = session_get_index (new_s);
+ new_s->session_index = new_index;
return new_s;
}
void session_transport_closing_notify (transport_connection_t * tc);
void session_transport_delete_notify (transport_connection_t * tc);
void session_half_open_delete_notify (transport_connection_t *tc);
+void session_half_open_migrate_notify (transport_connection_t *tc);
+int session_half_open_migrated_notify (transport_connection_t *tc);
void session_transport_closed_notify (transport_connection_t * tc);
void session_transport_reset_notify (transport_connection_t * tc);
int session_stream_accept (transport_connection_t * tc, u32 listener_index,
const transport_proto_vft_t * vft, u8 is_ip4,
u32 output_node);
transport_proto_t session_add_transport_proto (void);
+void session_register_update_time_fn (session_update_time_fn fn, u8 is_add);
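+/* Sketch (illustrative, names hypothetical): a transport registering a
+ * per-dispatch time update function matching the typedef above:
+ *
+ *   static void
+ *   my_proto_update_time (f64 now, u8 thread_index)
+ *   {
+ *     my_proto_expire_timers (now, thread_index);
+ *   }
+ *
+ *   // register (is_add = 1)
+ *   session_register_update_time_fn (my_proto_update_time, 1);
+ */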
int session_tx_fifo_peek_bytes (transport_connection_t * tc, u8 * buffer,
u32 offset, u32 max_bytes);
u32 session_tx_fifo_dequeue_drop (transport_connection_t * tc, u32 max_bytes);
session_send_io_evt_to_thread (s->tx_fifo, SESSION_IO_EVT_TX);
}
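+/** Get the thread used for connectionless (cl) and half-open allocs */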
+always_inline u32
+transport_cl_thread (void)
+{
+ return session_main.transport_cl_thread;
+}
+
/*
* Listen sessions
*/
s = session_alloc (0);
s->session_state = SESSION_STATE_CONNECTING;
s->flags |= SESSION_F_HALF_OPEN;
+ /* Not ideal. Half-opens are only allocated from main with the worker
+ * barrier held but can be freed, e.g., via session_half_open_free, from
+ * main without a barrier. In debug images, the free_bitmap can grow while
+ * workers peek the sessions pool, e.g., in session_half_open_migrate_notify,
+ * and they can then crash while validating the session. To avoid this,
+ * grow the bitmap now. */
+ if (CLIB_DEBUG)
+ {
+ session_t *sp = session_main.wrk[0].sessions;
+ clib_bitmap_validate (pool_header (sp)->free_bitmap, s->session_index + 1);
+ }
return s;
}
}
void session_wrk_enable_adaptive_mode (session_worker_t *wrk);
-fifo_segment_t *session_main_get_evt_q_segment (void);
+fifo_segment_t *session_main_get_wrk_mqs_segment (void);
void session_node_enable_disable (u8 is_en);
clib_error_t *vnet_session_enable_disable (vlib_main_t * vm, u8 is_en);
+void session_wrk_handle_evts_main_rpc (void *);
session_t *session_alloc_for_connection (transport_connection_t * tc);
session_t *session_alloc_for_half_open (transport_connection_t *tc);
+typedef void (pool_safe_realloc_rpc_fn) (void *rpc_args);
+
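+/* Overlays pool_header_t so the pad after max_elts can be reused as a
+ * realloc flag (see pool_realloc_flag below) */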
+typedef struct
+{
+ u8 ph[STRUCT_OFFSET_OF (pool_header_t, max_elts) + 4];
+ u32 flag;
+} pool_safe_realloc_header_t;
+
+STATIC_ASSERT_SIZEOF (pool_safe_realloc_header_t, sizeof (pool_header_t));
+
+#define POOL_REALLOC_SAFE_ELT_THRESH 32
+
+#define pool_realloc_flag(PH) \
+ ((pool_safe_realloc_header_t *) pool_header (PH))->flag
+
+#define pool_realloc_safe_aligned(P, align) \
+ do \
+ { \
+ vlib_main_t *vm = vlib_get_main (); \
+ u32 free_elts, max_elts, n_alloc; \
+ ASSERT (vlib_get_thread_index () == 0); \
+ vlib_worker_thread_barrier_sync (vm); \
+ free_elts = pool_free_elts (P); \
+ max_elts = pool_max_len (P); \
+ n_alloc = clib_max (2 * max_elts, POOL_REALLOC_SAFE_ELT_THRESH); \
+ pool_alloc_aligned (P, free_elts + n_alloc, align); \
+ clib_bitmap_validate (pool_header (P)->free_bitmap, \
+ max_elts + n_alloc); \
+ pool_realloc_flag (P) = 0; \
+ vlib_worker_thread_barrier_release (vm); \
+ } \
+ while (0)
+
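+/* Sketch (illustrative, names hypothetical): the rpc_fn handed to
+ * pool_program_safe_realloc below is expected to run on main and grow the
+ * pool under barrier using the macro above:
+ *
+ *   static void
+ *   my_pool_realloc_rpc (void *rpc_args)
+ *   {
+ *     u32 thread_index = pointer_to_uword (rpc_args);
+ *     pool_realloc_safe_aligned (my_main.wrk[thread_index].elts,
+ *                                CLIB_CACHE_LINE_BYTES);
+ *   }
+ */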
+always_inline void
+pool_program_safe_realloc (void *p, u32 thread_index,
+ pool_safe_realloc_rpc_fn *rpc_fn)
+{
+ /* Reuse pad as a realloc flag */
+ if (pool_realloc_flag (p))
+ return;
+
+ pool_realloc_flag (p) = 1;
+ session_send_rpc_evt_to_thread (0 /* thread index */, rpc_fn,
+ uword_to_pointer (thread_index, void *));
+}
+
+always_inline void
+pool_realloc_maybe_wait_at_barrier (void)
+{
+ if (!(*vlib_worker_threads->wait_at_barrier))
+ return;
+
+ /* Node refork required. Don't stop at the barrier from within a node */
+ if (*vlib_worker_threads->node_reforks_required)
+ return;
+
+ clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1);
+
+ while (*vlib_worker_threads->wait_at_barrier)
+ ;
+
+ clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);
+}
+
+#define pool_realloc_all_at_barrier(_not) \
+ (*vlib_worker_threads->workers_at_barrier >= (vlib_num_workers () - _not))
+
+#define pool_realloc_safe_force(P) \
+ do \
+ { \
+ ALWAYS_ASSERT (*vlib_worker_threads->node_reforks_required); \
+ if (pool_realloc_all_at_barrier (1)) \
+ { \
+ pool_alloc (P, pool_max_len (P)); \
+ } \
+ else \
+ { \
+ session_main_t *sm = &session_main; \
+ clib_warning ("forced pool realloc"); \
+ clib_atomic_fetch_add (&sm->pool_realloc_at_barrier, 1); \
+ while (!pool_realloc_all_at_barrier (sm->pool_realloc_at_barrier)) \
+ ; \
+ clib_spinlock_lock (&sm->pool_realloc_lock); \
+ pool_alloc (P, pool_max_len (P)); \
+ clib_spinlock_unlock (&sm->pool_realloc_lock); \
+ clib_atomic_fetch_add (&sm->pool_realloc_at_barrier, -1); \
+ } \
+ } \
+ while (0)
+
+#define pool_needs_realloc(P) \
+ ((!P) || \
+ (vec_len (pool_header (P)->free_indices) < POOL_REALLOC_SAFE_ELT_THRESH && \
+ pool_free_elts (P) < POOL_REALLOC_SAFE_ELT_THRESH))
+
+#define pool_get_aligned_safe(P, E, thread_index, rpc_fn, align) \
+ do \
+ { \
+ ASSERT (vlib_get_thread_index () == thread_index || \
+ vlib_thread_is_main_w_barrier ()); \
+ if (PREDICT_FALSE (pool_needs_realloc (P))) \
+ { \
+ if (PREDICT_FALSE (!(P))) \
+ { \
+ pool_alloc_aligned (P, 2 * POOL_REALLOC_SAFE_ELT_THRESH, \
+ align); \
+ } \
+ else if (PREDICT_FALSE (pool_free_elts (P) < \
+ POOL_REALLOC_SAFE_ELT_THRESH / 2)) \
+ { \
+ volatile typeof (P) *PP = &(P); \
+ pool_program_safe_realloc (P, thread_index, rpc_fn); \
+ if (thread_index) \
+ { \
+ while (pool_realloc_flag (P)) \
+ { \
+ /* If refork required abort and consume existing elt */ \
+ if (*vlib_worker_threads->node_reforks_required) \
+ { \
+ /* All workers at barrier realloc now */ \
+ if (pool_realloc_all_at_barrier (1)) \
+ pool_alloc_aligned (P, pool_max_len (P), align); \
+ break; \
+ } \
+ pool_realloc_maybe_wait_at_barrier (); \
+ } \
+ if (pool_free_elts (P) == 0) \
+ pool_realloc_safe_force (P); \
+ ALWAYS_ASSERT (pool_free_elts (P) > 0); \
+ } \
+ (P) = *PP; \
+ } \
+ else \
+ { \
+ pool_program_safe_realloc (P, thread_index, rpc_fn); \
+ } \
+ } \
+ pool_get_aligned (P, E, align); \
+ } \
+ while (0)
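+
+/* Sketch (illustrative): a worker allocating from a grow-safe per-thread
+ * pool, reusing the hypothetical rpc fn from the sketch above:
+ *
+ *   my_elt_t *e;
+ *   pool_get_aligned_safe (my_main.wrk[thread_index].elts, e, thread_index,
+ *                          my_pool_realloc_rpc, CLIB_CACHE_LINE_BYTES);
+ */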
+
#endif /* __included_session_h__ */
/*