pool_get_aligned_will_expand (smm->sessions[thread_index], will_expand,
CLIB_CACHE_LINE_BYTES);
/* If we have peekers, let them finish */
- if (PREDICT_FALSE (will_expand))
+ if (PREDICT_FALSE (will_expand && vlib_num_workers ()))
{
- clib_spinlock_lock_if_init (&smm->peekers_write_locks[thread_index]);
+ clib_rwlock_writer_lock (&smm->peekers_rw_locks[thread_index]);
pool_get_aligned (session_manager_main.sessions[thread_index], s,
CLIB_CACHE_LINE_BYTES);
- clib_spinlock_unlock_if_init (&smm->peekers_write_locks[thread_index]);
+ clib_rwlock_writer_unlock (&smm->peekers_rw_locks[thread_index]);
}
else
{
{
SESSION_DBG ("failed to notify app");
if (!is_fail)
- stream_session_disconnect (new_s);
+ stream_session_disconnect_transport (new_s);
}
else
{
}
/**
- * Disconnect session and propagate to transport. This should eventually
+ * Initialize session disconnect.
+ *
+ * Request is always sent to session node to ensure that all outstanding
+ * requests are served before transport is notified.
+ */
+void
+stream_session_disconnect (stream_session_t * s)
+{
+  /* Null or already-closed sessions need no work; the CLOSED check also
+   * makes a repeated disconnect request return early instead of queueing
+   * a second event. */
+  if (!s || s->session_state == SESSION_STATE_CLOSED)
+    return;
+  /* Mark the session closed, then hand the actual disconnect off to the
+   * session node on the session's owning thread (see header comment:
+   * outstanding requests must drain before the transport is notified). */
+  s->session_state = SESSION_STATE_CLOSED;
+  session_send_session_evt_to_thread (session_handle (s),
+				      FIFO_EVENT_DISCONNECT, s->thread_index);
+}
+
+/**
+ * Notify transport the session can be disconnected. This should eventually
* result in a delete notification that allows us to cleanup session state.
* Called for both active/passive disconnects.
*
- * Should be called from the session's thread.
+ * Must be called from the session's thread.
*/
void
-stream_session_disconnect (stream_session_t * s)
+stream_session_disconnect_transport (stream_session_t * s)
{
s->session_state = SESSION_STATE_CLOSED;
tp_vfts[session_get_transport_proto (s)].close (s->connection_index,
static clib_error_t *
session_manager_main_enable (vlib_main_t * vm)
{
+ segment_manager_main_init_args_t _sm_args = { 0 }, *sm_args = &_sm_args;
session_manager_main_t *smm = &session_manager_main;
vlib_thread_main_t *vtm = vlib_get_thread_main ();
u32 num_threads;
vec_validate (smm->pending_disconnects, num_threads - 1);
vec_validate (smm->free_event_vector, num_threads - 1);
vec_validate (smm->vpp_event_queues, num_threads - 1);
- vec_validate (smm->session_peekers, num_threads - 1);
- vec_validate (smm->peekers_readers_locks, num_threads - 1);
- vec_validate (smm->peekers_write_locks, num_threads - 1);
+ vec_validate (smm->peekers_rw_locks, num_threads - 1);
for (i = 0; i < TRANSPORT_N_PROTO; i++)
for (j = 0; j < num_threads; j++)
vec_validate (smm->pending_disconnects[i], 0);
_vec_len (smm->pending_disconnects[i]) = 0;
if (num_threads > 1)
- {
- clib_spinlock_init (&smm->peekers_readers_locks[i]);
- clib_spinlock_init (&smm->peekers_write_locks[i]);
- }
+ clib_rwlock_init (&smm->peekers_rw_locks[i]);
}
#if SESSION_DBG
session_vpp_event_queues_allocate (smm);
/* Initialize fifo segment main baseva and timeout */
- svm_fifo_segment_init (smm->session_baseva + smm->evt_qs_segment_size,
- smm->segment_timeout);
+ sm_args->baseva = smm->session_baseva + smm->evt_qs_segment_size;
+ sm_args->size = smm->session_va_space_size;
+ segment_manager_main_init (sm_args);
/* Preallocate sessions */
if (smm->preallocated_sessions)
{
session_manager_main_t *smm = &session_manager_main;
smm->session_baseva = 0x200000000ULL;
- smm->segment_timeout = 20;
+ smm->session_va_space_size = (u64) 128 << 30;
smm->evt_qs_segment_size = 64 << 20;
smm->is_enabled = 0;
return 0;