segment_manager_t *sm;
session_t *ls;
u32 sm_index;
- int i;
+ int i, j;
/*
* Listener cleanup
}));
/* *INDENT-ON* */
+ hash_free (app_wrk->listeners_table);
+
for (i = 0; i < vec_len (handles); i++)
{
a->app_index = app->app_index;
/* seg manager is removed when unbind completes */
(void) vnet_unlisten (a);
}
+ vec_reset_length (handles);
/*
* Connects segment manager cleanup
segment_manager_init_free (sm);
}
+ /*
+ * Half-open cleanup
+ */
+
+ for (i = 0; i < vec_len (app_wrk->half_open_table); i++)
+ {
+ if (!app_wrk->half_open_table[i])
+ continue;
+
+ /* *INDENT-OFF* */
+ hash_foreach (handle, sm_index, app_wrk->half_open_table[i], ({
+ vec_add1 (handles, handle);
+ }));
+ /* *INDENT-ON* */
+
+ for (j = 0; j < vec_len (handles); j++)
+ session_cleanup_half_open (i, handles[j]);
+
+ hash_free (app_wrk->half_open_table[i]);
+ vec_reset_length (handles);
+ }
+
+ vec_free (app_wrk->half_open_table);
+ vec_free (handles);
+
/* If first segment manager is used by a listener */
if (app_wrk->first_segment_manager != APP_INVALID_SEGMENT_MANAGER_INDEX
&& app_wrk->first_segment_manager != app_wrk->connects_seg_manager)
segment_manager_free (sm);
}
- pool_put (app_workers, app_wrk);
if (CLIB_DEBUG)
clib_memset (app_wrk, 0xfe, sizeof (*app_wrk));
+ pool_put (app_workers, app_wrk);
}
application_t *
svm_fifo_t *rx_fifo = 0, *tx_fifo = 0;
int rv;
- if ((rv = segment_manager_alloc_session_fifos (sm, &rx_fifo, &tx_fifo)))
+ if ((rv = segment_manager_alloc_session_fifos (sm, s->thread_index,
+ &rx_fifo, &tx_fifo)))
return rv;
rx_fifo->master_session_index = s->session_index;
/* Allocate segment manager. All sessions derived out of a listen session
* have fifos allocated by the same segment manager. */
if (!(sm = app_worker_alloc_segment_manager (app_wrk)))
- return -1;
+ return SESSION_E_ALLOC;
/* Keep track of the segment manager for the listener or this worker */
hash_set (app_wrk->listeners_table, listen_session_get_handle (ls),
segment_manager_index (sm));
- if (session_transport_service_type (ls) == TRANSPORT_SERVICE_CL)
+ if (transport_connection_is_cless (session_get_transport (ls)))
{
- if (!ls->rx_fifo && app_worker_alloc_session_fifos (sm, ls))
- return -1;
+ if (ls->rx_fifo)
+ return SESSION_E_NOSUPPORT;
+ return app_worker_alloc_session_fifos (sm, ls);
}
return 0;
}
app_listener_t * app_listener)
{
session_t *ls;
+ int rv;
if (clib_bitmap_get (app_listener->workers, app_wrk->wrk_map_index))
- return VNET_API_ERROR_ADDRESS_IN_USE;
+ return SESSION_E_ALREADY_LISTENING;
app_listener->workers = clib_bitmap_set (app_listener->workers,
app_wrk->wrk_map_index, 1);
if (app_listener->session_index != SESSION_INVALID_INDEX)
{
ls = session_get (app_listener->session_index, 0);
- if (app_worker_init_listener (app_wrk, ls))
- return -1;
+ if ((rv = app_worker_init_listener (app_wrk, ls)))
+ return rv;
}
if (app_listener->local_index != SESSION_INVALID_INDEX)
{
ls = session_get (app_listener->local_index, 0);
- if (app_worker_init_listener (app_wrk, ls))
- return -1;
+ if ((rv = app_worker_init_listener (app_wrk, ls)))
+ return rv;
}
return 0;
if (PREDICT_FALSE (!sm_indexp))
return;
- sm = segment_manager_get (*sm_indexp);
- if (app_wrk->first_segment_manager == *sm_indexp)
+ /* Dealloc fifos, if any (dgram listeners) */
+ if (ls->rx_fifo)
{
- /* Delete sessions but don't remove segment manager */
- app_wrk->first_segment_manager_in_use = 0;
- segment_manager_del_sessions (sm);
+ segment_manager_dealloc_fifos (ls->rx_fifo, ls->tx_fifo);
+ ls->tx_fifo = ls->rx_fifo = 0;
}
- else
+
+ /* Try to cleanup segment manager */
+ sm = segment_manager_get (*sm_indexp);
+ if (sm && app_wrk->first_segment_manager != *sm_indexp)
{
- segment_manager_init_free (sm);
+ segment_manager_app_detach (sm);
+ if (!segment_manager_has_fifos (sm))
+ segment_manager_free (sm);
}
+
hash_unset (app_wrk->listeners_table, handle);
}
app_worker_t *app_wrk;
segment_manager_t *sm;
session_t *listener;
+ application_t *app;
listener = listen_session_get_from_handle (s->listener_handle);
app_wrk = application_listener_select_worker (listener);
s->app_wrk_index = app_wrk->wrk_index;
+ app = application_get (app_wrk->app_index);
+ if (app->cb_fns.fifo_tuning_callback)
+ s->flags |= SESSION_F_CUSTOM_FIFO_TUNING;
+
sm = app_worker_get_listen_segment_manager (app_wrk, listener);
if (app_worker_alloc_session_fifos (sm, s))
return -1;
if (!application_is_builtin_proxy (app))
{
sm = app_worker_get_connect_segment_manager (app_wrk);
- if (app_worker_alloc_session_fifos (sm, s))
- return -1;
+ return app_worker_alloc_session_fifos (sm, s);
}
+
+ if (app->cb_fns.fifo_tuning_callback)
+ s->flags |= SESSION_F_CUSTOM_FIFO_TUNING;
+
return 0;
}
+/* Notify app worker that a connect has completed.
+ *
+ * NOTE(review): the callback now receives a session_error_t instead of the
+ * old boolean is_fail flag, so apps can see the actual failure cause; on
+ * failure s is presumably 0 -- confirm with session layer callers. */
int
-app_worker_connect_notify (app_worker_t * app_wrk, session_t * s, u32 opaque)
+app_worker_connect_notify (app_worker_t * app_wrk, session_t * s,
+			   session_error_t err, u32 opaque)
{
  application_t *app = application_get (app_wrk->app_index);
  return app->cb_fns.session_connected_callback (app_wrk->wrk_index, opaque,
-						 s, s == 0 /* is_fail */ );
+						 s, err);
+}
+
+/**
+ * Track a half-open session for transport proto @a tp.
+ *
+ * Maps the half-open session handle to the handle used by the owning
+ * worker. Updates are restricted to the main thread (see ASSERT), which
+ * is why the hash table needs no locking. Always returns 0.
+ */
+int
+app_worker_add_half_open (app_worker_t * app_wrk, transport_proto_t tp,
+			  session_handle_t ho_handle,
+			  session_handle_t wrk_handle)
+{
+  /* All half-open table updates are main-thread only */
+  ASSERT (vlib_get_thread_index () == 0);
+  /* Lazily grow the per-transport vector of hash tables */
+  vec_validate (app_wrk->half_open_table, tp);
+  hash_set (app_wrk->half_open_table[tp], ho_handle, wrk_handle);
+  return 0;
+}
+
+/**
+ * Stop tracking a half-open session for transport proto @a tp.
+ * Main-thread only (see ASSERT). Always returns 0.
+ */
+int
+app_worker_del_half_open (app_worker_t * app_wrk, transport_proto_t tp,
+			  session_handle_t ho_handle)
+{
+  ASSERT (vlib_get_thread_index () == 0);
+  hash_unset (app_wrk->half_open_table[tp], ho_handle);
+  return 0;
+}
+
+/**
+ * Look up the worker session handle stored for a half-open session.
+ *
+ * Safe without a lock because all table updates are done from the main
+ * thread (see comment below).
+ *
+ * @return the stored worker session handle, or SESSION_INVALID_HANDLE if
+ *	   the half-open handle is not tracked.
+ */
+u64
+app_worker_lookup_half_open (app_worker_t * app_wrk, transport_proto_t tp,
+			     session_handle_t ho_handle)
+{
+  u64 *ho_wrk_handlep;
+
+  /* No locking because all updates are done from main thread */
+  ho_wrk_handlep = hash_get (app_wrk->half_open_table[tp], ho_handle);
+  if (!ho_wrk_handlep)
+    return SESSION_INVALID_HANDLE;
+
+  return *ho_wrk_handlep;
}
int
return 0;
}
+/**
+ * Notify app that the session's transport has closed.
+ * The callback is optional; this is a no-op if the app did not register
+ * one. Always returns 0.
+ */
+int
+app_worker_transport_closed_notify (app_worker_t * app_wrk, session_t * s)
+{
+  application_t *app = application_get (app_wrk->app_index);
+  if (app->cb_fns.session_transport_closed_callback)
+    app->cb_fns.session_transport_closed_callback (s);
+  return 0;
+}
+
int
app_worker_reset_notify (app_worker_t * app_wrk, session_t * s)
{
return 0;
}
+/**
+ * Notify app of session cleanup of type @a ntf.
+ * The callback is optional; this is a no-op if the app did not register
+ * one. Always returns 0.
+ */
+int
+app_worker_cleanup_notify (app_worker_t * app_wrk, session_t * s,
+			   session_cleanup_ntf_t ntf)
+{
+  application_t *app = application_get (app_wrk->app_index);
+  if (app->cb_fns.session_cleanup_callback)
+    app->cb_fns.session_cleanup_callback (s, ntf);
+  return 0;
+}
+
int
app_worker_builtin_rx (app_worker_t * app_wrk, session_t * s)
{
return 0;
}
+/**
+ * Notify app that session @a s migrated to new handle @a new_sh.
+ *
+ * NOTE(review): session_migrate_callback is invoked without a null check,
+ * i.e., it is assumed to be registered for all apps -- unlike the optional
+ * cleanup and transport-closed callbacks. Confirm the cb_fns contract.
+ */
+int
+app_worker_migrate_notify (app_worker_t * app_wrk, session_t * s,
+			   session_handle_t new_sh)
+{
+  application_t *app = application_get (app_wrk->app_index);
+  app->cb_fns.session_migrate_callback (s, new_sh);
+  return 0;
+}
+
int
app_worker_own_session (app_worker_t * app_wrk, session_t * s)
{
return 0;
}
+/**
+ * Run the app's fifo tuning callback for fifo @a f.
+ *
+ * NOTE(review): fifo_tuning_callback is called without a null check;
+ * callers are expected to gate on SESSION_F_CUSTOM_FIFO_TUNING, which is
+ * only set when the callback is registered -- confirm at call sites.
+ */
+int
+app_worker_session_fifo_tuning (app_worker_t * app_wrk, session_t * s,
+				svm_fifo_t * f,
+				session_ft_action_t act, u32 len)
+{
+  application_t *app = application_get (app_wrk->app_index);
+  return app->cb_fns.fifo_tuning_callback (s, f, act, len);
+}
+
int
app_worker_alloc_connects_segment_manager (app_worker_t * app_wrk)
{
{
uword *smp;
smp = hash_get (app->listeners_table, listen_session_get_handle (listener));
- ASSERT (smp != 0);
+ ALWAYS_ASSERT (smp != 0);
return segment_manager_get (*smp);
}
app_worker_add_segment_notify (app_worker_t * app_wrk, u64 segment_handle)
{
application_t *app = application_get (app_wrk->app_index);
- return app->cb_fns.add_segment_callback (app_wrk->api_client_index,
+
+ return app->cb_fns.add_segment_callback (app_wrk->wrk_index,
segment_handle);
}
app_worker_del_segment_notify (app_worker_t * app_wrk, u64 segment_handle)
{
application_t *app = application_get (app_wrk->app_index);
- return app->cb_fns.del_segment_callback (app_wrk->api_client_index,
+ return app->cb_fns.del_segment_callback (app_wrk->wrk_index,
segment_handle);
}
}
static inline int
-app_enqueue_evt (svm_msg_q_t * mq, svm_msg_q_msg_t * msg, u8 lock)
-{
- if (PREDICT_FALSE (svm_msg_q_is_full (mq)))
- {
- clib_warning ("evt q full");
- svm_msg_q_free_msg (mq, msg);
- if (lock)
- svm_msg_q_unlock (mq);
- return -1;
- }
-
- if (lock)
- {
- svm_msg_q_add_and_unlock (mq, msg);
- return 0;
- }
-
- /* Even when not locking the ring, we must wait for queue mutex */
- if (svm_msg_q_add (mq, msg, SVM_Q_WAIT))
- {
- clib_warning ("msg q add returned");
- return -1;
- }
- return 0;
-}
-
-static inline int
-app_send_io_evt_rx (app_worker_t * app_wrk, session_t * s, u8 lock)
+app_send_io_evt_rx (app_worker_t * app_wrk, session_t * s)
{
session_event_t *evt;
svm_msg_q_msg_t msg;
svm_msg_q_t *mq;
- if (PREDICT_FALSE (s->session_state != SESSION_STATE_READY
- && s->session_state != SESSION_STATE_LISTENING))
- return 0;
-
if (app_worker_application_is_builtin (app_wrk))
return app_worker_builtin_rx (app_wrk, s);
return 0;
mq = app_wrk->event_queue;
- if (lock)
- svm_msg_q_lock (mq);
+ svm_msg_q_lock (mq);
+
+ if (PREDICT_FALSE (svm_msg_q_is_full (mq)))
+ {
+ clib_warning ("evt q full");
+ svm_msg_q_unlock (mq);
+ return -1;
+ }
if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, SESSION_MQ_IO_EVT_RING)))
{
clib_warning ("evt q rings full");
- if (lock)
- svm_msg_q_unlock (mq);
+ svm_msg_q_unlock (mq);
return -1;
}
msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
- ASSERT (!svm_msg_q_msg_is_invalid (&msg));
-
evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
evt->session_index = s->rx_fifo->client_session_index;
evt->event_type = SESSION_IO_EVT_RX;
(void) svm_fifo_set_event (s->rx_fifo);
+ svm_msg_q_add_and_unlock (mq, &msg);
- if (app_enqueue_evt (mq, &msg, lock))
- return -1;
return 0;
}
static inline int
-app_send_io_evt_tx (app_worker_t * app_wrk, session_t * s, u8 lock)
+app_send_io_evt_tx (app_worker_t * app_wrk, session_t * s)
{
svm_msg_q_t *mq;
session_event_t *evt;
return app_worker_builtin_tx (app_wrk, s);
mq = app_wrk->event_queue;
- if (lock)
- svm_msg_q_lock (mq);
+ svm_msg_q_lock (mq);
+
+ if (PREDICT_FALSE (svm_msg_q_is_full (mq)))
+ {
+ clib_warning ("evt q full");
+ svm_msg_q_unlock (mq);
+ return -1;
+ }
if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, SESSION_MQ_IO_EVT_RING)))
{
clib_warning ("evt q rings full");
- if (lock)
- svm_msg_q_unlock (mq);
+ svm_msg_q_unlock (mq);
return -1;
}
msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
- ASSERT (!svm_msg_q_msg_is_invalid (&msg));
-
evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
evt->event_type = SESSION_IO_EVT_TX;
evt->session_index = s->tx_fifo->client_session_index;
- return app_enqueue_evt (mq, &msg, lock);
+ svm_msg_q_add_and_unlock (mq, &msg);
+ return 0;
}
/* *INDENT-OFF* */
typedef int (app_send_evt_handler_fn) (app_worker_t *app,
- session_t *s,
- u8 lock);
+ session_t *s);
static app_send_evt_handler_fn * const app_send_evt_handler_fns[2] = {
app_send_io_evt_rx,
app_send_io_evt_tx,
};
/* *INDENT-ON* */
-/**
- * Send event to application
- *
- * Logic from queue perspective is non-blocking. If there's
- * not enough space to enqueue a message, we return.
- */
-int
-app_worker_send_event (app_worker_t * app, session_t * s, u8 evt_type)
-{
- ASSERT (app && evt_type <= SESSION_IO_EVT_TX);
- return app_send_evt_handler_fns[evt_type] (app, s, 0 /* lock */ );
-}
-
/**
* Send event to application
*
app_worker_lock_and_send_event (app_worker_t * app, session_t * s,
u8 evt_type)
{
- return app_send_evt_handler_fns[evt_type] (app, s, 1 /* lock */ );
+ return app_send_evt_handler_fns[evt_type] (app, s);
}
u8 *
if (verbose)
{
- char buf[32];
- sprintf (buf, "%u(%u)", app_wrk->wrk_map_index, app_wrk->wrk_index);
- s = format (s, "%-40s%-25s%=10s%-15u%-15u%-10u", str, app_name,
+ u8 *buf;
+ buf = format (0, "%u(%u)", app_wrk->wrk_map_index, app_wrk->wrk_index);
+ s = format (s, "%-40s%-25s%=10v%-15u%-15u%-10u", str, app_name,
buf, app_wrk->api_client_index, handle, sm_index);
+ vec_free (buf);
}
else
s = format (s, "%-40s%-25s%=10u", str, app_name, app_wrk->wrk_map_index);