app_wrk->app_index = app->app_index;
app_wrk->wrk_map_index = ~0;
app_wrk->connects_seg_manager = APP_INVALID_SEGMENT_MANAGER_INDEX;
- app_wrk->first_segment_manager = APP_INVALID_SEGMENT_MANAGER_INDEX;
- APP_DBG ("New app %v worker %u", app_get_name (app), app_wrk->wrk_index);
+ clib_spinlock_init (&app_wrk->detached_seg_managers_lock);
+ clib_spinlock_init (&app_wrk->postponed_mq_msgs_lock);
+ APP_DBG ("New app %v worker %u", app->name, app_wrk->wrk_index);
return app_wrk;
}
{
application_t *app = application_get (app_wrk->app_index);
vnet_unlisten_args_t _a, *a = &_a;
- u64 handle, *handles = 0;
+ u64 handle, *handles = 0, *sm_indices = 0;
segment_manager_t *sm;
+ session_handle_t *sh;
+ session_t *ls;
u32 sm_index;
int i;
- app_listener_t *al;
- session_t *ls;
/*
* Listener cleanup
/* *INDENT-OFF* */
hash_foreach (handle, sm_index, app_wrk->listeners_table, ({
ls = listen_session_get_from_handle (handle);
- al = app_listener_get (app, ls->al_index);
- vec_add1 (handles, app_listener_handle (al));
+ vec_add1 (handles, app_listen_session_handle (ls));
+ vec_add1 (sm_indices, sm_index);
sm = segment_manager_get (sm_index);
- sm->app_wrk_index = SEGMENT_MANAGER_INVALID_APP_INDEX;
}));
/* *INDENT-ON* */
for (i = 0; i < vec_len (handles); i++)
{
+ /* Cleanup listener */
a->app_index = app->app_index;
a->wrk_map_index = app_wrk->wrk_map_index;
a->handle = handles[i];
- /* seg manager is removed when unbind completes */
(void) vnet_unlisten (a);
+
+ sm = segment_manager_get_if_valid (sm_indices[i]);
+ if (sm && !segment_manager_app_detached (sm))
+ {
+ sm->first_is_protected = 0;
+ segment_manager_init_free (sm);
+ }
}
+ vec_reset_length (handles);
+ vec_free (sm_indices);
+ hash_free (app_wrk->listeners_table);
/*
* Connects segment manager cleanup
sm = segment_manager_get (app_wrk->connects_seg_manager);
sm->app_wrk_index = SEGMENT_MANAGER_INVALID_APP_INDEX;
sm->first_is_protected = 0;
- segment_manager_init_del (sm);
+ segment_manager_init_free (sm);
}
- /* If first segment manager is used by a listener */
- if (app_wrk->first_segment_manager != APP_INVALID_SEGMENT_MANAGER_INDEX
- && app_wrk->first_segment_manager != app_wrk->connects_seg_manager)
+ /*
+ * Half-open cleanup
+ */
+
+ pool_foreach (sh, app_wrk->half_open_table)
+ session_cleanup_half_open (*sh);
+
+ pool_free (app_wrk->half_open_table);
+
+ /*
+ * Detached listener segment managers cleanup
+ */
+ for (i = 0; i < vec_len (app_wrk->detached_seg_managers); i++)
{
- sm = segment_manager_get (app_wrk->first_segment_manager);
- sm->first_is_protected = 0;
- sm->app_wrk_index = SEGMENT_MANAGER_INVALID_APP_INDEX;
- /* .. and has no fifos, e.g. it might be used for redirected sessions,
- * remove it */
- if (!segment_manager_has_fifos (sm))
- segment_manager_del (sm);
+ sm = segment_manager_get (app_wrk->detached_seg_managers[i]);
+ segment_manager_init_free (sm);
}
+ vec_free (app_wrk->detached_seg_managers);
+ clib_spinlock_free (&app_wrk->detached_seg_managers_lock);
+ clib_spinlock_free (&app_wrk->postponed_mq_msgs_lock);
- pool_put (app_workers, app_wrk);
if (CLIB_DEBUG)
clib_memset (app_wrk, 0xfe, sizeof (*app_wrk));
+ pool_put (app_workers, app_wrk);
}
application_t *
/* Allocate and initialize a fresh segment manager owned by this worker.
 * Patch note: the legacy "first segment manager" reuse fast-path is
 * removed; segment_manager_new () becomes segment_manager_alloc () and
 * initialization is now an explicit segment_manager_init () call. */
static segment_manager_t *
app_worker_alloc_segment_manager (app_worker_t * app_wrk)
{
- segment_manager_t *sm = 0;
-
- /* If the first segment manager is not in use, don't allocate a new one */
- if (app_wrk->first_segment_manager != APP_INVALID_SEGMENT_MANAGER_INDEX
- && app_wrk->first_segment_manager_in_use == 0)
- {
- sm = segment_manager_get (app_wrk->first_segment_manager);
- app_wrk->first_segment_manager_in_use = 1;
- return sm;
- }
+ segment_manager_t *sm;
- sm = segment_manager_new ();
+ sm = segment_manager_alloc ();
/* Owner recorded before init — presumably so init-time segment
 * allocation sees the right worker; confirm. */
sm->app_wrk_index = app_wrk->wrk_index;
-
+ segment_manager_init (sm);
return sm;
}
app_worker_alloc_session_fifos (segment_manager_t * sm, session_t * s)
{
svm_fifo_t *rx_fifo = 0, *tx_fifo = 0;
- u32 fifo_segment_index;
int rv;
- if ((rv = segment_manager_alloc_session_fifos (sm, &rx_fifo, &tx_fifo,
- &fifo_segment_index)))
+ if ((rv = segment_manager_alloc_session_fifos (sm, s->thread_index,
+ &rx_fifo, &tx_fifo)))
return rv;
- rx_fifo->master_session_index = s->session_index;
+ rx_fifo->shr->master_session_index = s->session_index;
rx_fifo->master_thread_index = s->thread_index;
- tx_fifo->master_session_index = s->session_index;
+ tx_fifo->shr->master_session_index = s->session_index;
tx_fifo->master_thread_index = s->thread_index;
s->rx_fifo = rx_fifo;
/* Allocate segment manager. All sessions derived out of a listen session
* have fifos allocated by the same segment manager. */
if (!(sm = app_worker_alloc_segment_manager (app_wrk)))
- return -1;
+ return SESSION_E_ALLOC;
+
+ /* Once the first segment is mapped, don't remove it until unlisten */
+ sm->first_is_protected = 1;
/* Keep track of the segment manager for the listener or this worker */
hash_set (app_wrk->listeners_table, listen_session_get_handle (ls),
segment_manager_index (sm));
- if (session_transport_service_type (ls) == TRANSPORT_SERVICE_CL)
+ if (transport_connection_is_cless (session_get_transport (ls)))
{
- if (!ls->rx_fifo && app_worker_alloc_session_fifos (sm, ls))
- return -1;
+ if (ls->rx_fifo)
+ return SESSION_E_NOSUPPORT;
+ return app_worker_alloc_session_fifos (sm, ls);
}
return 0;
}
app_listener_t * app_listener)
{
session_t *ls;
+ int rv;
if (clib_bitmap_get (app_listener->workers, app_wrk->wrk_map_index))
- return VNET_API_ERROR_ADDRESS_IN_USE;
+ return SESSION_E_ALREADY_LISTENING;
app_listener->workers = clib_bitmap_set (app_listener->workers,
app_wrk->wrk_map_index, 1);
if (app_listener->session_index != SESSION_INVALID_INDEX)
{
ls = session_get (app_listener->session_index, 0);
- if (app_worker_init_listener (app_wrk, ls))
- return -1;
+ if ((rv = app_worker_init_listener (app_wrk, ls)))
+ return rv;
}
if (app_listener->local_index != SESSION_INVALID_INDEX)
{
ls = session_get (app_listener->local_index, 0);
- if (app_worker_init_listener (app_wrk, ls))
- return -1;
+ if ((rv = app_worker_init_listener (app_wrk, ls)))
+ return rv;
}
return 0;
}
/* Record a segment manager index on the worker's detached list.
 * NOTE(review): unlike the del path, no detached_seg_managers_lock is
 * taken here — presumably callers run on the owning thread; confirm. */
+static void
+app_worker_add_detached_sm (app_worker_t * app_wrk, u32 sm_index)
+{
+ vec_add1 (app_wrk->detached_seg_managers, sm_index);
+}
+
/* Remove sm_index from the worker's detached segment-manager list, if
 * present. Linear scan under the spinlock; element order after vec_del1
 * is not necessarily preserved, which is fine for an unordered set. */
+void
+app_worker_del_detached_sm (app_worker_t * app_wrk, u32 sm_index)
+{
+ u32 i;
+
+ clib_spinlock_lock (&app_wrk->detached_seg_managers_lock);
+ for (i = 0; i < vec_len (app_wrk->detached_seg_managers); i++)
+ {
+ if (app_wrk->detached_seg_managers[i] == sm_index)
+ {
+ vec_del1 (app_wrk->detached_seg_managers, i);
+ break;
+ }
+ }
+ clib_spinlock_unlock (&app_wrk->detached_seg_managers_lock);
+}
+
/* Stop tracking a listen session on this worker and tear down its
 * segment manager state. Patch note: replaces the old "first segment
 * manager" special-casing. Dgram listener fifos are freed first, then
 * the manager is detached from the app and either freed outright (no
 * fifos left) or parked on the detached list so outstanding sessions
 * can be closed later when the app detaches. */
static void
app_worker_stop_listen_session (app_worker_t * app_wrk, session_t * ls)
{
session_handle_t handle;
segment_manager_t *sm;
uword *sm_indexp;
+ session_state_t *states = 0;
handle = listen_session_get_handle (ls);
sm_indexp = hash_get (app_wrk->listeners_table, handle);
/* Not a listener tracked by this worker; nothing to do */
if (PREDICT_FALSE (!sm_indexp))
return;
- sm = segment_manager_get (*sm_indexp);
- if (app_wrk->first_segment_manager == *sm_indexp)
+ /* Dealloc fifos, if any (dgram listeners) */
+ if (ls->rx_fifo)
{
- /* Delete sessions but don't remove segment manager */
- app_wrk->first_segment_manager_in_use = 0;
- segment_manager_del_sessions (sm);
+ segment_manager_dealloc_fifos (ls->rx_fifo, ls->tx_fifo);
+ ls->tx_fifo = ls->rx_fifo = 0;
}
- else
+
+ /* Try to cleanup segment manager */
+ sm = segment_manager_get (*sm_indexp);
+ if (sm)
{
- segment_manager_init_del (sm);
+ sm->first_is_protected = 0;
+ segment_manager_app_detach (sm);
+ if (!segment_manager_has_fifos (sm))
+ {
+ /* Empty segment manager, cleanup it up */
+ segment_manager_free (sm);
+ }
+ else
+ {
+ /* Delete sessions in CREATED state */
+ vec_add1 (states, SESSION_STATE_CREATED);
+ segment_manager_del_sessions_filter (sm, states);
+ vec_free (states);
+
+ /* Track segment manager in case app detaches and all the
+ * outstanding sessions need to be closed */
+ app_worker_add_detached_sm (app_wrk, *sm_indexp);
+ sm->flags |= SEG_MANAGER_F_DETACHED_LISTENER;
+ }
}
+
hash_unset (app_wrk->listeners_table, handle);
}
app_worker_t *app_wrk;
segment_manager_t *sm;
session_t *listener;
+ application_t *app;
- listener = listen_session_get (s->listener_index);
+ listener = listen_session_get_from_handle (s->listener_handle);
app_wrk = application_listener_select_worker (listener);
+ if (PREDICT_FALSE (app_wrk->mq_congested))
+ return -1;
+
s->app_wrk_index = app_wrk->wrk_index;
+ app = application_get (app_wrk->app_index);
+ if (app->cb_fns.fifo_tuning_callback)
+ s->flags |= SESSION_F_CUSTOM_FIFO_TUNING;
sm = app_worker_get_listen_segment_manager (app_wrk, listener);
if (app_worker_alloc_session_fifos (sm, s))
application_t *app = application_get (app_wrk->app_index);
segment_manager_t *sm;
+ if (app->cb_fns.fifo_tuning_callback)
+ s->flags |= SESSION_F_CUSTOM_FIFO_TUNING;
+
/* Allocate fifos for session, unless the app is a builtin proxy */
- if (!application_is_builtin_proxy (app))
- {
- sm = app_worker_get_connect_segment_manager (app_wrk);
- if (app_worker_alloc_session_fifos (sm, s))
- return -1;
- }
- return 0;
+ if (application_is_builtin_proxy (app))
+ return 0;
+
+ sm = app_worker_get_connect_segment_manager (app_wrk);
+ return app_worker_alloc_session_fifos (sm, s);
}
/* Notify the app of a connect result through its session_connected
 * callback. Patch note: the old boolean is_fail (inferred from s == 0)
 * is replaced by an explicit session_error_t so apps see the actual
 * error code. */
int
-app_worker_connect_notify (app_worker_t * app_wrk, session_t * s, u32 opaque)
+app_worker_connect_notify (app_worker_t * app_wrk, session_t * s,
+ session_error_t err, u32 opaque)
{
application_t *app = application_get (app_wrk->app_index);
return app->cb_fns.session_connected_callback (app_wrk->wrk_index, opaque,
- s, s == 0 /* is_fail */ );
+ s, err);
+}
+
/* Track a half-open session handle on the worker; returns its index in
 * the half_open_table pool. The pool is unlocked, hence the assert that
 * we run on the cl (first) vlib thread. */
+int
+app_worker_add_half_open (app_worker_t *app_wrk, session_handle_t sh)
+{
+ session_handle_t *shp;
+
+ ASSERT (session_vlib_thread_is_cl_thread ());
+ pool_get (app_wrk->half_open_table, shp);
+ *shp = sh;
+
+ return (shp - app_wrk->half_open_table);
+}
+
/* Stop tracking a half-open session and give the app a chance to clean
 * up via its optional half_open_cleanup_callback. Always returns 0.
 * NOTE(review): the pool slot is released before the callback runs, so
 * the callback must not rely on s->ho_index still being reserved —
 * confirm intended. */
+int
+app_worker_del_half_open (app_worker_t *app_wrk, session_t *s)
+{
+ application_t *app = application_get (app_wrk->app_index);
+ ASSERT (session_vlib_thread_is_cl_thread ());
+ pool_put_index (app_wrk->half_open_table, s->ho_index);
+ if (app->cb_fns.half_open_cleanup_callback)
+ app->cb_fns.half_open_cleanup_callback (s);
+ return 0;
+}
int
return 0;
}
/* Notify the app that the transport underneath session s closed, if the
 * app registered the (optional) callback. Always returns 0. */
+int
+app_worker_transport_closed_notify (app_worker_t * app_wrk, session_t * s)
+{
+ application_t *app = application_get (app_wrk->app_index);
+ if (app->cb_fns.session_transport_closed_callback)
+ app->cb_fns.session_transport_closed_callback (s);
+ return 0;
+}
+
int
app_worker_reset_notify (app_worker_t * app_wrk, session_t * s)
{
return 0;
}
/* Notify the app that session s is being cleaned up, passing the
 * cleanup notification type (ntf). Callback is optional; always
 * returns 0. */
+int
+app_worker_cleanup_notify (app_worker_t * app_wrk, session_t * s,
+ session_cleanup_ntf_t ntf)
+{
+ application_t *app = application_get (app_wrk->app_index);
+ if (app->cb_fns.session_cleanup_callback)
+ app->cb_fns.session_cleanup_callback (s, ntf);
+ return 0;
+}
+
int
app_worker_builtin_rx (app_worker_t * app_wrk, session_t * s)
{
return 0;
}
/* Deliver a tx event to a builtin (in-process) app via its optional
 * builtin_app_tx_callback. No-op (returns 0) when the app did not
 * register one. */
+int
+app_worker_builtin_tx (app_worker_t * app_wrk, session_t * s)
+{
+ application_t *app = application_get (app_wrk->app_index);
+
+ if (!app->cb_fns.builtin_app_tx_callback)
+ return 0;
+
+ app->cb_fns.builtin_app_tx_callback (s);
+ return 0;
+}
+
/* Notify the app that session s migrated to new_sh. Unlike the other
 * notify wrappers, session_migrate_callback is invoked unconditionally —
 * presumably it is a mandatory callback for apps; confirm. */
+int
+app_worker_migrate_notify (app_worker_t * app_wrk, session_t * s,
+ session_handle_t new_sh)
+{
+ application_t *app = application_get (app_wrk->app_index);
+ app->cb_fns.session_migrate_callback (s, new_sh);
+ return 0;
+}
+
int
app_worker_own_session (app_worker_t * app_wrk, session_t * s)
{
segment_manager_t *sm;
svm_fifo_t *rxf, *txf;
+ int rv;
if (s->session_state == SESSION_STATE_LISTENING)
return application_change_listener_owner (s, app_wrk);
s->rx_fifo = 0;
s->tx_fifo = 0;
- sm = app_worker_get_or_alloc_connect_segment_manager (app_wrk);
- if (app_worker_alloc_session_fifos (sm, s))
- return -1;
+ sm = app_worker_get_connect_segment_manager (app_wrk);
+ if ((rv = app_worker_alloc_session_fifos (sm, s)))
+ return rv;
- if (!svm_fifo_is_empty (rxf))
- {
- clib_memcpy_fast (s->rx_fifo->data, rxf->data, rxf->nitems);
- s->rx_fifo->head = rxf->head;
- s->rx_fifo->tail = rxf->tail;
- s->rx_fifo->cursize = rxf->cursize;
- }
+ if (!svm_fifo_is_empty_cons (rxf))
+ svm_fifo_clone (s->rx_fifo, rxf);
- if (!svm_fifo_is_empty (txf))
- {
- clib_memcpy_fast (s->tx_fifo->data, txf->data, txf->nitems);
- s->tx_fifo->head = txf->head;
- s->tx_fifo->tail = txf->tail;
- s->tx_fifo->cursize = txf->cursize;
- }
+ if (!svm_fifo_is_empty_cons (txf))
+ svm_fifo_clone (s->tx_fifo, txf);
segment_manager_dealloc_fifos (rxf, txf);
}
/* Open a connection on behalf of this worker.
 * Patch note: new signature takes a session_endpoint_cfg_t (worker index
 * now stamped into the cfg) and returns the half-open handle via rsh.
 * The connects segment-manager pre-allocation is gone, and requests are
 * refused with SESSION_E_REFUSED while the worker's mq is congested. */
int
-app_worker_connect_session (app_worker_t * app, session_endpoint_t * sep,
- u32 api_context)
+app_worker_connect_session (app_worker_t *app_wrk, session_endpoint_cfg_t *sep,
+ session_handle_t *rsh)
{
- int rv;
+ if (PREDICT_FALSE (app_wrk->mq_congested))
+ return SESSION_E_REFUSED;
- /* Make sure we have a segment manager for connects */
- app_worker_alloc_connects_segment_manager (app);
+ sep->app_wrk_index = app_wrk->wrk_index;
- if ((rv = session_open (app->wrk_index, sep, api_context)))
- return rv;
-
- return 0;
+ return session_open (sep, rsh);
}
/* Forward a fifo tuning request (action act, length len) for fifo f of
 * session s to the app's fifo_tuning_callback. Patch note: this replaces
 * app_worker_alloc_connects_segment_manager, removed with the eager
 * connects-sm allocation scheme.
 * NOTE(review): the callback is invoked without a NULL check — callers
 * presumably only get here when SESSION_F_CUSTOM_FIFO_TUNING was set
 * (which elsewhere in this patch requires a non-null callback); confirm. */
int
-app_worker_alloc_connects_segment_manager (app_worker_t * app_wrk)
+app_worker_session_fifo_tuning (app_worker_t * app_wrk, session_t * s,
+ svm_fifo_t * f,
+ session_ft_action_t act, u32 len)
{
- segment_manager_t *sm;
-
- if (app_wrk->connects_seg_manager == APP_INVALID_SEGMENT_MANAGER_INDEX)
- {
- sm = app_worker_alloc_segment_manager (app_wrk);
- if (sm == 0)
- return -1;
- app_wrk->connects_seg_manager = segment_manager_index (sm);
- }
- return 0;
+ application_t *app = application_get (app_wrk->app_index);
+ return app->cb_fns.fifo_tuning_callback (s, f, act, len);
}
segment_manager_t *
return segment_manager_get (app->connects_seg_manager);
}
-segment_manager_t *
-app_worker_get_or_alloc_connect_segment_manager (app_worker_t * app_wrk)
-{
- if (app_wrk->connects_seg_manager == (u32) ~ 0)
- app_worker_alloc_connects_segment_manager (app_wrk);
- return segment_manager_get (app_wrk->connects_seg_manager);
-}
-
/* Look up the segment manager this worker tracks for a listener.
 * Patch note: ASSERT upgraded to ALWAYS_ASSERT so a missing table entry
 * fails loudly in release builds too, instead of dereferencing smp. */
segment_manager_t *
app_worker_get_listen_segment_manager (app_worker_t * app,
session_t * listener)
{
uword *smp;
smp = hash_get (app->listeners_table, listen_session_get_handle (listener));
- ASSERT (smp != 0);
+ ALWAYS_ASSERT (smp != 0);
return segment_manager_get (*smp);
}
app_worker_add_segment_notify (app_worker_t * app_wrk, u64 segment_handle)
{
application_t *app = application_get (app_wrk->app_index);
- return app->cb_fns.add_segment_callback (app_wrk->api_client_index,
+
+ return app->cb_fns.add_segment_callback (app_wrk->wrk_index,
segment_handle);
}
app_worker_del_segment_notify (app_worker_t * app_wrk, u64 segment_handle)
{
application_t *app = application_get (app_wrk->app_index);
- return app->cb_fns.del_segment_callback (app_wrk->api_client_index,
+ return app->cb_fns.del_segment_callback (app_wrk->wrk_index,
segment_handle);
}
return app_wrk->app_is_builtin;
}
/* Send a file descriptor to the app over its control channel. Replaces
 * the old app_enqueue_evt helper. With the binary api (sapi disabled)
 * the fd travels over the memclnt api registration; with the socket api
 * it is sent over the app namespace's unix socket as an
 * APP_SAPI_MSG_TYPE_SEND_FDS message carrying only the type. Returns 0
 * on success, -1 on any failure (missing registration, no socket fd,
 * send error). */
-static inline int
-app_enqueue_evt (svm_msg_q_t * mq, svm_msg_q_msg_t * msg, u8 lock)
+static int
+app_wrk_send_fd (app_worker_t *app_wrk, int fd)
{
- if (PREDICT_FALSE (svm_msg_q_is_full (mq)))
+ if (!appns_sapi_enabled ())
{
- clib_warning ("evt q full");
- svm_msg_q_free_msg (mq, msg);
- if (lock)
- svm_msg_q_unlock (mq);
- return -1;
- }
+ vl_api_registration_t *reg;
+ clib_error_t *error;
+
+ reg =
+ vl_mem_api_client_index_to_registration (app_wrk->api_client_index);
+ if (!reg)
+ {
+ clib_warning ("no api registration for client: %u",
+ app_wrk->api_client_index);
+ return -1;
+ }
+
+ if (vl_api_registration_file_index (reg) == VL_API_INVALID_FI)
+ return -1;
+
+ error = vl_api_send_fd_msg (reg, &fd, 1);
+ if (error)
+ {
+ clib_error_report (error);
+ return -1;
+ }
- if (lock)
- {
- svm_msg_q_add_and_unlock (mq, msg);
return 0;
}
/* Socket api path; declarations after the binary-api block (C99+). */
- /* Even when not locking the ring, we must wait for queue mutex */
- if (svm_msg_q_add (mq, msg, SVM_Q_WAIT))
+ app_sapi_msg_t smsg = { 0 };
+ app_namespace_t *app_ns;
+ clib_error_t *error;
+ application_t *app;
+ clib_socket_t *cs;
+ u32 cs_index;
+
+ app = application_get (app_wrk->app_index);
+ app_ns = app_namespace_get (app->ns_index);
+ cs_index = appns_sapi_handle_sock_index (app_wrk->api_client_index);
+ cs = appns_sapi_get_socket (app_ns, cs_index);
+ if (PREDICT_FALSE (!cs))
+ return -1;
+
+ /* There's no payload for the message only the type */
+ smsg.type = APP_SAPI_MSG_TYPE_SEND_FDS;
+ error = clib_socket_sendmsg (cs, &smsg, sizeof (smsg), &fd, 1);
+ if (error)
{
- clib_warning ("msg q add returned");
+ clib_error_report (error);
return -1;
}
+
return 0;
}
-static inline int
-app_send_io_evt_rx (app_worker_t * app_wrk, session_t * s, u8 lock)
/* Try to lock the mq and allocate a message from the given ring,
 * retrying up to 75 times with 1us sleeps. Returns 0 on success, -1 on
 * give-up. NOTE(review): the in-code comment says "break if mq is full"
 * but the code breaks on any rv other than -1 and retries only on -1 —
 * verify which return value of svm_msg_q_lock_and_alloc_msg_w_ring
 * encodes "ring full" vs "lock busy". */
+static int
+mq_try_lock_and_alloc_msg (svm_msg_q_t *mq, session_mq_rings_e ring,
+ svm_msg_q_msg_t *msg)
+{
+ int rv, n_try = 0;
+
+ while (n_try < 75)
+ {
+ rv = svm_msg_q_lock_and_alloc_msg_w_ring (mq, ring, SVM_Q_NOWAIT, msg);
+ if (!rv)
+ return 0;
+ /*
+ * Break the loop if mq is full, usually this is because the
+ * app has crashed or is hanging on somewhere.
+ */
+ if (rv != -1)
+ break;
+ n_try += 1;
+ usleep (1);
+ }
+
+ return -1;
+}
+
/* Packs the (thread_index, app_wrk_index) pair for the postponed-msg
 * drain RPC into a single uword, so it can be round-tripped through the
 * rpc's void * argument without allocation. Assumes uword is at least
 * 64 bits to hold both u32 fields — true on VPP's 64-bit targets. */
+typedef union app_wrk_mq_rpc_args_
+{
+ struct
+ {
+ u32 thread_index;
+ u32 app_wrk_index;
+ };
+ uword as_uword;
+} app_wrk_mq_rpc_ags_t;
+
/* RPC handler that drains up to 32 postponed mq messages for a worker.
 * Runs with postponed_mq_msgs_lock held; stops early if a ring slot
 * still cannot be allocated. Clears mq_congested once the fifo empties,
 * otherwise re-posts itself on the original thread to keep draining.
 * arg is an app_wrk_mq_rpc_ags_t packed as a uword. */
+static int
+app_wrk_handle_mq_postponed_msgs (void *arg)
{
+ svm_msg_q_msg_t _mq_msg, *mq_msg = &_mq_msg;
+ app_wrk_postponed_msg_t *pm;
+ app_wrk_mq_rpc_ags_t args;
+ u32 max_msg, n_msg = 0;
+ app_worker_t *app_wrk;
session_event_t *evt;
- svm_msg_q_msg_t msg;
svm_msg_q_t *mq;
- if (PREDICT_FALSE (s->session_state != SESSION_STATE_READY
- && s->session_state != SESSION_STATE_LISTENING))
+ args.as_uword = pointer_to_uword (arg);
/* Worker may have been freed since the rpc was posted */
+ app_wrk = app_worker_get_if_valid (args.app_wrk_index);
+ if (!app_wrk)
+ return 0;
+
+ mq = app_wrk->event_queue;
+
+ clib_spinlock_lock (&app_wrk->postponed_mq_msgs_lock);
+
/* Bound work per rpc invocation to 32 messages */
+ max_msg = clib_min (32, clib_fifo_elts (app_wrk->postponed_mq_msgs));
+
+ while (n_msg < max_msg)
{
- /* Session is closed so app will never clean up. Flush rx fifo */
- if (s->session_state == SESSION_STATE_CLOSED)
- svm_fifo_dequeue_drop_all (s->rx_fifo);
- return 0;
+ pm = clib_fifo_head (app_wrk->postponed_mq_msgs);
+ if (mq_try_lock_and_alloc_msg (mq, pm->ring, mq_msg))
+ break;
+
+ evt = svm_msg_q_msg_data (mq, mq_msg);
+ clib_memset (evt, 0, sizeof (*evt));
+ evt->event_type = pm->event_type;
+ clib_memcpy_fast (evt->data, pm->data, pm->len);
+
+ if (pm->fd != -1)
+ app_wrk_send_fd (app_wrk, pm->fd);
+
+ svm_msg_q_add_and_unlock (mq, mq_msg);
+
+ clib_fifo_advance_head (app_wrk->postponed_mq_msgs, 1);
+ n_msg += 1;
+ }
+
+ if (!clib_fifo_elts (app_wrk->postponed_mq_msgs))
+ {
+ app_wrk->mq_congested = 0;
+ }
+ else
+ {
/* Still backed up; reschedule ourselves on the original thread */
+ session_send_rpc_evt_to_thread_force (
+ args.thread_index, app_wrk_handle_mq_postponed_msgs,
+ uword_to_pointer (args.as_uword, void *));
+ }
+
+ clib_spinlock_unlock (&app_wrk->postponed_mq_msgs_lock);
+
+ return 0;
+}
+
/* Queue an event that could not be enqueued on the app's mq. Marks the
 * worker congested and copies the payload (msg_len bytes, optional fd)
 * into the postponed fifo under the spinlock. The first postponed
 * message schedules the drain RPC on the current thread. */
+static void
+app_wrk_add_mq_postponed_msg (app_worker_t *app_wrk, session_mq_rings_e ring,
+ u8 evt_type, void *msg, u32 msg_len, int fd)
+{
+ app_wrk_postponed_msg_t *pm;
+
+ clib_spinlock_lock (&app_wrk->postponed_mq_msgs_lock);
+
+ app_wrk->mq_congested = 1;
+
+ clib_fifo_add2 (app_wrk->postponed_mq_msgs, pm);
+ clib_memcpy_fast (pm->data, msg, msg_len);
+ pm->event_type = evt_type;
+ pm->ring = ring;
+ pm->len = msg_len;
+ pm->fd = fd;
+
+ if (clib_fifo_elts (app_wrk->postponed_mq_msgs) == 1)
+ {
+ app_wrk_mq_rpc_ags_t args = { .thread_index = vlib_get_thread_index (),
+ .app_wrk_index = app_wrk->wrk_index };
+
+ session_send_rpc_evt_to_thread_force (
+ args.thread_index, app_wrk_handle_mq_postponed_msgs,
+ uword_to_pointer (args.as_uword, void *));
}
+ clib_spinlock_unlock (&app_wrk->postponed_mq_msgs_lock);
+}
+
/* Enqueue a ctrl event (msg_len bytes, optional fd) on the app's mq
 * ctrl ring. Falls back to the postponed-message path when the worker
 * is already congested or when a ring slot cannot be allocated. */
+always_inline void
+app_wrk_send_ctrl_evt_inline (app_worker_t *app_wrk, u8 evt_type, void *msg,
+ u32 msg_len, int fd)
+{
+ svm_msg_q_msg_t _mq_msg, *mq_msg = &_mq_msg;
+ svm_msg_q_t *mq = app_wrk->event_queue;
+ session_event_t *evt;
+ int rv;
+
+ if (PREDICT_FALSE (app_wrk->mq_congested))
+ goto handle_congestion;
+
+ rv = mq_try_lock_and_alloc_msg (mq, SESSION_MQ_CTRL_EVT_RING, mq_msg);
+ if (PREDICT_FALSE (rv))
+ goto handle_congestion;
+
+ evt = svm_msg_q_msg_data (mq, mq_msg);
+ clib_memset (evt, 0, sizeof (*evt));
+ evt->event_type = evt_type;
+ clib_memcpy_fast (evt->data, msg, msg_len);
+
/* fd (if any) is sent out-of-band before the mq message is posted */
+ if (fd != -1)
+ app_wrk_send_fd (app_wrk, fd);
+
+ svm_msg_q_add_and_unlock (mq, mq_msg);
+
+ return;
+
+handle_congestion:
+
+ app_wrk_add_mq_postponed_msg (app_wrk, SESSION_MQ_CTRL_EVT_RING, evt_type,
+ msg, msg_len, fd);
+}
+
/* Public wrapper: send a ctrl event that carries a file descriptor. */
+void
+app_wrk_send_ctrl_evt_fd (app_worker_t *app_wrk, u8 evt_type, void *msg,
+ u32 msg_len, int fd)
+{
+ app_wrk_send_ctrl_evt_inline (app_wrk, evt_type, msg, msg_len, fd);
+}
+
/* Public wrapper: send a ctrl event with no file descriptor (fd = -1). */
+void
+app_wrk_send_ctrl_evt (app_worker_t *app_wrk, u8 evt_type, void *msg,
+ u32 msg_len)
+{
+ app_wrk_send_ctrl_evt_inline (app_wrk, evt_type, msg, msg_len, -1);
+}
+
/* Post an RX io event for session s to the app worker.
 * Patch note: replaces the lock-flag variant. Builtin apps get a direct
 * callback; external apps get one event per fifo (deduplicated via
 * svm_fifo_has_event) on the io ring, falling back to the postponed-msg
 * path (returning -1) when the mq is congested or full. */
+static inline int
+app_send_io_evt_rx (app_worker_t * app_wrk, session_t * s)
+{
+ svm_msg_q_msg_t _mq_msg = { 0 }, *mq_msg = &_mq_msg;
+ session_event_t *evt;
+ svm_msg_q_t *mq;
+ u32 app_session;
+ int rv;
+
if (app_worker_application_is_builtin (app_wrk))
return app_worker_builtin_rx (app_wrk, s);
/* Event already pending for this fifo; app will drain it */
if (svm_fifo_has_event (s->rx_fifo))
return 0;
+ app_session = s->rx_fifo->shr->client_session_index;
mq = app_wrk->event_queue;
- if (lock)
- svm_msg_q_lock (mq);
- if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, SESSION_MQ_IO_EVT_RING)))
- {
- clib_warning ("evt q rings full");
- if (lock)
- svm_msg_q_unlock (mq);
- return -1;
- }
+ if (PREDICT_FALSE (app_wrk->mq_congested))
+ goto handle_congestion;
- msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
- ASSERT (!svm_msg_q_msg_is_invalid (&msg));
+ rv = mq_try_lock_and_alloc_msg (mq, SESSION_MQ_IO_EVT_RING, mq_msg);
- evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
- evt->session_index = s->rx_fifo->client_session_index;
+ if (PREDICT_FALSE (rv))
+ goto handle_congestion;
+
+ evt = svm_msg_q_msg_data (mq, mq_msg);
evt->event_type = SESSION_IO_EVT_RX;
+ evt->session_index = app_session;
(void) svm_fifo_set_event (s->rx_fifo);
- if (app_enqueue_evt (mq, &msg, lock))
- return -1;
+ svm_msg_q_add_and_unlock (mq, mq_msg);
+
return 0;
+
+handle_congestion:
+
+ app_wrk_add_mq_postponed_msg (app_wrk, SESSION_MQ_IO_EVT_RING,
+ SESSION_IO_EVT_RX, &app_session,
+ sizeof (app_session), -1);
+ return -1;
}
/* Post a TX io event for session s to the app worker. Mirrors
 * app_send_io_evt_rx. Patch note: builtin apps now get
 * app_worker_builtin_tx instead of being silently ignored, and
 * congestion/full-ring cases go through the postponed-msg path. */
static inline int
-app_send_io_evt_tx (app_worker_t * app_wrk, session_t * s, u8 lock)
+app_send_io_evt_tx (app_worker_t * app_wrk, session_t * s)
{
- svm_msg_q_t *mq;
+ svm_msg_q_msg_t _mq_msg = { 0 }, *mq_msg = &_mq_msg;
session_event_t *evt;
- svm_msg_q_msg_t msg;
+ svm_msg_q_t *mq;
+ u32 app_session;
+ int rv;
if (app_worker_application_is_builtin (app_wrk))
- return 0;
+ return app_worker_builtin_tx (app_wrk, s);
+ app_session = s->tx_fifo->shr->client_session_index;
mq = app_wrk->event_queue;
- if (lock)
- svm_msg_q_lock (mq);
- if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, SESSION_MQ_IO_EVT_RING)))
- {
- clib_warning ("evt q rings full");
- if (lock)
- svm_msg_q_unlock (mq);
- return -1;
- }
+ if (PREDICT_FALSE (app_wrk->mq_congested))
+ goto handle_congestion;
+
+ rv = mq_try_lock_and_alloc_msg (mq, SESSION_MQ_IO_EVT_RING, mq_msg);
- msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
- ASSERT (!svm_msg_q_msg_is_invalid (&msg));
+ if (PREDICT_FALSE (rv))
+ goto handle_congestion;
- evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
+ evt = svm_msg_q_msg_data (mq, mq_msg);
evt->event_type = SESSION_IO_EVT_TX;
- evt->session_index = s->tx_fifo->client_session_index;
+ evt->session_index = app_session;
- return app_enqueue_evt (mq, &msg, lock);
+ svm_msg_q_add_and_unlock (mq, mq_msg);
+
+ return 0;
+
+handle_congestion:
+
+ app_wrk_add_mq_postponed_msg (app_wrk, SESSION_MQ_IO_EVT_RING,
+ SESSION_IO_EVT_TX, &app_session,
+ sizeof (app_session), -1);
+ return -1;
}
/* *INDENT-OFF* */
typedef int (app_send_evt_handler_fn) (app_worker_t *app,
- session_t *s,
- u8 lock);
+ session_t *s);
static app_send_evt_handler_fn * const app_send_evt_handler_fns[2] = {
app_send_io_evt_rx,
app_send_io_evt_tx,
};
/* *INDENT-ON* */
-/**
- * Send event to application
- *
- * Logic from queue perspective is non-blocking. If there's
- * not enough space to enqueue a message, we return.
- */
-int
-app_worker_send_event (app_worker_t * app, session_t * s, u8 evt_type)
-{
- ASSERT (app && evt_type <= SESSION_IO_EVT_TX);
- return app_send_evt_handler_fns[evt_type] (app, s, 0 /* lock */ );
-}
-
/**
* Send event to application
*
app_worker_lock_and_send_event (app_worker_t * app, session_t * s,
u8 evt_type)
{
- return app_send_evt_handler_fns[evt_type] (app, s, 1 /* lock */ );
+ return app_send_evt_handler_fns[evt_type] (app, s);
}
u8 *
if (!app_wrk)
{
if (verbose)
- s = format (s, "%-40s%-25s%=10s%-15s%-15s%-10s", "Connection", "App",
- "Wrk", "API Client", "ListenerID", "SegManager");
+ s = format (s, "%-" SESSION_CLI_ID_LEN "s%-25s%-10s%-15s%-15s%-10s",
+ "Connection", "App", "Wrk", "API Client", "ListenerID",
+ "SegManager");
else
- s = format (s, "%-40s%-25s%=10s", "Connection", "App", "Wrk");
+ s = format (s, "%-" SESSION_CLI_ID_LEN "s%-25s%-10s", "Connection",
+ "App", "Wrk");
return s;
}
if (verbose)
{
- char buf[32];
- sprintf (buf, "%u(%u)", app_wrk->wrk_map_index, app_wrk->wrk_index);
- s = format (s, "%-40s%-25s%=10s%-15u%-15u%-10u", str, app_name,
- buf, app_wrk->api_client_index, handle, sm_index);
+ u8 *buf;
+ buf = format (0, "%u(%u)", app_wrk->wrk_map_index, app_wrk->wrk_index);
+ s = format (s, "%-" SESSION_CLI_ID_LEN "v%-25v%-10v%-15u%-15u%-10u", str,
+ app_name, buf, app_wrk->api_client_index, handle, sm_index);
+ vec_free (buf);
}
else
- s = format (s, "%-40s%-25s%=10u", str, app_name, app_wrk->wrk_map_index);
+ s = format (s, "%-" SESSION_CLI_ID_LEN "v%-25v%=10u", str, app_name,
+ app_wrk->wrk_map_index);
+
+ vec_free (str);
return s;
}
app_worker_t *app_wrk = va_arg (*args, app_worker_t *);
u32 indent = 1;
- s = format (s, "%U wrk-index %u app-index %u map-index %u "
- "api-client-index %d\n", format_white_space, indent,
- app_wrk->wrk_index, app_wrk->app_index, app_wrk->wrk_map_index,
- app_wrk->api_client_index);
+ s = format (s,
+ "%U wrk-index %u app-index %u map-index %u "
+ "api-client-index %d mq-cong %u\n",
+ format_white_space, indent, app_wrk->wrk_index,
+ app_wrk->app_index, app_wrk->wrk_map_index,
+ app_wrk->api_client_index, app_wrk->mq_congested);
return s;
}
/* CLI helper: print the connect-side sessions of a worker; a NULL
 * app_wrk prints only the header. Patch note: the hand-rolled walk over
 * every fifo segment/fifo is replaced by a single call to
 * segment_manager_format_sessions. */
void
app_worker_format_connects (app_worker_t * app_wrk, int verbose)
{
- svm_fifo_segment_private_t *fifo_segment;
- vlib_main_t *vm = vlib_get_main ();
segment_manager_t *sm;
- const u8 *app_name;
- u8 *s = 0;
/* Header */
if (!app_wrk)
{
- if (verbose)
- vlib_cli_output (vm, "%-40s%-20s%-15s%-10s", "Connection", "App",
- "API Client", "SegManager");
- else
- vlib_cli_output (vm, "%-40s%-20s", "Connection", "App");
+ segment_manager_format_sessions (0, verbose);
return;
}
/* No connects segment manager allocated yet; nothing to show */
if (app_wrk->connects_seg_manager == (u32) ~ 0)
return;
- app_name = application_name_from_index (app_wrk->app_index);
-
- /* Across all fifo segments */
sm = segment_manager_get (app_wrk->connects_seg_manager);
-
- /* *INDENT-OFF* */
- segment_manager_foreach_segment_w_lock (fifo_segment, sm, ({
- svm_fifo_t *fifo;
- u8 *str;
-
- fifo = svm_fifo_segment_get_fifo_list (fifo_segment);
- while (fifo)
- {
- u32 session_index, thread_index;
- session_t *session;
-
- session_index = fifo->master_session_index;
- thread_index = fifo->master_thread_index;
-
- session = session_get (session_index, thread_index);
- str = format (0, "%U", format_session, session, verbose);
-
- if (verbose)
- s = format (s, "%-40s%-20s%-15u%-10u", str, app_name,
- app_wrk->api_client_index, app_wrk->connects_seg_manager);
- else
- s = format (s, "%-40s%-20s", str, app_name);
-
- vlib_cli_output (vm, "%v", s);
- vec_reset_length (s);
- vec_free (str);
-
- fifo = fifo->next;
- }
- vec_free (s);
- }));
- /* *INDENT-ON* */
+ segment_manager_format_sessions (sm, verbose);
}
/*