/**
 * Enqueue an already-allocated message onto an app's event message queue.
 *
 * @param mq   target message queue
 * @param msg  message previously allocated from one of @a mq's rings
 * @param lock if non-zero, the caller already holds the queue mutex
 *             (taken via svm_msg_q_lock) and expects it released here;
 *             if zero, svm_msg_q_add acquires/releases it internally
 * @return 0 on success, -1 on failure (msg freed on the full-queue path)
 */
static inline int
app_enqueue_evt (svm_msg_q_t * mq, svm_msg_q_msg_t * msg, u8 lock)
{
  if (PREDICT_FALSE (svm_msg_q_is_full (mq)))
    {
      clib_warning ("evt q full");
      svm_msg_q_free_msg (mq, msg);
      /* Only release the mutex if we actually hold it; unlocking an
       * un-owned pthread mutex is undefined behavior. */
      if (lock)
	svm_msg_q_unlock (mq);
      return -1;
    }

  if (lock)
    {
      /* Add under the held lock and release it in one step */
      svm_msg_q_add_and_unlock (mq, msg);
      return 0;
    }

  /* Even when not locking the ring, we must wait for queue mutex */
  if (svm_msg_q_add (mq, msg, SVM_Q_WAIT))
    {
      clib_warning ("msg q add returned");
      return -1;
    }
  return 0;
}
static inline int
app_send_io_evt_rx (application_t * app, stream_session_t * s, u8 lock)
{
- session_fifo_event_t *evt;
+ session_event_t *evt;
svm_msg_q_msg_t msg;
svm_msg_q_t *mq;
msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
ASSERT (!svm_msg_q_msg_is_invalid (&msg));
- evt = (session_fifo_event_t *) svm_msg_q_msg_data (mq, &msg);
+ evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
evt->fifo = s->server_rx_fifo;
evt->event_type = FIFO_EVENT_APP_RX;
app_send_io_evt_tx (application_t * app, stream_session_t * s, u8 lock)
{
svm_msg_q_t *mq;
- session_fifo_event_t *evt;
+ session_event_t *evt;
svm_msg_q_msg_t msg;
if (application_is_builtin (app))
msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
ASSERT (!svm_msg_q_msg_is_invalid (&msg));
- evt = (session_fifo_event_t *) svm_msg_q_msg_data (mq, &msg);
+ evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
evt->event_type = FIFO_EVENT_APP_TX;
evt->fifo = s->server_tx_fifo;