/*
- * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Copyright (c) 2018-2019 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
#include <vppinfra/error.h>
#include <svm/queue.h>
-typedef struct svm_msg_q_ring_
+typedef struct svm_msg_q_shared_queue_
+{
+ pthread_mutex_t mutex; /* mutex protecting the queue */
+ pthread_cond_t condvar; /* condvar for blocking producers/consumers */
+ u32 head;
+ u32 tail;
+ volatile u32 cursize;
+ u32 maxsize;
+ u32 elsize;
+ u32 pad;
+ u8 data[0];
+} svm_msg_q_shared_queue_t;
+
+typedef struct svm_msg_q_queue_
+{
+ svm_msg_q_shared_queue_t *shr; /**< pointer to shared queue */
+ int evtfd; /**< producer/consumer eventfd */
+} svm_msg_q_queue_t;
+
+typedef struct svm_msg_q_ring_shared_
{
volatile u32 cursize; /**< current size of the ring */
u32 nitems; /**< max size of the ring */
volatile u32 head; /**< current head (for dequeue) */
volatile u32 tail; /**< current tail (for enqueue) */
u32 elsize; /**< size of an element */
- u8 *data; /**< chunk of memory for msg data */
-} svm_msg_q_ring_t;
+ u8 data[0]; /**< chunk of memory for msg data */
+} svm_msg_q_ring_shared_t;
+
+typedef struct svm_msg_q_ring_
+{
+ u32 nitems; /**< max size of the ring */
+ u32 elsize; /**< size of an element */
+ svm_msg_q_ring_shared_t *shr; /**< ring in shared memory */
+} __clib_packed svm_msg_q_ring_t;
+
+typedef struct svm_msg_q_shared_
+{
+ u32 n_rings; /**< number of rings after q */
+ u32 pad; /**< 8 byte alignment for q */
+ svm_msg_q_shared_queue_t q[0]; /**< queue for exchanging messages */
+} __clib_packed svm_msg_q_shared_t;
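+
+/*
+ * Illustrative layout sketch (an inference from the flexible array members
+ * above, not a normative spec): the shared region starts with the
+ * svm_msg_q_shared_t header, followed by the shared queue and its element
+ * storage, followed by each ring's svm_msg_q_ring_shared_t header and data:
+ *
+ *   size ~ sizeof (svm_msg_q_shared_t)
+ *        + sizeof (svm_msg_q_shared_queue_t) + q_nitems * q_elsize
+ *        + sum over rings (sizeof (svm_msg_q_ring_shared_t) + nitems * elsize)
+ *
+ * svm_msg_q_size_to_alloc() below computes the exact value.
+ */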
typedef struct svm_msg_q_
{
- svm_queue_t *q; /**< queue for exchanging messages */
+ svm_msg_q_queue_t q; /**< queue for exchanging messages */
svm_msg_q_ring_t *rings; /**< rings with message data*/
-} svm_msg_q_t;
+} __clib_packed svm_msg_q_t;
typedef struct svm_msg_q_ring_cfg_
{
* ring configs
* @return message queue
*/
-svm_msg_q_t *svm_msg_q_alloc (svm_msg_q_cfg_t * cfg);
+svm_msg_q_shared_t *svm_msg_q_alloc (svm_msg_q_cfg_t *cfg);
+/** Initialize queue in preallocated (shared) memory according to cfg */
+svm_msg_q_shared_t *svm_msg_q_init (void *base, svm_msg_q_cfg_t *cfg);
+/** Shared memory size needed for a queue built from cfg */
+uword svm_msg_q_size_to_alloc (svm_msg_q_cfg_t *cfg);
+
+/** Attach private queue handle to queue allocated in shared memory at smq_base */
+void svm_msg_q_attach (svm_msg_q_t *mq, void *smq_base);
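+
+/*
+ * Minimal usage sketch (illustrative only; the svm_msg_q_cfg_t and
+ * svm_msg_q_ring_cfg_t field names used here are assumptions, they are not
+ * shown in this excerpt). A queue is either allocated on the heap with
+ * svm_msg_q_alloc() or laid out in an existing segment sized with
+ * svm_msg_q_size_to_alloc() and initialized with svm_msg_q_init(); a private
+ * handle is then attached:
+ *
+ *   svm_msg_q_ring_cfg_t rc = { .nitems = 32, .elsize = 64, .data = 0 };
+ *   svm_msg_q_cfg_t cfg = { .q_nitems = 32, .n_rings = 1, .ring_cfgs = &rc };
+ *   svm_msg_q_shared_t *smq = svm_msg_q_alloc (&cfg);
+ *   svm_msg_q_t mq;
+ *   svm_msg_q_attach (&mq, smq);
+ */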
/**
* Free message queue
*/
svm_msg_q_ring_t *svm_msg_q_ring (svm_msg_q_t * mq, u32 ring_index);
+/**
+ * Set event fd for queue
+ *
+ * Once set, the queue uses eventfds exclusively for signaling and should
+ * only be used in non-blocking mode. Waiting for events should be done
+ * externally, e.g., with epoll.
+ *
+ * @param mq message queue
+ * @param fd consumer eventfd
+ */
+void svm_msg_q_set_eventfd (svm_msg_q_t *mq, int fd);
+
+/**
+ * Allocate event fd for queue
+ */
+int svm_msg_q_alloc_eventfd (svm_msg_q_t *mq);
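+
+/*
+ * Sketch of eventfd-based notification (illustrative; assumes the standard
+ * eventfd/epoll system calls and that the fd is shared with the peer, e.g.
+ * over a unix socket):
+ *
+ *   svm_msg_q_alloc_eventfd (mq);
+ *   int mq_fd = svm_msg_q_get_eventfd (mq);
+ *   int epfd = epoll_create1 (0);
+ *   struct epoll_event ev = { .events = EPOLLIN, .data.fd = mq_fd };
+ *   epoll_ctl (epfd, EPOLL_CTL_ADD, mq_fd, &ev);
+ *   ... epoll_wait () and drain the queue in non-blocking mode ...
+ */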
+
+/**
+ * Format message queue, shows msg count for each ring
+ */
+u8 *format_svm_msg_q (u8 *s, va_list *args);
+
+/**
+ * Check length of message queue
+ */
+static inline u32
+svm_msg_q_size (svm_msg_q_t *mq)
+{
+ return clib_atomic_load_relax_n (&mq->q.shr->cursize);
+}
+
/**
* Check if message queue is full
*/
static inline u8
svm_msg_q_is_full (svm_msg_q_t * mq)
{
- return (mq->q->cursize == mq->q->maxsize);
+ return (svm_msg_q_size (mq) == mq->q.shr->maxsize);
}
static inline u8
svm_msg_q_ring_is_full (svm_msg_q_t * mq, u32 ring_index)
{
- ASSERT (ring_index < vec_len (mq->rings));
- return (mq->rings[ring_index].cursize == mq->rings[ring_index].nitems);
+ svm_msg_q_ring_t *ring = vec_elt_at_index (mq->rings, ring_index);
+ return (clib_atomic_load_relax_n (&ring->shr->cursize) >= ring->nitems);
}
/**
static inline u8
svm_msg_q_is_empty (svm_msg_q_t * mq)
{
- return (mq->q->cursize == 0);
-}
-
-/**
- * Check length of message queue
- */
-static inline u32
-svm_msg_q_size (svm_msg_q_t * mq)
-{
- return mq->q->cursize;
+ return (svm_msg_q_size (mq) == 0);
}
/**
static inline int
svm_msg_q_try_lock (svm_msg_q_t * mq)
{
- return pthread_mutex_trylock (&mq->q->mutex);
+ int rv = pthread_mutex_trylock (&mq->q.shr->mutex);
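+ /* if the previous lock holder died, mark the robust mutex consistent */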
+ if (PREDICT_FALSE (rv == EOWNERDEAD))
+ rv = pthread_mutex_consistent (&mq->q.shr->mutex);
+ return rv;
}
/**
static inline int
svm_msg_q_lock (svm_msg_q_t * mq)
{
- return pthread_mutex_lock (&mq->q->mutex);
+ int rv = pthread_mutex_lock (&mq->q.shr->mutex);
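+ /* recover the robust mutex if its previous holder died while holding it */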
+ if (PREDICT_FALSE (rv == EOWNERDEAD))
+ rv = pthread_mutex_consistent (&mq->q.shr->mutex);
+ return rv;
}
+/**
+ * Unlock message queue
+ */
static inline void
-svm_msg_q_wait (svm_msg_q_t * mq)
+svm_msg_q_unlock (svm_msg_q_t * mq)
{
- pthread_cond_wait (&mq->q->condvar, &mq->q->mutex);
+ pthread_mutex_unlock (&mq->q.shr->mutex);
}
/**
- * Unlock message queue
+ * Wait for message queue event
+ *
+ * Must be called with the mutex held. When eventfds are used the queue is
+ * meant to be non-blocking, so blocking waits are handled as the exception
+ * here.
*/
-static inline void
-svm_msg_q_unlock (svm_msg_q_t * mq)
+void svm_msg_q_wait (svm_msg_q_t *mq);
+
+/**
+ * Timed wait for message queue event
+ *
+ * Must be called with mutex held.
+ *
+ * @param mq message queue
+ * @param timeout time in seconds
+ */
+int svm_msg_q_timedwait (svm_msg_q_t *mq, double timeout);
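+
+/*
+ * Sketch of a blocking consumer (illustrative; only applicable when no
+ * eventfd has been set, i.e. mutex/condvar signaling is in use):
+ *
+ *   svm_msg_q_lock (mq);
+ *   while (svm_msg_q_is_empty (mq))
+ *     svm_msg_q_wait (mq);
+ *   ... dequeue while still holding the lock ...
+ *   svm_msg_q_unlock (mq);
+ */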
+
+static inline int
+svm_msg_q_get_eventfd (svm_msg_q_t *mq)
{
- /* The other side of the connection is not polling */
- if (mq->q->cursize < (mq->q->maxsize / 8))
- (void) pthread_cond_broadcast (&mq->q->condvar);
- pthread_mutex_unlock (&mq->q->mutex);
+ return mq->q.evtfd;
}
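+
+/*
+ * Sketch: with eventfds, a consumer woken by epoll_wait () typically clears
+ * the eventfd counter before draining the queue (illustrative, standard
+ * eventfd semantics):
+ *
+ *   u64 cnt;
+ *   (void) read (svm_msg_q_get_eventfd (mq), &cnt, sizeof (cnt));
+ */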
#endif /* SRC_SVM_MESSAGE_QUEUE_H_ */