diff --git a/src/svm/message_queue.h b/src/svm/message_queue.h
index 4c16c97ca7c..bd76eda5d88 100644
--- a/src/svm/message_queue.h
+++ b/src/svm/message_queue.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Copyright (c) 2018-2019 Cisco and/or its affiliates.
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at:
@@ -22,22 +22,56 @@
 #include <vppinfra/clib.h>
 #include <vppinfra/error.h>
-#include <svm/queue.h>
+#include <vppinfra/lock.h>
 #include <pthread.h>
 
-typedef struct svm_msg_q_ring_
+typedef struct svm_msg_q_shr_queue_
+{
+  pthread_mutex_t mutex;  /* 8 bytes */
+  pthread_cond_t condvar; /* 8 bytes */
+  u32 head;
+  u32 tail;
+  volatile u32 cursize;
+  u32 maxsize;
+  u32 elsize;
+  u32 pad;
+  u8 data[0];
+} svm_msg_q_shared_queue_t;
+
+typedef struct svm_msg_q_queue_
+{
+  svm_msg_q_shared_queue_t *shr;	/**< pointer to shared queue */
+  int evtfd;				/**< producer/consumer eventfd */
+  clib_spinlock_t lock;			/**< private lock for multi-producer */
+} svm_msg_q_queue_t;
+
+typedef struct svm_msg_q_ring_shared_
 {
   volatile u32 cursize;			/**< current size of the ring */
   u32 nitems;				/**< max size of the ring */
   volatile u32 head;			/**< current head (for dequeue) */
   volatile u32 tail;			/**< current tail (for enqueue) */
   u32 elsize;				/**< size of an element */
-  u8 *data;				/**< chunk of memory for msg data */
+  u8 data[0];				/**< chunk of memory for msg data */
+} svm_msg_q_ring_shared_t;
+
+typedef struct svm_msg_q_ring_
+{
+  u32 nitems;				/**< max size of the ring */
+  u32 elsize;				/**< size of an element */
+  svm_msg_q_ring_shared_t *shr;		/**< ring in shared memory */
 } __clib_packed svm_msg_q_ring_t;
 
+typedef struct svm_msg_q_shared_
+{
+  u32 n_rings;				/**< number of rings after q */
+  u32 pad;				/**< 8 byte alignment for q */
+  svm_msg_q_shared_queue_t q[0];	/**< queue for exchanging messages */
+} __clib_packed svm_msg_q_shared_t;
+
 typedef struct svm_msg_q_
 {
-  svm_queue_t *q;			/**< queue for exchanging messages */
+  svm_msg_q_queue_t q;			/**< queue for exchanging messages */
   svm_msg_q_ring_t *rings;		/**< rings with message data */
 } __clib_packed svm_msg_q_t;
 
@@ -67,6 +101,13 @@ typedef union
 } svm_msg_q_msg_t;
 
 #define SVM_MQ_INVALID_MSG { .as_u64 = ~0 }
+
+typedef enum svm_msg_q_wait_type_
+{
+  SVM_MQ_WAIT_EMPTY,
+  SVM_MQ_WAIT_FULL
+} svm_msg_q_wait_type_t;
+
 /**
  * Allocate message queue
  *
@@ -78,7 +119,16 @@ typedef union
  *				ring configs
  * @return			message queue
  */
-svm_msg_q_t *svm_msg_q_alloc (svm_msg_q_cfg_t * cfg);
+svm_msg_q_shared_t *svm_msg_q_alloc (svm_msg_q_cfg_t *cfg);
+svm_msg_q_shared_t *svm_msg_q_init (void *base, svm_msg_q_cfg_t *cfg);
+uword svm_msg_q_size_to_alloc (svm_msg_q_cfg_t *cfg);
+
+void svm_msg_q_attach (svm_msg_q_t *mq, void *smq_base);
+
+/**
+ * Cleanup mq's private data
+ */
+void svm_msg_q_cleanup (svm_msg_q_t *mq);
 
 /**
  * Free message queue
@@ -170,6 +220,7 @@ void svm_msg_q_add_and_unlock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg);
  * Consumer dequeue one message from queue
  *
  * This returns the message pointing to the data in the message rings.
+ * Should only be used in single consumer scenarios as no locks are grabbed.
  * The consumer is expected to call @ref svm_msg_q_free_msg once it
  * finishes processing/copies the message data.
  *
@@ -183,18 +234,34 @@ int svm_msg_q_sub (svm_msg_q_t * mq, svm_msg_q_msg_t * msg,
 		   svm_q_conditional_wait_t cond, u32 time);
 
 /**
- * Consumer dequeue one message from queue with mutex held
+ * Consumer dequeue one message from queue
  *
- * Returns the message pointing to the data in the message rings under the
- * assumption that the message queue lock is already held. The consumer is
- * expected to call @ref svm_msg_q_free_msg once it finishes
+ * Returns the message pointing to the data in the message rings. Should only
+ * be used in single consumer scenarios as no locks are grabbed. The consumer
+ * is expected to call @ref svm_msg_q_free_msg once it finishes
  * processing/copies the message data.
 *
 * @param mq		message queue
 * @param msg		pointer to structure where message is to be received
 * @return		success status
 */
-void svm_msg_q_sub_w_lock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg);
+int svm_msg_q_sub_raw (svm_msg_q_t *mq, svm_msg_q_msg_t *elem);
+
+/**
+ * Consumer dequeue multiple messages from queue
+ *
+ * Returns messages pointing to the data in the message rings. Should only
+ * be used in single consumer scenarios as no locks are grabbed. The consumer
+ * is expected to call @ref svm_msg_q_free_msg once it finishes
+ * processing/copies the message data.
+ *
+ * @param mq		message queue
+ * @param msg_buf	pointer to array of messages to be received
+ * @param n_msgs	length of msg_buf array
+ * @return		number of messages dequeued
+ */
+int svm_msg_q_sub_raw_batch (svm_msg_q_t *mq, svm_msg_q_msg_t *msg_buf,
+			     u32 n_msgs);
 
 /**
  * Get data for message in queue
@@ -214,20 +281,51 @@ void *svm_msg_q_msg_data (svm_msg_q_t * mq, svm_msg_q_msg_t * msg);
  */
 svm_msg_q_ring_t *svm_msg_q_ring (svm_msg_q_t * mq, u32 ring_index);
 
+/**
+ * Set event fd for queue
+ *
+ * If set, the queue will exclusively use eventfds for signaling. Afterwards,
+ * the queue should only be used in non-blocking mode. Waiting for events
+ * should be done externally using something like epoll.
+ *
+ * @param mq		message queue
+ * @param fd		consumer eventfd
+ */
+void svm_msg_q_set_eventfd (svm_msg_q_t *mq, int fd);
+
+/**
+ * Allocate event fd for queue
+ */
+int svm_msg_q_alloc_eventfd (svm_msg_q_t *mq);
+
+/**
+ * Format message queue, shows msg count for each ring
+ */
+u8 *format_svm_msg_q (u8 *s, va_list *args);
+
+/**
+ * Check length of message queue
+ */
+static inline u32
+svm_msg_q_size (svm_msg_q_t *mq)
+{
+  return clib_atomic_load_relax_n (&mq->q.shr->cursize);
+}
+
 /**
  * Check if message queue is full
  */
 static inline u8
 svm_msg_q_is_full (svm_msg_q_t * mq)
 {
-  return (mq->q->cursize == mq->q->maxsize);
+  return (svm_msg_q_size (mq) == mq->q.shr->maxsize);
 }
 
 static inline u8
 svm_msg_q_ring_is_full (svm_msg_q_t * mq, u32 ring_index)
 {
-  ASSERT (ring_index < vec_len (mq->rings));
-  return (mq->rings[ring_index].cursize == mq->rings[ring_index].nitems);
+  svm_msg_q_ring_t *ring = vec_elt_at_index (mq->rings, ring_index);
+  return (clib_atomic_load_relax_n (&ring->shr->cursize) >= ring->nitems);
 }
 
 /**
@@ -236,16 +334,7 @@ svm_msg_q_ring_is_full (svm_msg_q_t * mq, u32 ring_index)
 static inline u8
 svm_msg_q_is_empty (svm_msg_q_t * mq)
 {
-  return (mq->q->cursize == 0);
-}
-
-/**
- * Check length of message queue
- */
-static inline u32
-svm_msg_q_size (svm_msg_q_t * mq)
-{
-  return mq->q->cursize;
+  return (svm_msg_q_size (mq) == 0);
 }
 
 /**
@@ -263,7 +352,17 @@ svm_msg_q_msg_is_invalid (svm_msg_q_msg_t * msg)
 static inline int
 svm_msg_q_try_lock (svm_msg_q_t * mq)
 {
-  return pthread_mutex_trylock (&mq->q->mutex);
+  if (mq->q.evtfd == -1)
+    {
+      int rv = pthread_mutex_trylock (&mq->q.shr->mutex);
+      if (PREDICT_FALSE (rv == EOWNERDEAD))
+	rv = pthread_mutex_consistent (&mq->q.shr->mutex);
+      return rv;
+    }
+  else
+    {
+      return !clib_spinlock_trylock (&mq->q.lock);
+    }
 }
 
 /**
@@ -272,7 +371,18 @@ svm_msg_q_try_lock (svm_msg_q_t * mq)
 static inline int
 svm_msg_q_lock (svm_msg_q_t * mq)
 {
-  return pthread_mutex_lock (&mq->q->mutex);
+  if (mq->q.evtfd == -1)
+    {
+      int rv = pthread_mutex_lock (&mq->q.shr->mutex);
+      if (PREDICT_FALSE (rv == EOWNERDEAD))
+	rv = pthread_mutex_consistent (&mq->q.shr->mutex);
+      return rv;
+    }
+  else
+    {
+      clib_spinlock_lock (&mq->q.lock);
+      return 0;
+    }
 }
 
 /**
@@ -281,22 +391,31 @@ svm_msg_q_lock (svm_msg_q_t * mq)
 static inline void
 svm_msg_q_unlock (svm_msg_q_t * mq)
 {
-  /* The other side of the connection is not polling */
-  if (mq->q->cursize < (mq->q->maxsize / 8))
-    (void) pthread_cond_broadcast (&mq->q->condvar);
-  pthread_mutex_unlock (&mq->q->mutex);
+  if (mq->q.evtfd == -1)
+    {
+      pthread_mutex_unlock (&mq->q.shr->mutex);
+    }
+  else
+    {
+      clib_spinlock_unlock (&mq->q.lock);
+    }
 }
 
 /**
  * Wait for message queue event
  *
- * Must be called with mutex held
+ * When eventfds are not configured, the shared memory mutex is locked
+ * before waiting on the condvar. Typically called by consumers.
 */
-static inline void
-svm_msg_q_wait (svm_msg_q_t * mq)
-{
-  pthread_cond_wait (&mq->q->condvar, &mq->q->mutex);
-}
+int svm_msg_q_wait (svm_msg_q_t *mq, svm_msg_q_wait_type_t type);
+
+/**
+ * Wait for message queue event as producer
+ *
+ * Similar to @ref svm_msg_q_wait but lock (mutex or spinlock) must
+ * be held. Should only be called by producers.
+ */
+int svm_msg_q_wait_prod (svm_msg_q_t *mq);
 
 /**
  * Timed wait for message queue event
@@ -306,16 +425,12 @@
  * @param mq		message queue
  * @param timeout	time in seconds
  */
+int svm_msg_q_timedwait (svm_msg_q_t *mq, double timeout);
+
 static inline int
-svm_msg_q_timedwait (svm_msg_q_t * mq, double timeout)
+svm_msg_q_get_eventfd (svm_msg_q_t *mq)
 {
-  struct timespec ts;
-
-  ts.tv_sec = unix_time_now () + (u32) timeout;
-  ts.tv_nsec = (timeout - (u32) timeout) * 1e9;
-  if (pthread_cond_timedwait (&mq->q->condvar, &mq->q->mutex, &ts))
-    return -1;
-  return 0;
+  return mq->q.evtfd;
 }
 
 #endif /* SRC_SVM_MESSAGE_QUEUE_H_ */
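
Usage note (editor's sketch, not part of the diff): the reworked API splits shared and private queue state. A minimal sketch of how the allocation pieces might fit together; the svm_msg_q_cfg_t/svm_msg_q_ring_cfg_t field names (consumer_pid, n_rings, ring_cfgs, nitems, elsize) are assumed from the full header, which this diff does not show, and the two-ring layout and sizes are illustrative only:

/* Allocate shared state for a queue with two message rings, then
 * attach a private svm_msg_q_t wrapper to it. Field names are assumed
 * from the full header; ring counts/sizes are illustrative. */
svm_msg_q_ring_cfg_t rc[2] = { { .nitems = 64, .elsize = 64 },
			       { .nitems = 16, .elsize = 512 } };
svm_msg_q_cfg_t cfg = { .consumer_pid = 0, .n_rings = 2, .ring_cfgs = rc };
svm_msg_q_shared_t *smq = svm_msg_q_alloc (&cfg); /* heap-backed shared part */
svm_msg_q_t mq;

svm_msg_q_attach (&mq, smq);	/* fill in private wrapper (shr, evtfd, lock) */
/* ... produce/consume ... */
svm_msg_q_cleanup (&mq);	/* per the doc above, frees only private data */

For placement-style setup in an existing shared memory segment, svm_msg_q_size_to_alloc plus svm_msg_q_init appear to cover the same ground: size the region first, then initialize the queue at a caller-provided base address.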
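
A second sketch, showing the producer-side locking protocol implied by svm_msg_q_lock/svm_msg_q_wait_prod/svm_msg_q_add_and_unlock and the new lockless single-consumer path. svm_msg_q_alloc_msg_w_ring and svm_msg_q_free_msg are assumed from the full header (the latter is only referenced via @ref here); the ring index, payload type, batch size, and process_evt() are hypothetical:

/* Producer: take the lock, wait for space while holding it (per the
 * svm_msg_q_wait_prod doc above), fill a ring slot, then enqueue. */
svm_msg_q_msg_t msg;
my_evt_t *e;			/* hypothetical payload type */

svm_msg_q_lock (mq);
while (svm_msg_q_is_full (mq) || svm_msg_q_ring_is_full (mq, ring))
  svm_msg_q_wait_prod (mq);
msg = svm_msg_q_alloc_msg_w_ring (mq, ring); /* assumed helper */
e = svm_msg_q_msg_data (mq, &msg);
e->kind = MY_EVT_RX;		/* fill in payload */
svm_msg_q_add_and_unlock (mq, &msg);

/* Single consumer: block until non-empty, then drain lock-free in
 * batches, returning each ring slot with svm_msg_q_free_msg. */
svm_msg_q_msg_t msgs[32];
int i, n;

if (svm_msg_q_is_empty (mq))
  svm_msg_q_wait (mq, SVM_MQ_WAIT_EMPTY); /* assumed: wait while empty */
while ((n = svm_msg_q_sub_raw_batch (mq, msgs, 32)) > 0)
  for (i = 0; i < n; i++)
    {
      process_evt (svm_msg_q_msg_data (mq, &msgs[i])); /* hypothetical */
      svm_msg_q_free_msg (mq, &msgs[i]);
    }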
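
Finally, a sketch of the eventfd path suggested by the svm_msg_q_set_eventfd doc: once the queue signals through an eventfd, a consumer waits with epoll and drains in non-blocking mode. Only standard Linux epoll/eventfd calls plus functions declared in this diff are used; the drain step is elided:

#include <sys/epoll.h>
#include <unistd.h>

struct epoll_event ev = { .events = EPOLLIN }, out;
int epfd = epoll_create1 (0);
u64 cnt;

svm_msg_q_alloc_eventfd (mq);	/* queue now signals exclusively via eventfd */
ev.data.fd = svm_msg_q_get_eventfd (mq);
epoll_ctl (epfd, EPOLL_CTL_ADD, ev.data.fd, &ev);

while (epoll_wait (epfd, &out, 1, -1 /* block */) > 0)
  {
    read (out.data.fd, &cnt, sizeof (cnt)); /* clear the eventfd counter */
    /* drain the queue with svm_msg_q_sub_raw_batch, as sketched above */
  }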