2 * Copyright (c) 2018-2019 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 * @brief Unidirectional shared-memory multi-ring message queue
20 #ifndef SRC_SVM_MESSAGE_QUEUE_H_
21 #define SRC_SVM_MESSAGE_QUEUE_H_
23 #include <vppinfra/clib.h>
24 #include <vppinfra/error.h>
25 #include <vppinfra/lock.h>
26 #include <svm/queue.h>
28 typedef struct svm_msg_q_shr_queue_
30 pthread_mutex_t mutex; /* 8 bytes */
31 pthread_cond_t condvar; /* 8 bytes */
39 } svm_msg_q_shared_queue_t;
41 typedef struct svm_msg_q_queue_
43 svm_msg_q_shared_queue_t *shr; /**< pointer to shared queue */
44 int evtfd; /**< producer/consumer eventfd */
45 clib_spinlock_t lock; /**< private lock for multi-producer */
48 typedef struct svm_msg_q_ring_shared_
50 volatile u32 cursize; /**< current size of the ring */
51 u32 nitems; /**< max size of the ring */
52 volatile u32 head; /**< current head (for dequeue) */
53 volatile u32 tail; /**< current tail (for enqueue) */
54 u32 elsize; /**< size of an element */
55 u8 data[0]; /**< chunk of memory for msg data */
56 } svm_msg_q_ring_shared_t;
58 typedef struct svm_msg_q_ring_
60 u32 nitems; /**< max size of the ring */
61 u32 elsize; /**< size of an element */
62 svm_msg_q_ring_shared_t *shr; /**< ring in shared memory */
63 } __clib_packed svm_msg_q_ring_t;
65 typedef struct svm_msg_q_shared_
67 u32 n_rings; /**< number of rings after q */
68 u32 pad; /**< 8 byte alignment for q */
69 svm_msg_q_shared_queue_t q[0]; /**< queue for exchanging messages */
70 } __clib_packed svm_msg_q_shared_t;
72 typedef struct svm_msg_q_
74 svm_msg_q_queue_t q; /**< queue for exchanging messages */
75 svm_msg_q_ring_t *rings; /**< rings with message data*/
76 } __clib_packed svm_msg_q_t;
78 typedef struct svm_msg_q_ring_cfg_
83 } svm_msg_q_ring_cfg_t;
85 typedef struct svm_msg_q_cfg_
87 int consumer_pid; /**< pid of msg consumer */
88 u32 q_nitems; /**< msg queue size (not rings) */
89 u32 n_rings; /**< number of msg rings */
90 svm_msg_q_ring_cfg_t *ring_cfgs; /**< array of ring cfgs */
97 u32 ring_index; /**< ring index, could be u8 */
98 u32 elt_index; /**< index in ring */
103 #define SVM_MQ_INVALID_MSG { .as_u64 = ~0 }
105 typedef enum svm_msg_q_wait_type_
109 } svm_msg_q_wait_type_t;
112 * Allocate message queue
114 * Allocates a message queue on the heap. Based on the configuration options,
115 * apart from the message queue this also allocates (one or multiple)
116 * shared-memory rings for the messages.
118 * @param cfg configuration options: queue len, consumer pid,
120 * @return message queue
122 svm_msg_q_shared_t *svm_msg_q_alloc (svm_msg_q_cfg_t *cfg);
123 svm_msg_q_shared_t *svm_msg_q_init (void *base, svm_msg_q_cfg_t *cfg);
124 uword svm_msg_q_size_to_alloc (svm_msg_q_cfg_t *cfg);
126 void svm_msg_q_attach (svm_msg_q_t *mq, void *smq_base);
129 * Cleanup mq's private data
131 void svm_msg_q_cleanup (svm_msg_q_t *mq);
136 * @param mq message queue to be freed
138 void svm_msg_q_free (svm_msg_q_t * mq);
141 * Allocate message buffer
143 * Message is allocated on the first available ring capable of holding
144 * the requested number of bytes.
146 * @param mq message queue
147 * @param nbytes number of bytes needed for message
148 * @return message structure pointing to the ring and position
151 svm_msg_q_msg_t svm_msg_q_alloc_msg (svm_msg_q_t * mq, u32 nbytes);
154 * Allocate message buffer on ring
156 * Message is allocated, on requested ring. The caller MUST check that
157 * the ring is not full.
159 * @param mq message queue
160 * @param ring_index ring on which the allocation should occur
161 * @return message structure pointing to the ring and position
164 svm_msg_q_msg_t svm_msg_q_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index);
167 * Lock message queue and allocate message buffer on ring
169 * This should be used when multiple writers/readers are expected to
170 * compete for the rings/queue. Message should be enqueued by calling
171 * @ref svm_msg_q_add_w_lock and the caller MUST unlock the queue once
172 * the message is enqueued.
174 * @param mq message queue
175 * @param ring_index ring on which the allocation should occur
176 * @param noblock flag that indicates if request should block
177 * @param msg pointer to message to be filled in
178 * @return 0 on success, negative number otherwise
180 int svm_msg_q_lock_and_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index,
181 u8 noblock, svm_msg_q_msg_t * msg);
184 * Free message buffer
186 * Marks message buffer on ring as free.
188 * @param mq message queue
189 * @param msg message to be freed
191 void svm_msg_q_free_msg (svm_msg_q_t * mq, svm_msg_q_msg_t * msg);
194 * Producer enqueue one message to queue
196 * Must be called with mq locked. Prior to calling this, the producer should've
197 * obtained a message buffer from one of the rings.
199 * @param mq message queue
200 * @param msg message to be enqueued
202 void svm_msg_q_add_raw (svm_msg_q_t *mq, svm_msg_q_msg_t *msg);
205 * Producer enqueue one message to queue
207 * Prior to calling this, the producer should've obtained a message buffer
208 * from one of the rings by calling @ref svm_msg_q_alloc_msg.
210 * @param mq message queue
211 * @param msg message (pointer to ring position) to be enqueued
212 * @param nowait flag to indicate if request is blocking or not
213 * @return success status
215 int svm_msg_q_add (svm_msg_q_t * mq, svm_msg_q_msg_t * msg, int nowait);
218 * Producer enqueue one message to queue with mutex held
220 * Prior to calling this, the producer should've obtained a message buffer
221 * from one of the rings by calling @ref svm_msg_q_alloc_msg. It assumes
222 * the queue mutex is held.
224 * @param mq message queue
225 * @param msg message (pointer to ring position) to be enqueued
226 * @return success status
228 void svm_msg_q_add_and_unlock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg);
231 * Consumer dequeue one message from queue
233 * This returns the message pointing to the data in the message rings.
234 * Should only be used in single consumer scenarios as no locks are grabbed.
235 * The consumer is expected to call @ref svm_msg_q_free_msg once it
236 * finishes processing/copies the message data.
238 * @param mq message queue
239 * @param msg pointer to structure where message is to be received
240 * @param cond flag that indicates if request should block or not
241 * @param time time to wait if condition is SVM_Q_TIMEDWAIT
242 * @return success status
244 int svm_msg_q_sub (svm_msg_q_t * mq, svm_msg_q_msg_t * msg,
245 svm_q_conditional_wait_t cond, u32 time);
248 * Consumer dequeue one message from queue
250 * Returns the message pointing to the data in the message rings. Should only
251 * be used in single consumer scenarios as no locks are grabbed. The consumer
252 * is expected to call @ref svm_msg_q_free_msg once it finishes
253 * processing/copies the message data.
255 * @param mq message queue
256 * @param msg pointer to structure where message is to be received
257 * @return success status
259 int svm_msg_q_sub_raw (svm_msg_q_t *mq, svm_msg_q_msg_t *elem);
262 * Consumer dequeue multiple messages from queue
264 * Returns the message pointing to the data in the message rings. Should only
265 * be used in single consumer scenarios as no locks are grabbed. The consumer
266 * is expected to call @ref svm_msg_q_free_msg once it finishes
267 * processing/copies the message data.
269 * @param mq message queue
270 * @param msg_buf pointer to array of messages to be received
271 * @param n_msgs length of msg_buf array
272 * @return number of messages dequeued
274 int svm_msg_q_sub_raw_batch (svm_msg_q_t *mq, svm_msg_q_msg_t *msg_buf,
278 * Get data for message in queue
280 * @param mq message queue
281 * @param msg message for which the data is requested
282 * @return pointer to data
284 void *svm_msg_q_msg_data (svm_msg_q_t * mq, svm_msg_q_msg_t * msg);
287 * Get message queue ring
289 * @param mq message queue
290 * @param ring_index index of ring
291 * @return pointer to ring
293 svm_msg_q_ring_t *svm_msg_q_ring (svm_msg_q_t * mq, u32 ring_index);
296 * Set event fd for queue
298 * If set, queue will exclusively use eventfds for signaling. Moreover,
299 * afterwards, the queue should only be used in non-blocking mode. Waiting
300 * for events should be done externally using something like epoll.
302 * @param mq message queue
303 * @param fd consumer eventfd
305 void svm_msg_q_set_eventfd (svm_msg_q_t *mq, int fd);
308 * Allocate event fd for queue
310 int svm_msg_q_alloc_eventfd (svm_msg_q_t *mq);
313 * Format message queue, shows msg count for each ring
315 u8 *format_svm_msg_q (u8 *s, va_list *args);
318 * Check length of message queue
321 svm_msg_q_size (svm_msg_q_t *mq)
323 return clib_atomic_load_relax_n (&mq->q.shr->cursize);
327 * Check if message queue is full
330 svm_msg_q_is_full (svm_msg_q_t * mq)
332 return (svm_msg_q_size (mq) == mq->q.shr->maxsize);
336 svm_msg_q_ring_is_full (svm_msg_q_t * mq, u32 ring_index)
338 svm_msg_q_ring_t *ring = vec_elt_at_index (mq->rings, ring_index);
339 return (clib_atomic_load_relax_n (&ring->shr->cursize) >= ring->nitems);
343 svm_msg_q_or_ring_is_full (svm_msg_q_t *mq, u32 ring_index)
345 return (svm_msg_q_is_full (mq) || svm_msg_q_ring_is_full (mq, ring_index));
349 * Check if message queue is empty
352 svm_msg_q_is_empty (svm_msg_q_t * mq)
354 return (svm_msg_q_size (mq) == 0);
358 * Check if message is invalid
361 svm_msg_q_msg_is_invalid (svm_msg_q_msg_t * msg)
363 return (msg->as_u64 == (u64) ~ 0);
367 * Try locking message queue
370 svm_msg_q_try_lock (svm_msg_q_t * mq)
372 if (mq->q.evtfd == -1)
374 int rv = pthread_mutex_trylock (&mq->q.shr->mutex);
375 if (PREDICT_FALSE (rv == EOWNERDEAD))
376 rv = pthread_mutex_consistent (&mq->q.shr->mutex);
381 return !clib_spinlock_trylock (&mq->q.lock);
386 * Lock, or block trying, the message queue
389 svm_msg_q_lock (svm_msg_q_t * mq)
391 if (mq->q.evtfd == -1)
393 int rv = pthread_mutex_lock (&mq->q.shr->mutex);
394 if (PREDICT_FALSE (rv == EOWNERDEAD))
395 rv = pthread_mutex_consistent (&mq->q.shr->mutex);
400 clib_spinlock_lock (&mq->q.lock);
406 * Unlock message queue
409 svm_msg_q_unlock (svm_msg_q_t * mq)
411 if (mq->q.evtfd == -1)
413 pthread_mutex_unlock (&mq->q.shr->mutex);
417 clib_spinlock_unlock (&mq->q.lock);
422 * Wait for message queue event
424 * When eventfds are not configured, the shared memory mutex is locked
425 * before waiting on the condvar. Typically called by consumers.
427 int svm_msg_q_wait (svm_msg_q_t *mq, svm_msg_q_wait_type_t type);
430 * Wait for message queue event as producer
432 * Similar to @ref svm_msg_q_wait but lock (mutex or spinlock) must
433 * be held. Should only be called by producers.
435 int svm_msg_q_wait_prod (svm_msg_q_t *mq);
438 * Wait for message queue or ring event as producer
440 * Similar to @ref svm_msg_q_wait but lock (mutex or spinlock) must
441 * be held. Should only be called by producers.
443 int svm_msg_q_or_ring_wait_prod (svm_msg_q_t *mq, u32 ring_index);
446 * Timed wait for message queue event
448 * Must be called with mutex held.
450 * @param mq message queue
451 * @param timeout time in seconds
453 int svm_msg_q_timedwait (svm_msg_q_t *mq, double timeout);
456 svm_msg_q_get_eventfd (svm_msg_q_t *mq)
461 #endif /* SRC_SVM_MESSAGE_QUEUE_H_ */
464 * fd.io coding-style-patch-verification: ON
467 * eval: (c-set-style "gnu")