/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <svm/message_queue.h>
#include <vppinfra/mem.h>
#include <vppinfra/format.h>
#include <sys/eventfd.h>

static inline svm_msg_q_ring_t *
svm_msg_q_ring_inline (svm_msg_q_t * mq, u32 ring_index)
{
  return vec_elt_at_index (mq->rings, ring_index);
}

svm_msg_q_ring_t *
svm_msg_q_ring (svm_msg_q_t * mq, u32 ring_index)
{
  return svm_msg_q_ring_inline (mq, ring_index);
}

static inline void *
svm_msg_q_ring_data (svm_msg_q_ring_t * ring, u32 elt_index)
{
  ASSERT (elt_index < ring->nitems);
  return (ring->shr->data + elt_index * ring->elsize);
}

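/*
 * Initialize a message queue in a caller-provided memory region. The
 * layout built below is: the svm_msg_q_shared_t header, immediately
 * followed by the svm_queue_t used to exchange message indices,
 * followed by each ring's shared header and data, all contiguous.
 */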
svm_msg_q_shared_t *
svm_msg_q_init (void *base, svm_msg_q_cfg_t *cfg)
{
  svm_msg_q_ring_shared_t *ring;
  svm_msg_q_shared_t *smq;
  u32 q_sz, offset;
  int i;

  q_sz = sizeof (svm_queue_t) + cfg->q_nitems * sizeof (svm_msg_q_msg_t);

  smq = (svm_msg_q_shared_t *) base;
  svm_queue_init (&smq->q, cfg->q_nitems, sizeof (svm_msg_q_msg_t));
  smq->q->consumer_pid = cfg->consumer_pid;
  smq->n_rings = cfg->n_rings;
  ring = (void *) ((u8 *) smq->q + q_sz);
  for (i = 0; i < cfg->n_rings; i++)
    {
      ring->elsize = cfg->ring_cfgs[i].elsize;
      ring->nitems = cfg->ring_cfgs[i].nitems;
      ring->cursize = ring->head = ring->tail = 0;
      offset = sizeof (*ring) + ring->nitems * ring->elsize;
      ring = (void *) ((u8 *) ring + offset);
    }

  return smq;
}

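/*
 * Compute the number of bytes needed for the shared header, the index
 * queue and all rings. Rings that supply their own data buffer
 * (ring_cfgs[i].data set) contribute only their shared header.
 */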
uword
svm_msg_q_size_to_alloc (svm_msg_q_cfg_t *cfg)
{
  svm_msg_q_ring_cfg_t *ring_cfg;
  uword rings_sz = 0, mq_sz;
  u32 q_sz;
  int i;

  ASSERT (cfg);

  rings_sz = sizeof (svm_msg_q_ring_shared_t) * cfg->n_rings;
  for (i = 0; i < cfg->n_rings; i++)
    {
      if (cfg->ring_cfgs[i].data)
        continue;
      ring_cfg = &cfg->ring_cfgs[i];
      rings_sz += (uword) ring_cfg->nitems * ring_cfg->elsize;
    }

  q_sz = sizeof (svm_queue_t) + cfg->q_nitems * sizeof (svm_msg_q_msg_t);
  mq_sz = sizeof (svm_msg_q_shared_t) + q_sz + rings_sz;

  return mq_sz;
}

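/*
 * Allocate a queue from the heap and initialize it. To place the
 * queue in shared memory instead, a caller can reserve
 * svm_msg_q_size_to_alloc (cfg) bytes in the target segment and call
 * svm_msg_q_init on that region directly.
 */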
svm_msg_q_shared_t *
svm_msg_q_alloc (svm_msg_q_cfg_t *cfg)
{
  uword mq_sz;
  u8 *base;

  mq_sz = svm_msg_q_size_to_alloc (cfg);
  base = clib_mem_alloc_aligned (mq_sz, CLIB_CACHE_LINE_BYTES);
  if (!base)
    return 0;

  return svm_msg_q_init (base, cfg);
}

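/*
 * A minimal allocation sketch (sizes illustrative; field names assume
 * the svm_msg_q_cfg_t and svm_msg_q_ring_cfg_t definitions in
 * svm/message_queue.h):
 *
 *   svm_msg_q_ring_cfg_t rc[2] = { { .nitems = 32, .elsize = 64 },
 *                                  { .nitems = 16, .elsize = 256 } };
 *   svm_msg_q_cfg_t cfg = { .consumer_pid = getpid (), .q_nitems = 32,
 *                           .n_rings = 2, .ring_cfgs = rc };
 *   svm_msg_q_shared_t *smq = svm_msg_q_alloc (&cfg);
 *   svm_msg_q_t mq;
 *   svm_msg_q_attach (&mq, smq);
 */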
void
svm_msg_q_attach (svm_msg_q_t *mq, void *smq_base)
{
  svm_msg_q_ring_shared_t *ring;
  svm_msg_q_shared_t *smq;
  u32 i, n_rings, q_sz, offset;

  smq = (svm_msg_q_shared_t *) smq_base;
  mq->q = smq->q;
  n_rings = smq->n_rings;
  vec_validate (mq->rings, n_rings - 1);
  q_sz = sizeof (svm_queue_t) + mq->q->maxsize * sizeof (svm_msg_q_msg_t);
  ring = (void *) ((u8 *) smq->q + q_sz);
  for (i = 0; i < n_rings; i++)
    {
      mq->rings[i].nitems = ring->nitems;
      mq->rings[i].elsize = ring->elsize;
      mq->rings[i].shr = ring;
      offset = sizeof (*ring) + ring->nitems * ring->elsize;
      ring = (void *) ((u8 *) ring + offset);
    }
}

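/* Free a message queue, releasing the underlying svm queue first. */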
void
svm_msg_q_free (svm_msg_q_t * mq)
{
  svm_queue_free (mq->q);
  clib_mem_free (mq);
}

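/*
 * Reserve the element at the tail of the given ring. The caller must
 * hold the queue lock and must have verified that the ring has space;
 * only the ASSERT below checks for a full ring.
 */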
svm_msg_q_msg_t
svm_msg_q_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index)
{
  svm_msg_q_ring_shared_t *sr;
  svm_msg_q_ring_t *ring;
  svm_msg_q_msg_t msg;

  ring = svm_msg_q_ring_inline (mq, ring_index);
  sr = ring->shr;

  ASSERT (sr->cursize < ring->nitems);
  msg.ring_index = ring - mq->rings;
  msg.elt_index = sr->tail;
  sr->tail = (sr->tail + 1) % ring->nitems;
  clib_atomic_fetch_add (&sr->cursize, 1);
  return msg;
}

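/*
 * Lock the queue and allocate a message from the requested ring. With
 * noblock set, fails fast: -1 if the lock cannot be taken, -2 if the
 * queue or ring is full. Otherwise blocks until space is available.
 * On success, returns 0 with the queue left locked.
 */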
int
svm_msg_q_lock_and_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index,
                                     u8 noblock, svm_msg_q_msg_t * msg)
{
  if (noblock)
    {
      if (svm_msg_q_try_lock (mq))
        return -1;
      if (PREDICT_FALSE (svm_msg_q_is_full (mq)
                         || svm_msg_q_ring_is_full (mq, ring_index)))
        {
          svm_msg_q_unlock (mq);
          return -2;
        }
      *msg = svm_msg_q_alloc_msg_w_ring (mq, ring_index);
    }
  else
    {
      svm_msg_q_lock (mq);
      while (svm_msg_q_is_full (mq)
             || svm_msg_q_ring_is_full (mq, ring_index))
        svm_msg_q_wait (mq);
      *msg = svm_msg_q_alloc_msg_w_ring (mq, ring_index);
    }
  return 0;
}

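/*
 * Allocate a message from the first ring with elements large enough
 * for nbytes and with space available. If no ring fits, the returned
 * message is left invalid (as_u64 == ~0).
 */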
svm_msg_q_msg_t
svm_msg_q_alloc_msg (svm_msg_q_t * mq, u32 nbytes)
{
  svm_msg_q_msg_t msg = {.as_u64 = ~0 };
  svm_msg_q_ring_shared_t *sr;
  svm_msg_q_ring_t *ring;

  vec_foreach (ring, mq->rings)
  {
    sr = ring->shr;
    if (ring->elsize < nbytes || sr->cursize == ring->nitems)
      continue;
    msg.ring_index = ring - mq->rings;
    msg.elt_index = sr->tail;
    sr->tail = (sr->tail + 1) % ring->nitems;
    clib_atomic_fetch_add (&sr->cursize, 1);
    break;
  }
  return msg;
}

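/* Resolve a message's ring and element indices to its data pointer. */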
void *
svm_msg_q_msg_data (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  svm_msg_q_ring_t *ring = svm_msg_q_ring_inline (mq, msg->ring_index);
  return svm_msg_q_ring_data (ring, msg->elt_index);
}

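/*
 * Return a message's element to its ring. Elements are expected to be
 * freed in order, i.e., the element must sit at the ring's head. If
 * the ring was full, signal any producer blocked on the queue.
 */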
void
svm_msg_q_free_msg (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  svm_msg_q_ring_shared_t *sr;
  svm_msg_q_ring_t *ring;
  int need_signal;

  ASSERT (vec_len (mq->rings) > msg->ring_index);
  ring = svm_msg_q_ring_inline (mq, msg->ring_index);
  sr = ring->shr;
  if (msg->elt_index == sr->head)
    {
      sr->head = (sr->head + 1) % ring->nitems;
    }
  else
    {
      clib_warning ("message out of order");
      /* for now, expect messages to be processed in order */
      ASSERT (0);
    }

  need_signal = sr->cursize == ring->nitems;
  clib_atomic_fetch_sub (&sr->cursize, 1);

  if (PREDICT_FALSE (need_signal))
    svm_queue_send_signal (mq->q, 0);
}

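/*
 * Check that a message references an element inside the in-use part
 * of its ring: the element's distance from the ring head must be
 * strictly smaller than the head-to-tail distance.
 */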
static int
svm_msq_q_msg_is_valid (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  u32 dist1, dist2, tail, head;
  svm_msg_q_ring_shared_t *sr;
  svm_msg_q_ring_t *ring;

  if (vec_len (mq->rings) <= msg->ring_index)
    return 0;

  ring = svm_msg_q_ring_inline (mq, msg->ring_index);
  sr = ring->shr;
  tail = sr->tail;
  head = sr->head;

  dist1 = ((ring->nitems + msg->elt_index) - head) % ring->nitems;
  if (tail == head)
    dist2 = (sr->cursize == 0) ? 0 : ring->nitems;
  else
    dist2 = ((ring->nitems + tail) - head) % ring->nitems;
  return (dist1 < dist2);
}

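/*
 * Enqueue a message's indices onto the underlying svm queue; blocks
 * if the queue is full unless nowait is set.
 */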
int
svm_msg_q_add (svm_msg_q_t * mq, svm_msg_q_msg_t * msg, int nowait)
{
  ASSERT (svm_msq_q_msg_is_valid (mq, msg));
  return svm_queue_add (mq->q, (u8 *) msg, nowait);
}

void
svm_msg_q_add_and_unlock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  ASSERT (svm_msq_q_msg_is_valid (mq, msg));
  svm_queue_add_raw (mq->q, (u8 *) msg);
  svm_msg_q_unlock (mq);
}

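/*
 * Typical producer path (a sketch; mq, ring_index, data and len are
 * placeholders; the 0 requests blocking behavior):
 *
 *   svm_msg_q_msg_t msg;
 *   if (!svm_msg_q_lock_and_alloc_msg_w_ring (mq, ring_index, 0, &msg))
 *     {
 *       clib_memcpy (svm_msg_q_msg_data (mq, &msg), data, len);
 *       svm_msg_q_add_and_unlock (mq, &msg);
 *     }
 */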
int
svm_msg_q_sub (svm_msg_q_t * mq, svm_msg_q_msg_t * msg,
               svm_q_conditional_wait_t cond, u32 time)
{
  return svm_queue_sub (mq->q, (u8 *) msg, cond, time);
}

void
svm_msg_q_sub_w_lock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  svm_queue_sub_raw (mq->q, (u8 *) msg);
}

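/*
 * Typical consumer path (a sketch; mq and process are placeholders):
 *
 *   svm_msg_q_msg_t msg;
 *   svm_msg_q_sub (mq, &msg, SVM_Q_WAIT, 0);
 *   process (svm_msg_q_msg_data (mq, &msg));
 *   svm_msg_q_free_msg (mq, &msg);
 */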
void
svm_msg_q_set_consumer_eventfd (svm_msg_q_t * mq, int fd)
{
  mq->q->consumer_evtfd = fd;
}

void
svm_msg_q_set_producer_eventfd (svm_msg_q_t * mq, int fd)
{
  mq->q->producer_evtfd = fd;
}

int
svm_msg_q_alloc_consumer_eventfd (svm_msg_q_t * mq)
{
  int fd;
  if ((fd = eventfd (0, EFD_NONBLOCK)) < 0)
    return -1;
  svm_msg_q_set_consumer_eventfd (mq, fd);
  return 0;
}

int
svm_msg_q_alloc_producer_eventfd (svm_msg_q_t * mq)
{
  int fd;
  if ((fd = eventfd (0, EFD_NONBLOCK)) < 0)
    return -1;
  svm_msg_q_set_producer_eventfd (mq, fd);
  return 0;
}

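/*
 * Format helper: prints queue occupancy as [Q:cursize/maxsize]
 * followed by [Rn:cursize/nitems] for each ring.
 */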
u8 *
format_svm_msg_q (u8 * s, va_list * args)
{
  svm_msg_q_t *mq = va_arg (*args, svm_msg_q_t *);
  s = format (s, " [Q:%d/%d]", mq->q->cursize, mq->q->maxsize);
  for (u32 i = 0; i < vec_len (mq->rings); i++)
    {
      s = format (s, " [R%d:%d/%d]", i, mq->rings[i].shr->cursize,
                  mq->rings[i].nitems);
    }
  return s;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */