/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <svm/message_queue.h>
#include <vppinfra/mem.h>
#include <sys/eventfd.h>

static inline svm_msg_q_ring_t *
svm_msg_q_ring_inline (svm_msg_q_t * mq, u32 ring_index)
{
  return vec_elt_at_index (mq->rings, ring_index);
}

svm_msg_q_ring_t *
svm_msg_q_ring (svm_msg_q_t * mq, u32 ring_index)
{
  return svm_msg_q_ring_inline (mq, ring_index);
}

static inline void *
svm_msg_q_ring_data (svm_msg_q_ring_t * ring, u32 elt_index)
{
  ASSERT (elt_index < ring->nitems);
  return (ring->data + elt_index * ring->elsize);
}

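/*
 * Allocate the queue and all of its rings from one cache-line aligned
 * chunk.  Layout, in allocation order: the svm_msg_q_t header, the
 * underlying svm_queue_t with q_nitems message slots, a vector header
 * plus n_rings ring descriptors, and finally the data area of every
 * ring that does not supply its own buffer.
 *
 * Illustrative configuration (a sketch only; sizes are arbitrary and
 * only the cfg fields used below are assumed to exist):
 *
 *   svm_msg_q_ring_cfg_t rc[2] = { { .nitems = 32, .elsize = 64 },
 *                                  { .nitems = 16, .elsize = 256 } };
 *   svm_msg_q_cfg_t cfg = { .consumer_pid = getpid (), .q_nitems = 64,
 *                           .n_rings = 2, .ring_cfgs = rc };
 *   svm_msg_q_t *mq = svm_msg_q_alloc (&cfg);
 */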
svm_msg_q_t *
svm_msg_q_alloc (svm_msg_q_cfg_t * cfg)
{
  svm_msg_q_ring_cfg_t *ring_cfg;
  uword rings_sz = 0, mq_sz;
  svm_msg_q_ring_t *ring;
  u8 *base, *rings_ptr;
  vec_header_t *vh;
  u32 vec_sz, q_sz;
  svm_msg_q_t *mq;
  int i;

  if (!cfg || !cfg->n_rings || !cfg->q_nitems)
    return 0;

  vec_sz = vec_header_bytes (0) + sizeof (svm_msg_q_ring_t) * cfg->n_rings;
  for (i = 0; i < cfg->n_rings; i++)
    {
      if (cfg->ring_cfgs[i].data)
        continue;
      ring_cfg = &cfg->ring_cfgs[i];
      rings_sz += (uword) ring_cfg->nitems * ring_cfg->elsize;
    }

  q_sz = sizeof (svm_queue_t) + cfg->q_nitems * sizeof (svm_msg_q_msg_t);
  mq_sz = sizeof (svm_msg_q_t) + vec_sz + rings_sz + q_sz;
  base = clib_mem_alloc_aligned (mq_sz, CLIB_CACHE_LINE_BYTES);
  if (!base)
    return 0;

  mq = (svm_msg_q_t *) base;
  mq->q = svm_queue_init (base + sizeof (svm_msg_q_t), cfg->q_nitems,
                          sizeof (svm_msg_q_msg_t));
  mq->q->consumer_pid = cfg->consumer_pid;
  vh = (vec_header_t *) ((u8 *) mq->q + q_sz);
  vh->len = cfg->n_rings;
  mq->rings = (svm_msg_q_ring_t *) (vh + 1);
  rings_ptr = (u8 *) mq->rings + vec_sz;
  for (i = 0; i < cfg->n_rings; i++)
    {
      ring = &mq->rings[i];
      ring->elsize = cfg->ring_cfgs[i].elsize;
      ring->nitems = cfg->ring_cfgs[i].nitems;
      ring->cursize = ring->head = ring->tail = 0;
      if (cfg->ring_cfgs[i].data)
        ring->data = cfg->ring_cfgs[i].data;
      else
        {
          ring->data = rings_ptr;
          rings_ptr += (uword) ring->nitems * ring->elsize;
        }
    }

  return mq;
}

void
svm_msg_q_free (svm_msg_q_t * mq)
{
  /* the queue and the rings live in the same allocation as mq */
  svm_queue_free (mq->q);
  clib_mem_free (mq);
}

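/*
 * Reserve the next free element of a specific ring and return a handle
 * to it.  The tail slot is claimed and cursize is bumped atomically;
 * producers are expected to serialize around this, e.g. by holding the
 * queue mutex (see svm_msg_q_lock_and_alloc_msg_w_ring below).
 */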
svm_msg_q_msg_t
svm_msg_q_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index)
{
  svm_msg_q_msg_t msg;
  svm_msg_q_ring_t *ring = svm_msg_q_ring_inline (mq, ring_index);

  ASSERT (ring->cursize < ring->nitems);
  msg.ring_index = ring - mq->rings;
  msg.elt_index = ring->tail;
  ring->tail = (ring->tail + 1) % ring->nitems;
  clib_atomic_fetch_add (&ring->cursize, 1);
  return msg;
}

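/*
 * Lock the queue and allocate a message from @ring_index.  With noblock
 * set, fail fast: -1 if the mutex cannot be taken, -2 if the ring is
 * full or the allocation comes back invalid.  Otherwise block until
 * space is available.  On success the queue is left locked so the
 * caller can fill in the message and use svm_msg_q_add_and_unlock.
 */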
int
svm_msg_q_lock_and_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index,
                                     u8 noblock, svm_msg_q_msg_t * msg)
{
  if (noblock)
    {
      if (svm_msg_q_try_lock (mq))
        return -1;
      if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, ring_index)))
        {
          svm_msg_q_unlock (mq);
          return -2;
        }
      *msg = svm_msg_q_alloc_msg_w_ring (mq, ring_index);
      if (PREDICT_FALSE (svm_msg_q_msg_is_invalid (msg)))
        {
          svm_msg_q_unlock (mq);
          return -2;
        }
    }
  else
    {
      svm_msg_q_lock (mq);
      /* wait for a slot to free up */
      while (svm_msg_q_ring_is_full (mq, ring_index))
        svm_msg_q_wait (mq);
      *msg = svm_msg_q_alloc_msg_w_ring (mq, ring_index);
    }
  return 0;
}

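/*
 * Allocate a message without naming a ring: scan the rings in order and
 * take the first one whose element size fits nbytes and which still has
 * a free slot.  Returns an all-ones (invalid) handle if nothing fits.
 */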
svm_msg_q_msg_t
svm_msg_q_alloc_msg (svm_msg_q_t * mq, u32 nbytes)
{
  svm_msg_q_msg_t msg = {.as_u64 = ~0 };
  svm_msg_q_ring_t *ring;

  vec_foreach (ring, mq->rings)
  {
    if (ring->elsize < nbytes || ring->cursize == ring->nitems)
      continue;
    msg.ring_index = ring - mq->rings;
    msg.elt_index = ring->tail;
    ring->tail = (ring->tail + 1) % ring->nitems;
    clib_atomic_fetch_add (&ring->cursize, 1);
    break;
  }
  return msg;
}

void *
svm_msg_q_msg_data (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  svm_msg_q_ring_t *ring = svm_msg_q_ring_inline (mq, msg->ring_index);
  return svm_msg_q_ring_data (ring, msg->elt_index);
}

void
svm_msg_q_free_msg (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  svm_msg_q_ring_t *ring;

  ASSERT (vec_len (mq->rings) > msg->ring_index);
  ring = &mq->rings[msg->ring_index];
  if (msg->elt_index == ring->head)
    {
      ring->head = (ring->head + 1) % ring->nitems;
    }
  else
    {
      clib_warning ("message out of order");
      /* for now, expect messages to be processed in order */
      ASSERT (0);
    }
  clib_atomic_fetch_sub (&ring->cursize, 1);
}

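/*
 * Sanity-check a message handle: its element index must fall inside the
 * ring's currently allocated region, i.e. the head-to-element distance
 * (modulo nitems) must be smaller than the head-to-tail distance.
 */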
static int
svm_msq_q_msg_is_valid (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  u32 dist1, dist2, tail, head;
  svm_msg_q_ring_t *ring;

  if (vec_len (mq->rings) <= msg->ring_index)
    return 0;
  ring = &mq->rings[msg->ring_index];
  tail = ring->tail;
  head = ring->head;
  dist1 = ((ring->nitems + msg->elt_index) - head) % ring->nitems;
  if (tail == head)
    dist2 = (ring->cursize == 0) ? 0 : ring->nitems;
  else
    dist2 = ((ring->nitems + tail) - head) % ring->nitems;
  return (dist1 < dist2);
}

int
svm_msg_q_add (svm_msg_q_t * mq, svm_msg_q_msg_t * msg, int nowait)
{
  ASSERT (svm_msq_q_msg_is_valid (mq, msg));
  return svm_queue_add (mq->q, (u8 *) msg, nowait);
}

void
svm_msg_q_add_and_unlock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  ASSERT (svm_msq_q_msg_is_valid (mq, msg));
  svm_queue_add_raw (mq->q, (u8 *) msg);
  svm_msg_q_unlock (mq);
}

int
svm_msg_q_sub (svm_msg_q_t * mq, svm_msg_q_msg_t * msg,
               svm_q_conditional_wait_t cond, u32 time)
{
  return svm_queue_sub (mq->q, (u8 *) msg, cond, time);
}

void
svm_msg_q_sub_w_lock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  svm_queue_sub_raw (mq->q, (u8 *) msg);
}

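/*
 * Optional eventfd-based notification.  The setters only record the
 * descriptors in the underlying queue; the alloc helpers create
 * non-blocking eventfds, presumably to be polled by an external
 * epoll/select loop on the consumer and producer sides.
 */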
void
svm_msg_q_set_consumer_eventfd (svm_msg_q_t * mq, int fd)
{
  mq->q->consumer_evtfd = fd;
}

void
svm_msg_q_set_producer_eventfd (svm_msg_q_t * mq, int fd)
{
  mq->q->producer_evtfd = fd;
}

int
svm_msg_q_alloc_consumer_eventfd (svm_msg_q_t * mq)
{
  int fd;

  if ((fd = eventfd (0, EFD_NONBLOCK)) < 0)
    return -1;
  svm_msg_q_set_consumer_eventfd (mq, fd);
  return 0;
}

int
svm_msg_q_alloc_producer_eventfd (svm_msg_q_t * mq)
{
  int fd;

  if ((fd = eventfd (0, EFD_NONBLOCK)) < 0)
    return -1;
  svm_msg_q_set_producer_eventfd (mq, fd);
  return 0;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */