/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <svm/message_queue.h>
#include <vppinfra/mem.h>
static inline svm_msg_q_ring_t *
svm_msg_q_ring_inline (svm_msg_q_t * mq, u32 ring_index)
{
  return vec_elt_at_index (mq->rings, ring_index);
}
svm_msg_q_ring_t *
svm_msg_q_ring (svm_msg_q_t * mq, u32 ring_index)
{
  return svm_msg_q_ring_inline (mq, ring_index);
}
void *
svm_msg_q_ring_data (svm_msg_q_ring_t * ring, u32 elt_index)
{
  ASSERT (elt_index < ring->nitems);
  return (ring->data + elt_index * ring->elsize);
}
svm_msg_q_t *
svm_msg_q_alloc (svm_msg_q_cfg_t * cfg)
{
  svm_msg_q_ring_cfg_t *ring_cfg;
  svm_msg_q_ring_t *ring;
  uword rings_sz = 0, vec_sz;
  u8 *base, *rings_ptr;
  vec_header_t *vh;
  svm_msg_q_t *mq;
  int i;

  ASSERT (cfg);

  /* Rings vector follows the queue header; ring data for rings that do not
   * provide their own buffer is appended after the vector. */
  vec_sz = vec_header_bytes (0) + sizeof (svm_msg_q_ring_t) * cfg->n_rings;
  for (i = 0; i < cfg->n_rings; i++)
    {
      if (cfg->ring_cfgs[i].data)
	continue;
      ring_cfg = &cfg->ring_cfgs[i];
      rings_sz += (uword) ring_cfg->nitems * ring_cfg->elsize;
    }

  base = clib_mem_alloc_aligned (sizeof (svm_msg_q_t) + vec_sz + rings_sz,
				 CLIB_CACHE_LINE_BYTES);
  if (!base)
    return 0;

  mq = (svm_msg_q_t *) base;
  vh = (vec_header_t *) (base + sizeof (svm_msg_q_t));
  vh->len = cfg->n_rings;
  mq->rings = (svm_msg_q_ring_t *) (vh + 1);
  rings_ptr = (u8 *) mq->rings + vec_sz;
  for (i = 0; i < cfg->n_rings; i++)
    {
      ring = &mq->rings[i];
      ring->elsize = cfg->ring_cfgs[i].elsize;
      ring->nitems = cfg->ring_cfgs[i].nitems;
      ring->cursize = ring->head = ring->tail = 0;
      if (cfg->ring_cfgs[i].data)
	ring->data = cfg->ring_cfgs[i].data;
      else
	{
	  ring->data = rings_ptr;
	  rings_ptr += (uword) ring->nitems * ring->elsize;
	}
    }
  mq->q = svm_queue_init (cfg->q_nitems, sizeof (svm_msg_q_msg_t),
			  cfg->consumer_pid, 0);

  return mq;
}
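/*
 * Illustrative sketch of how a queue might be allocated with this API: one
 * message ring of 32 elements of 256 bytes each, backed by the same
 * allocation as the queue (ring data pointer left null). Sizes and the
 * consumer pid are arbitrary example values.
 *
 *   svm_msg_q_ring_cfg_t rc = { .nitems = 32, .elsize = 256, .data = 0 };
 *   svm_msg_q_cfg_t cfg = { .consumer_pid = 0, .n_rings = 1,
 *                           .q_nitems = 32, .ring_cfgs = &rc };
 *   svm_msg_q_t *mq = svm_msg_q_alloc (&cfg);
 */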
void
svm_msg_q_free (svm_msg_q_t * mq)
{
  svm_queue_free (mq->q);
  clib_mem_free (mq);
}
svm_msg_q_msg_t
svm_msg_q_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index)
{
  svm_msg_q_msg_t msg = {.as_u64 = ~0 };
  svm_msg_q_ring_t *ring = svm_msg_q_ring_inline (mq, ring_index);

  ASSERT (ring->cursize != ring->nitems);
  msg.ring_index = ring - mq->rings;
  msg.elt_index = ring->tail;
  ring->tail = (ring->tail + 1) % ring->nitems;
  __sync_fetch_and_add (&ring->cursize, 1);
  return msg;
}
int
svm_msg_q_lock_and_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index,
				     u8 noblock, svm_msg_q_msg_t * msg)
{
  if (noblock)
    {
      if (svm_msg_q_try_lock (mq))
	return -1;
      if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, ring_index)))
	{
	  svm_msg_q_unlock (mq);
	  return -2;
	}
      *msg = svm_msg_q_alloc_msg_w_ring (mq, ring_index);
      if (PREDICT_FALSE (svm_msg_q_msg_is_invalid (msg)))
	{
	  svm_msg_q_unlock (mq);
	  return -2;
	}
    }
  else
    {
      svm_msg_q_lock (mq);
      *msg = svm_msg_q_alloc_msg_w_ring (mq, ring_index);
      while (svm_msg_q_msg_is_invalid (msg))
	{
	  svm_msg_q_wait (mq);
	  *msg = svm_msg_q_alloc_msg_w_ring (mq, ring_index);
	}
    }
  return 0;
}
svm_msg_q_msg_t
svm_msg_q_alloc_msg (svm_msg_q_t * mq, u32 nbytes)
{
  svm_msg_q_msg_t msg = {.as_u64 = ~0 };
  svm_msg_q_ring_t *ring;

  vec_foreach (ring, mq->rings)
  {
    if (ring->elsize < nbytes || ring->cursize == ring->nitems)
      continue;
    msg.ring_index = ring - mq->rings;
    msg.elt_index = ring->tail;
    ring->tail = (ring->tail + 1) % ring->nitems;
    __sync_fetch_and_add (&ring->cursize, 1);
    break;
  }
  return msg;
}
void *
svm_msg_q_msg_data (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  svm_msg_q_ring_t *ring = svm_msg_q_ring_inline (mq, msg->ring_index);
  return svm_msg_q_ring_data (ring, msg->elt_index);
}
void
svm_msg_q_free_msg (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  svm_msg_q_ring_t *ring;

  if (vec_len (mq->rings) <= msg->ring_index)
    return;
  ring = &mq->rings[msg->ring_index];
  if (msg->elt_index == ring->head)
    {
      ring->head = (ring->head + 1) % ring->nitems;
    }
  else
    {
      /* for now, expect messages to be processed in order */
      ASSERT (0);
    }
  __sync_fetch_and_sub (&ring->cursize, 1);
}
int
svm_msq_q_msg_is_valid (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  u32 dist1, dist2;
  svm_msg_q_ring_t *ring;

  if (vec_len (mq->rings) <= msg->ring_index)
    return 0;
  ring = &mq->rings[msg->ring_index];

  dist1 = ((ring->nitems + msg->elt_index) - ring->head) % ring->nitems;
  if (ring->tail == ring->head)
    dist2 = (ring->cursize == 0) ? 0 : ring->nitems;
  else
    dist2 = ((ring->nitems + ring->tail) - ring->head) % ring->nitems;
  return (dist1 < dist2);
}
int
svm_msg_q_add (svm_msg_q_t * mq, svm_msg_q_msg_t * msg, int nowait)
{
  ASSERT (svm_msq_q_msg_is_valid (mq, msg));
  return svm_queue_add (mq->q, (u8 *) msg, nowait);
}
void
svm_msg_q_add_and_unlock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  ASSERT (svm_msq_q_msg_is_valid (mq, msg));
  svm_queue_add_raw (mq->q, (u8 *) msg);
  svm_msg_q_unlock (mq);
}
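/*
 * Producer-side sketch (illustrative): take the queue lock, allocate an
 * element from ring 0, fill it, then enqueue the message handle and release
 * the lock. "my_msg_t" and its "opcode" field are hypothetical application
 * payload names used only for the example.
 *
 *   svm_msg_q_msg_t msg;
 *   if (svm_msg_q_lock_and_alloc_msg_w_ring (mq, 0, 0, &msg) == 0)
 *     {
 *       my_msg_t *e = (my_msg_t *) svm_msg_q_msg_data (mq, &msg);
 *       e->opcode = 1;
 *       svm_msg_q_add_and_unlock (mq, &msg);
 *     }
 */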
int
svm_msg_q_sub (svm_msg_q_t * mq, svm_msg_q_msg_t * msg,
	       svm_q_conditional_wait_t cond, u32 time)
{
  return svm_queue_sub (mq->q, (u8 *) msg, cond, time);
}
void
svm_msg_q_sub_w_lock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  svm_queue_sub_raw (mq->q, (u8 *) msg);
}
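/*
 * Consumer-side sketch (illustrative): dequeue a message handle, map it to
 * its ring element, consume it, then return the slot to the ring. SVM_Q_WAIT
 * is assumed to be the blocking member of svm_q_conditional_wait_t;
 * "my_msg_t" is a hypothetical application payload type.
 *
 *   svm_msg_q_msg_t msg;
 *   svm_msg_q_sub (mq, &msg, SVM_Q_WAIT, 0);
 *   my_msg_t *e = (my_msg_t *) svm_msg_q_msg_data (mq, &msg);
 *   ... consume *e ...
 *   svm_msg_q_free_msg (mq, &msg);
 */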
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */