/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <svm/message_queue.h>
#include <vppinfra/mem.h>
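/**
 * Allocate a fixed-size message queue and its per-size data rings.
 *
 * The queue proper (mq->q) carries svm_msg_q_msg_t handles; message
 * payloads live in the rings described by cfg->ring_cfgs. Caller-provided
 * ring buffers are used as-is, otherwise buffers are allocated here.
 *
 * Minimal usage sketch (the ring-config type name and designated
 * initializers are assumptions based on the cfg fields used below):
 *
 *   svm_msg_q_ring_cfg_t rc = { .nitems = 32, .elsize = 64, .data = 0 };
 *   svm_msg_q_cfg_t cfg = { .consumer_pid = 0, .q_nitems = 32,
 *                           .n_rings = 1, .ring_cfgs = &rc };
 *   svm_msg_q_t *mq = svm_msg_q_alloc (&cfg);
 */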
svm_msg_q_t *
svm_msg_q_alloc (svm_msg_q_cfg_t * cfg)
{
  svm_msg_q_ring_t *ring;
  svm_msg_q_t *mq;
  int i;

  mq = clib_mem_alloc_aligned (sizeof (svm_msg_q_t), CLIB_CACHE_LINE_BYTES);
  memset (mq, 0, sizeof (*mq));
  mq->q = svm_queue_init (cfg->q_nitems, sizeof (svm_msg_q_msg_t),
                          cfg->consumer_pid, 0);
  vec_validate (mq->rings, cfg->n_rings - 1);
  for (i = 0; i < cfg->n_rings; i++)
    {
      ring = &mq->rings[i];
      ring->elsize = cfg->ring_cfgs[i].elsize;
      ring->nitems = cfg->ring_cfgs[i].nitems;
      if (cfg->ring_cfgs[i].data)
        ring->data = cfg->ring_cfgs[i].data;
      else
        ring->data = clib_mem_alloc_aligned (ring->nitems * ring->elsize,
                                             CLIB_CACHE_LINE_BYTES);
    }

  return mq;
}
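/**
 * Free a message queue allocated with svm_msg_q_alloc.
 *
 * Releases the per-ring payload buffers, the rings vector and the queue
 * structure itself.
 */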
void
svm_msg_q_free (svm_msg_q_t * mq)
{
  svm_msg_q_ring_t *ring;

  vec_foreach (ring, mq->rings)
    clib_mem_free (ring->data);
  vec_free (mq->rings);
  clib_mem_free (mq);
}
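/**
 * Allocate a message buffer from the first ring that can hold nbytes.
 *
 * Returns a handle encoding the ring and element index. If no ring with a
 * large enough element size has a free slot, the handle's as_u64 is ~0.
 */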
svm_msg_q_msg_t
svm_msg_q_alloc_msg (svm_msg_q_t * mq, u32 nbytes)
{
  svm_msg_q_msg_t msg = {.as_u64 = ~0 };
  svm_msg_q_ring_t *ring;

  vec_foreach (ring, mq->rings)
  {
    if (ring->elsize < nbytes || ring->cursize == ring->nitems)
      continue;
    msg.ring_index = ring - mq->rings;
    msg.elt_index = ring->tail;
    ring->tail = (ring->tail + 1) % ring->nitems;
    __sync_fetch_and_add (&ring->cursize, 1);
    break;
  }
  return msg;
}
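/** Look up a ring by index; vec_elt_at_index asserts the index is valid. */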
static inline svm_msg_q_ring_t *
svm_msg_q_get_ring (svm_msg_q_t * mq, u32 ring_index)
{
  return vec_elt_at_index (mq->rings, ring_index);
}
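/** Compute the address of element elt_index in a ring's payload buffer. */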
static inline void *
svm_msg_q_ring_data (svm_msg_q_ring_t * ring, u32 elt_index)
{
  ASSERT (elt_index < ring->nitems);
  return (ring->data + elt_index * ring->elsize);
}
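/** Return a pointer to the payload backing a previously allocated message. */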
void *
svm_msg_q_msg_data (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  svm_msg_q_ring_t *ring = svm_msg_q_get_ring (mq, msg->ring_index);
  return svm_msg_q_ring_data (ring, msg->elt_index);
}
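/**
 * Return a message buffer to its ring.
 *
 * Messages are currently expected to be freed in allocation order, so only
 * the element at the ring's head can be released.
 */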
void
svm_msg_q_free_msg (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  svm_msg_q_ring_t *ring;

  if (vec_len (mq->rings) <= msg->ring_index)
    return;
  ring = &mq->rings[msg->ring_index];
  if (msg->elt_index == ring->head)
    ring->head = (ring->head + 1) % ring->nitems;
  else
    /* for now, expect messages to be processed in order */
    ASSERT (0);
  __sync_fetch_and_sub (&ring->cursize, 1);
}
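/**
 * Check that a message handle references an element inside the used region
 * of its ring, i.e. between head and tail in ring order.
 */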
static int
svm_msq_q_msg_is_valid (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  svm_msg_q_ring_t *ring;
  u32 dist1, dist2;

  if (vec_len (mq->rings) <= msg->ring_index)
    return 0;
  ring = &mq->rings[msg->ring_index];

  dist1 = ((ring->nitems + msg->elt_index) - ring->head) % ring->nitems;
  if (ring->tail == ring->head)
    dist2 = (ring->cursize == 0) ? 0 : ring->nitems;
  else
    dist2 = ((ring->nitems + ring->tail) - ring->head) % ring->nitems;
  return (dist1 < dist2);
}
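/** Post a message handle to the queue for the consumer. */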
int
svm_msg_q_add (svm_msg_q_t * mq, svm_msg_q_msg_t msg, int nowait)
{
  ASSERT (svm_msq_q_msg_is_valid (mq, &msg));
  return svm_queue_add (mq->q, (u8 *) & msg, nowait);
}
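/**
 * Dequeue a message handle, blocking, timed-waiting or returning
 * immediately depending on cond and time.
 */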
int
svm_msg_q_sub (svm_msg_q_t * mq, svm_msg_q_msg_t * msg,
               svm_q_conditional_wait_t cond, u32 time)
{
  return svm_queue_sub (mq->q, (u8 *) msg, cond, time);
}
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */