/*
 * Copyright (c) 2016 Intel Corporation.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_spinlock.h>
#include <rte_atomic.h>
#include <sys/queue.h>
32 * Possible states of the event.
42 TAILQ_ENTRY(tle_event) ql;
45 enum tle_ev_state state;
46 } __rte_cache_aligned;
53 TAILQ_HEAD(, tle_event) armed;
54 TAILQ_HEAD(, tle_event) free;
55 struct tle_event events[0];
59 * event queue creation parameters.
61 struct tle_evq_param {
62 int32_t socket_id; /**< socket ID to allocate memory from. */
63 uint32_t max_events; /**< max number of events in queue. */
69 * Parameters used to create and initialise the queue.
71 * Pointer to new event queue structure,
72 * or NULL on error, with error code set in rte_errno.
73 * Possible rte_errno errors include:
74 * - EINVAL - invalid parameter passed to function
75 * - ENOMEM - out of memory
77 struct tle_evq *tle_evq_create(const struct tle_evq_param *prm);
80 * Destroy given event queue.
83 * event queue to destroy
85 void tle_evq_destroy(struct tle_evq *evq);
88 * allocate a new event within given event queue.
90 * event queue to allocate a new stream within.
92 * User data to be associated with that event.
94 * Pointer to event structure that can be used in future tle_event API calls,
95 * or NULL on error, with error code set in rte_errno.
96 * Possible rte_errno errors include:
97 * - EINVAL - invalid parameter passed to function
98 * - ENOMEM - max limit of allocated events reached for that context
100 struct tle_event *tle_event_alloc(struct tle_evq *evq, const void *data);
103 * free an allocated event.
105 * Pointer to the event to free.
107 void tle_event_free(struct tle_event *ev);
109 static inline enum tle_ev_state
110 tle_event_state(const struct tle_event *ev)
116 * move event from DOWN to UP state.
118 * Pointer to the event.
121 tle_event_raise(struct tle_event *ev)
125 if (ev->state != TLE_SEV_DOWN)
129 rte_compiler_barrier();
131 rte_spinlock_lock(&q->lock);
132 if (ev->state == TLE_SEV_DOWN) {
133 ev->state = TLE_SEV_UP;
134 TAILQ_INSERT_TAIL(&q->armed, ev, ql);
137 rte_spinlock_unlock(&q->lock);
141 * move event from UP to DOWN state.
143 * Pointer to the event.
146 tle_event_down(struct tle_event *ev)
150 if (ev->state != TLE_SEV_UP)
154 rte_compiler_barrier();
156 rte_spinlock_lock(&q->lock);
157 if (ev->state == TLE_SEV_UP) {
158 ev->state = TLE_SEV_DOWN;
159 TAILQ_REMOVE(&q->armed, ev, ql);
162 rte_spinlock_unlock(&q->lock);
166 * move from IDLE to DOWN/UP state.
168 * Pointer to the event.
170 * new state for the event.
173 tle_event_active(struct tle_event *ev, enum tle_ev_state st)
177 if (ev->state != TLE_SEV_IDLE)
181 rte_compiler_barrier();
183 rte_spinlock_lock(&q->lock);
184 if (st > ev->state) {
185 if (st == TLE_SEV_UP) {
186 TAILQ_INSERT_TAIL(&q->armed, ev, ql);
191 rte_spinlock_unlock(&q->lock);
195 * move event IDLE state.
197 * Pointer to the event.
200 tle_event_idle(struct tle_event *ev)
204 if (ev->state == TLE_SEV_IDLE)
208 rte_compiler_barrier();
210 rte_spinlock_lock(&q->lock);
211 if (ev->state == TLE_SEV_UP) {
212 TAILQ_REMOVE(&q->armed, ev, ql);
215 ev->state = TLE_SEV_IDLE;
216 rte_spinlock_unlock(&q->lock);
220 tle_evq_idle(struct tle_evq *evq, struct tle_event *ev[], uint32_t num)
224 rte_spinlock_lock(&evq->lock);
227 for (i = 0; i != num; i++) {
228 if (ev[i]->state == TLE_SEV_UP) {
229 TAILQ_REMOVE(&evq->armed, ev[i], ql);
232 ev[i]->state = TLE_SEV_IDLE;
236 rte_spinlock_unlock(&evq->lock);
241 * return up to *num* user data pointers associated with
242 * the events that were in the UP state.
243 * Each retrieved event is automatically moved into the DOWN state.
245 * event queue to retrieve events from.
247 * An array of user data pointers associated with the events retrieved.
248 * It must be large enough to store up to *num* pointers in it.
250 * Number of elements in the *evd* array.
252 * number of of entries filled inside *evd* array.
254 static inline int32_t
255 tle_evq_get(struct tle_evq *evq, const void *evd[], uint32_t num)
258 struct tle_event *ev;
260 if (evq->nb_armed == 0)
263 rte_compiler_barrier();
265 rte_spinlock_lock(&evq->lock);
266 n = RTE_MIN(num, evq->nb_armed);
267 for (i = 0; i != n; i++) {
268 ev = TAILQ_FIRST(&evq->armed);
269 ev->state = TLE_SEV_DOWN;
270 TAILQ_REMOVE(&evq->armed, ev, ql);
274 rte_spinlock_unlock(&evq->lock);
283 #endif /* _SEV_IMPL_H_ */