/*
 * Copyright (c) 2016  Intel Corporation.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <stdint.h>
#include <sys/queue.h>

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_spinlock.h>
/**
 * Possible states of the event.
 */
enum tle_ev_state {
	TLE_SEV_IDLE,
	TLE_SEV_DOWN,
	TLE_SEV_UP,
	TLE_SEV_NUM
};

struct tle_evq;

/**
 * Single event: linked into its queue's lists and carrying
 * opaque user data that tle_evq_get() hands back to the caller.
 * NOTE(review): member set reconstructed from the accessors below
 * (ev->state, ev->data, TAILQ linkage, per-queue lock) — confirm
 * against the project tree.
 */
struct tle_event {
	TAILQ_ENTRY(tle_event) ql; /* linkage on the queue's armed/free lists */
	struct tle_evq *head;      /* queue this event belongs to */
	const void *data;          /* user data returned by tle_evq_get() */
	enum tle_ev_state state;   /* current state, guarded by head->lock */
} __rte_cache_aligned;
53 TAILQ_HEAD(, tle_event) armed;
54 TAILQ_HEAD(, tle_event) free;
55 struct tle_event events[0];
/**
 * event queue creation parameters.
 */
struct tle_evq_param {
	int32_t socket_id;   /**< socket ID to allocate memory from. */
	uint32_t max_events; /**< max number of events in queue. */
};
/**
 * create event queue.
 * @param prm
 *   Parameters used to create and initialise the queue.
 * @return
 *   Pointer to new event queue structure,
 *   or NULL on error, with error code set in rte_errno.
 *   Possible rte_errno errors include:
 *   - EINVAL - invalid parameter passed to function
 *   - ENOMEM - out of memory
 */
struct tle_evq *tle_evq_create(const struct tle_evq_param *prm);
/**
 * Destroy given event queue.
 * @param evq
 *   event queue to destroy
 */
void tle_evq_destroy(struct tle_evq *evq);
/**
 * allocate a new event within given event queue.
 * @param evq
 *   event queue to allocate a new stream within.
 * @param data
 *   User data to be associated with that event.
 * @return
 *   Pointer to event structure that can be used in future tle_event API calls,
 *   or NULL on error, with error code set in rte_errno.
 *   Possible rte_errno errors include:
 *   - EINVAL - invalid parameter passed to function
 *   - ENOMEM - max limit of allocated events reached for that context
 */
struct tle_event *tle_event_alloc(struct tle_evq *evq, const void *data);
/**
 * free an allocated event.
 * @param ev
 *   Pointer to the event to free.
 */
void tle_event_free(struct tle_event *ev);
111 * move event from DOWN to UP state.
113 * Pointer to the event.
116 tle_event_raise(struct tle_event *ev)
120 if (ev->state != TLE_SEV_DOWN)
124 rte_compiler_barrier();
126 rte_spinlock_lock(&q->lock);
127 if (ev->state == TLE_SEV_DOWN) {
128 ev->state = TLE_SEV_UP;
129 TAILQ_INSERT_TAIL(&q->armed, ev, ql);
132 rte_spinlock_unlock(&q->lock);
136 * move event from UP to DOWN state.
138 * Pointer to the event.
141 tle_event_down(struct tle_event *ev)
145 if (ev->state != TLE_SEV_UP)
149 rte_compiler_barrier();
151 rte_spinlock_lock(&q->lock);
152 if (ev->state == TLE_SEV_UP) {
153 ev->state = TLE_SEV_DOWN;
154 TAILQ_REMOVE(&q->armed, ev, ql);
157 rte_spinlock_unlock(&q->lock);
161 * move from IDLE to DOWN/UP state.
163 * Pointer to the event.
165 * new state for the event.
168 tle_event_active(struct tle_event *ev, enum tle_ev_state st)
172 if (ev->state != TLE_SEV_IDLE)
176 rte_compiler_barrier();
178 rte_spinlock_lock(&q->lock);
179 if (st > ev->state) {
180 if (st == TLE_SEV_UP) {
181 TAILQ_INSERT_TAIL(&q->armed, ev, ql);
186 rte_spinlock_unlock(&q->lock);
190 * move event IDLE state.
192 * Pointer to the event.
195 tle_event_idle(struct tle_event *ev)
199 if (ev->state == TLE_SEV_IDLE)
203 rte_compiler_barrier();
205 rte_spinlock_lock(&q->lock);
206 if (ev->state == TLE_SEV_UP) {
207 TAILQ_REMOVE(&q->armed, ev, ql);
210 ev->state = TLE_SEV_IDLE;
211 rte_spinlock_unlock(&q->lock);
215 tle_evq_idle(struct tle_evq *evq, struct tle_event *ev[], uint32_t num)
219 rte_spinlock_lock(&evq->lock);
222 for (i = 0; i != num; i++) {
223 if (ev[i]->state == TLE_SEV_UP) {
224 TAILQ_REMOVE(&evq->armed, ev[i], ql);
227 ev[i]->state = TLE_SEV_IDLE;
231 rte_spinlock_unlock(&evq->lock);
236 * return up to *num* user data pointers associated with
237 * the events that were in the UP state.
238 * Each retrieved event is automatically moved into the DOWN state.
240 * event queue to retrieve events from.
242 * An array of user data pointers associated with the events retrieved.
243 * It must be large enough to store up to *num* pointers in it.
245 * Number of elements in the *evd* array.
247 * number of of entries filled inside *evd* array.
249 static inline int32_t
250 tle_evq_get(struct tle_evq *evq, const void *evd[], uint32_t num)
253 struct tle_event *ev;
255 if (evq->nb_armed == 0)
258 rte_compiler_barrier();
260 rte_spinlock_lock(&evq->lock);
261 n = RTE_MIN(num, evq->nb_armed);
262 for (i = 0; i != n; i++) {
263 ev = TAILQ_FIRST(&evq->armed);
264 ev->state = TLE_SEV_DOWN;
265 TAILQ_REMOVE(&evq->armed, ev, ql);
269 rte_spinlock_unlock(&evq->lock);
278 #endif /* _SEV_IMPL_H_ */