/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15 #ifndef __included_ssvm_fifo_h__
16 #define __included_ssvm_fifo_h__
18 #include <vppinfra/clib.h>
19 #include <vppinfra/vec.h>
20 #include <vppinfra/mheap.h>
21 #include <vppinfra/heap.h>
22 #include <vppinfra/pool.h>
23 #include <vppinfra/format.h>
28 SVM_FIFO_TAG_NOT_HELD = 0,
33 /** Out-of-order segment */
36 u32 next; /**< Next linked-list element pool index */
37 u32 prev; /**< Previous linked-list element pool index */
39 u32 fifo_position; /**< Start of segment, normalized*/
40 u32 length; /**< Length of segment */
43 #define OOO_SEGMENT_INVALID_INDEX ((u32)~0)
47 pthread_mutex_t mutex; /* 8 bytes */
48 pthread_cond_t condvar; /* 8 bytes */
51 volatile u32 cursize; /**< current fifo size */
52 volatile u8 has_event; /**< non-zero if deq event exists */
57 u32 server_session_index;
58 u32 client_session_index;
59 u8 server_thread_index;
60 u8 client_thread_index;
61 CLIB_CACHE_LINE_ALIGN_MARK (end_shared);
63 CLIB_CACHE_LINE_ALIGN_MARK (end_consumer);
68 ooo_segment_t *ooo_segments; /**< Pool of ooo segments */
69 u32 ooos_list_head; /**< Head of out-of-order linked-list */
70 u32 ooos_newest; /**< Last segment to have been updated */
72 CLIB_CACHE_LINE_ALIGN_MARK (data);
76 svm_fifo_lock (svm_fifo_t * f, u32 pid, u32 tag, int nowait)
78 if (PREDICT_TRUE (nowait == 0))
79 pthread_mutex_lock (&f->mutex);
82 if (pthread_mutex_trylock (&f->mutex))
91 svm_fifo_unlock (svm_fifo_t * f)
95 CLIB_MEMORY_BARRIER ();
96 pthread_mutex_unlock (&f->mutex);
100 svm_fifo_max_dequeue (svm_fifo_t * f)
106 svm_fifo_max_enqueue (svm_fifo_t * f)
108 return f->nitems - f->cursize;
112 svm_fifo_has_ooo_data (svm_fifo_t * f)
114 return f->ooos_list_head != OOO_SEGMENT_INVALID_INDEX;
118 * Sets fifo event flag.
120 * @return 1 if flag was not set.
123 svm_fifo_set_event (svm_fifo_t * f)
125 /* Probably doesn't need to be atomic. Still, better avoid surprises */
126 return __sync_lock_test_and_set (&f->has_event, 1) == 0;
130 * Unsets fifo event flag.
133 svm_fifo_unset_event (svm_fifo_t * f)
135 /* Probably doesn't need to be atomic. Still, better avoid surprises */
136 __sync_lock_test_and_set (&f->has_event, 0);
139 svm_fifo_t *svm_fifo_create (u32 data_size_in_bytes);
141 int svm_fifo_enqueue_nowait (svm_fifo_t * f, int pid, u32 max_bytes,
142 u8 * copy_from_here);
144 int svm_fifo_enqueue_with_offset (svm_fifo_t * f, int pid,
145 u32 offset, u32 required_bytes,
146 u8 * copy_from_here);
148 int svm_fifo_dequeue_nowait (svm_fifo_t * f, int pid, u32 max_bytes,
151 int svm_fifo_peek (svm_fifo_t * f, int pid, u32 offset, u32 max_bytes,
153 int svm_fifo_dequeue_drop (svm_fifo_t * f, int pid, u32 max_bytes);
155 always_inline ooo_segment_t *
156 svm_fifo_newest_ooo_segment (svm_fifo_t * f)
158 return f->ooo_segments + f->ooos_newest;
162 ooo_segment_offset (svm_fifo_t * f, ooo_segment_t * s)
164 return ((f->nitems + s->fifo_position - f->tail) % f->nitems);
168 ooo_segment_end_offset (svm_fifo_t * f, ooo_segment_t * s)
170 return ((f->nitems + s->fifo_position + s->length - f->tail) % f->nitems);
173 #endif /* __included_ssvm_fifo_h__ */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */