CLIB_CACHE_LINE_ALIGN_MARK (data);
} svm_fifo_t;
/** Error codes returned by fifo enqueue/dequeue operations */
typedef enum
{
  SVM_FIFO_FULL = -2,		/**< no free space left in the fifo */
} svm_fifo_err_t;

/**
 * Contiguous chunk of fifo data.
 *
 * Used by svm_fifo_segments()/svm_fifo_segments_free() to expose the
 * fifo contents for in-place (zero-copy) access.
 */
typedef struct svm_fifo_segment_
{
  u8 *data;			/**< pointer into the fifo data buffer */
  u32 len;			/**< segment length in bytes */
} svm_fifo_segment_t;
+
#if SVM_FIFO_TRACE
#define svm_fifo_trace_add(_f, _s, _l, _t) \
{ \
return f->cursize;
}
+static inline int
+svm_fifo_is_full (svm_fifo_t * f)
+{
+ return (f->cursize == f->nitems);
+}
+
+static inline int
+svm_fifo_is_empty (svm_fifo_t * f)
+{
+ return (f->cursize == 0);
+}
+
/**
 * Number of bytes that can still be enqueued.
 *
 * @return free space in the fifo, in bytes
 */
static inline u32
svm_fifo_max_enqueue (svm_fifo_t * f)
{
  u32 used = svm_fifo_max_dequeue (f);
  return f->nitems - used;
}
+static inline int
+svm_fifo_has_event (svm_fifo_t * f)
+{
+ return f->has_event;
+}
+
static inline u8
svm_fifo_has_ooo_data (svm_fifo_t * f)
{
/**
 * Sets fifo event flag.
 *
 * Also acts as a release barrier.
 *
 * @return 1 if flag was not set.
 */
always_inline u8
svm_fifo_set_event (svm_fifo_t * f)
{
  /* Atomic exchange with release ordering: publishes all prior fifo
   * writes to the thread that consumes the event. Returns the previous
   * flag value, so the result is 1 only for the caller that actually
   * raised the event. */
  return !__atomic_exchange_n (&f->has_event, 1, __ATOMIC_RELEASE);
}
/**
 * Unsets fifo event flag.
 *
 * Also acts as a release barrier.
 */
always_inline void
svm_fifo_unset_event (svm_fifo_t * f)
{
  /* Release store of 0 is what __sync_lock_release does; use the
   * __atomic builtin for consistency with svm_fifo_set_event. */
  __atomic_store_n (&f->has_event, 0, __ATOMIC_RELEASE);
}
/* Copy up to max_bytes starting at offset from head, without dequeuing */
int svm_fifo_peek (svm_fifo_t * f, u32 offset, u32 max_bytes, u8 * copy_here);
/* Discard up to max_bytes from the head without copying them out */
int svm_fifo_dequeue_drop (svm_fifo_t * f, u32 max_bytes);
/* Discard the entire fifo contents */
void svm_fifo_dequeue_drop_all (svm_fifo_t * f);
/* Fill fs with the fifo's readable chunks for in-place consumption;
 * presumably fs holds up to two segments (data may wrap) — verify at
 * the definition site */
int svm_fifo_segments (svm_fifo_t * f, svm_fifo_segment_t * fs);
/* Release/consume segments previously obtained via svm_fifo_segments */
void svm_fifo_segments_free (svm_fifo_t * f, svm_fifo_segment_t * fs);
u32 svm_fifo_number_ooo_segments (svm_fifo_t * f);
ooo_segment_t *svm_fifo_first_ooo_segment (svm_fifo_t * f);
void svm_fifo_init_pointers (svm_fifo_t * f, u32 pointer);
/* Overwrite len bytes at the head without moving the head pointer */
void svm_fifo_overwrite_head (svm_fifo_t * f, u8 * data, u32 len);
format_function_t format_svm_fifo;
f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
}
+/**
+ * Max contiguous chunk of data that can be read
+ */
+always_inline u32
+svm_fifo_max_read_chunk (svm_fifo_t * f)
+{
+ return ((f->tail > f->head) ? (f->tail - f->head) : (f->nitems - f->head));
+}
+
+/**
+ * Max contiguous chunk of data that can be written
+ */
+always_inline u32
+svm_fifo_max_write_chunk (svm_fifo_t * f)
+{
+ return ((f->tail >= f->head) ? (f->nitems - f->tail) : (f->head - f->tail));
+}
+
+/**
+ * Advance tail pointer
+ *
+ * Useful for moving tail pointer after external enqueue.
+ */
+always_inline void
+svm_fifo_enqueue_nocopy (svm_fifo_t * f, u32 bytes)
+{
+ ASSERT (bytes <= svm_fifo_max_enqueue (f));
+ f->tail = (f->tail + bytes) % f->nitems;
+ f->cursize += bytes;
+}
+
+always_inline u8 *
+svm_fifo_head (svm_fifo_t * f)
+{
+ return (f->data + f->head);
+}
+
+always_inline u8 *
+svm_fifo_tail (svm_fifo_t * f)
+{
+ return (f->data + f->tail);
+}
+
always_inline u32
ooo_segment_distance_from_tail (svm_fifo_t * f, u32 pos)
{