SVM_FIFO_FULL = -2,
} svm_fifo_err_t;
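+/** Pointer/length pair describing a chunk of fifo data; used by the
+ *  svm_fifo_segments API declared later in this file. */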
+typedef struct svm_fifo_segment_
+{
+ u8 *data;
+ u32 len;
+} svm_fifo_segment_t;
+
#if SVM_FIFO_TRACE
#define svm_fifo_trace_add(_f, _s, _l, _t) \
{ \
return f->cursize;
}
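+/** Check if fifo is full, i.e., no more bytes can be enqueued. */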
+static inline int
+svm_fifo_is_full (svm_fifo_t * f)
+{
+ return (f->cursize == f->nitems);
+}
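+/** Check if fifo is empty. */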
+
+static inline int
+svm_fifo_is_empty (svm_fifo_t * f)
+{
+ return (f->cursize == 0);
+}
+
static inline u32
svm_fifo_max_enqueue (svm_fifo_t * f)
{
return f->nitems - svm_fifo_max_dequeue (f);
}
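+/** Check if fifo has an event pending (plain read, no memory barrier). */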
+static inline int
+svm_fifo_has_event (svm_fifo_t * f)
+{
+ return f->has_event;
+}
+
static inline u8
svm_fifo_has_ooo_data (svm_fifo_t * f)
{
/**
* Sets fifo event flag.
*
+ * Also acts as a release barrier.
+ *
* @return 1 if flag was not set.
*/
always_inline u8
svm_fifo_set_event (svm_fifo_t * f)
{
- /* Probably doesn't need to be atomic. Still, better avoid surprises */
- return __sync_lock_test_and_set (&f->has_event, 1) == 0;
+ /* Atomically set the flag with release semantics; the previous value
+    tells the caller whether a new event notification is needed. */
+ return !__atomic_exchange_n (&f->has_event, 1, __ATOMIC_RELEASE);
}
/**
* Unsets fifo event flag.
+ *
+ * Also acts as a release barrier.
*/
always_inline void
svm_fifo_unset_event (svm_fifo_t * f)
{
- /* Probably doesn't need to be atomic. Still, better avoid surprises */
__sync_lock_release (&f->has_event);
}
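+
+/*
+ * Illustrative sketch of the intended event protocol (not part of this
+ * patch; send_io_event is a placeholder for the application's
+ * notification mechanism): the writer signals the reader only when the
+ * flag transitions from 0 to 1, and the reader clears the flag before
+ * draining the fifo so a concurrent writer re-arms the notification.
+ *
+ *   // writer, after enqueueing data
+ *   if (svm_fifo_set_event (f))
+ *     send_io_event (f);
+ *
+ *   // reader, before dequeueing
+ *   svm_fifo_unset_event (f);
+ *   while (svm_fifo_max_dequeue (f))
+ *     ...
+ */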
int svm_fifo_peek (svm_fifo_t * f, u32 offset, u32 max_bytes, u8 * copy_here);
int svm_fifo_dequeue_drop (svm_fifo_t * f, u32 max_bytes);
+void svm_fifo_dequeue_drop_all (svm_fifo_t * f);
+int svm_fifo_segments (svm_fifo_t * f, svm_fifo_segment_t * fs);
+void svm_fifo_segments_free (svm_fifo_t * f, svm_fifo_segment_t * fs);
u32 svm_fifo_number_ooo_segments (svm_fifo_t * f);
ooo_segment_t *svm_fifo_first_ooo_segment (svm_fifo_t * f);
void svm_fifo_init_pointers (svm_fifo_t * f, u32 pointer);
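+
+/*
+ * Usage sketch for the zero-copy segment API above (illustrative only;
+ * process () is a placeholder consumer). Assumes svm_fifo_segments
+ * exposes the readable contents as up to two {data, len} chunks, since
+ * the ring may wrap, and returns a negative error when the fifo is
+ * empty; svm_fifo_segments_free then releases the consumed bytes.
+ *
+ *   svm_fifo_segment_t fs[2];
+ *   if (svm_fifo_segments (f, fs) > 0)
+ *     {
+ *       process (fs[0].data, fs[0].len);
+ *       if (fs[1].len)
+ *         process (fs[1].data, fs[1].len);
+ *       svm_fifo_segments_free (f, fs);
+ *     }
+ */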