X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fsvm%2Fsvm_fifo.h;h=1ac5b6363cd3782eefdb628b14e033d8abbd55ad;hb=0e88e851e058f4fb7cc690dbbdb19216ab360d1c;hp=a83cd858f833d5cd867281c88937431565588b93;hpb=3eb5062b40feb3002de09a3caff86232d6e1adea;p=vpp.git

diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h
index a83cd858f83..1ac5b6363cd 100644
--- a/src/svm/svm_fifo.h
+++ b/src/svm/svm_fifo.h
@@ -62,6 +62,7 @@ typedef struct _svm_fifo
   u32 segment_manager;
   CLIB_CACHE_LINE_ALIGN_MARK (end_shared);
   u32 head;
+  volatile u32 want_tx_evt;	/**< producer wants nudge */
   CLIB_CACHE_LINE_ALIGN_MARK (end_consumer);
 
   /* producer */
@@ -75,9 +76,22 @@ typedef struct _svm_fifo
 #if SVM_FIFO_TRACE
   svm_fifo_trace_elem_t *trace;
 #endif
+  u32 freelist_index;		/**< aka log2(allocated_size) - const. */
+  i8 refcnt;			/**< reference count */
   CLIB_CACHE_LINE_ALIGN_MARK (data);
 } svm_fifo_t;
 
+typedef enum
+{
+  SVM_FIFO_FULL = -2,
+} svm_fifo_err_t;
+
+typedef struct svm_fifo_segment_
+{
+  u8 *data;
+  u32 len;
+} svm_fifo_segment_t;
+
 #if SVM_FIFO_TRACE
 #define svm_fifo_trace_add(_f, _s, _l, _t)		\
 {							\
@@ -100,12 +114,30 @@ svm_fifo_max_dequeue (svm_fifo_t * f)
   return f->cursize;
 }
 
+static inline int
+svm_fifo_is_full (svm_fifo_t * f)
+{
+  return (f->cursize == f->nitems);
+}
+
+static inline int
+svm_fifo_is_empty (svm_fifo_t * f)
+{
+  return (f->cursize == 0);
+}
+
 static inline u32
 svm_fifo_max_enqueue (svm_fifo_t * f)
 {
   return f->nitems - svm_fifo_max_dequeue (f);
 }
 
+static inline int
+svm_fifo_has_event (svm_fifo_t * f)
+{
+  return f->has_event;
+}
+
 static inline u8
 svm_fifo_has_ooo_data (svm_fifo_t * f)
 {
@@ -115,39 +147,59 @@ svm_fifo_has_ooo_data (svm_fifo_t * f)
 /**
  * Sets fifo event flag.
  *
+ * Also acts as a release barrier.
+ *
  * @return 1 if flag was not set.
  */
 always_inline u8
 svm_fifo_set_event (svm_fifo_t * f)
 {
-  /* Probably doesn't need to be atomic. Still, better avoid surprises */
-  return __sync_lock_test_and_set (&f->has_event, 1) == 0;
+  /* return __sync_lock_test_and_set (&f->has_event, 1) == 0;
+     return __sync_bool_compare_and_swap (&f->has_event, 0, 1); */
+  return !__atomic_exchange_n (&f->has_event, 1, __ATOMIC_RELEASE);
 }
 
 /**
  * Unsets fifo event flag.
+ *
+ * Also acts as a release barrier.
  */
 always_inline void
 svm_fifo_unset_event (svm_fifo_t * f)
 {
-  /* Probably doesn't need to be atomic. Still, better avoid surprises */
   __sync_lock_release (&f->has_event);
 }
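
/* Editor's sketch -- not part of the patch.  One plausible use of the
 * event flag across the shared-memory boundary: svm_fifo_set_event()
 * returns 1 only for the caller that flips has_event 0 -> 1, so exactly
 * one producer sends a wakeup per quiet period, and the consumer clears
 * the flag *before* draining so an enqueue racing with the clear still
 * triggers a fresh notification.  notify_consumer() and process_bytes()
 * are hypothetical helpers, not part of this API. */

static void notify_consumer (svm_fifo_t * f);	/* hypothetical wakeup, e.g. eventfd */
static void process_bytes (u8 * buf, u32 len);	/* hypothetical application handler */

static void
producer_enqueue (svm_fifo_t * f, const u8 * data, u32 len)
{
  if (svm_fifo_enqueue_nowait (f, len, data) > 0 && svm_fifo_set_event (f))
    notify_consumer (f);	/* first setter after a clear notifies */
}

static void
consumer_on_event (svm_fifo_t * f, u8 * buf, u32 buf_len)
{
  int rv;

  svm_fifo_unset_event (f);	/* clear first; later enqueues re-notify */
  while ((rv = svm_fifo_dequeue_nowait (f, buf_len, buf)) > 0)
    process_bytes (buf, rv);
}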
 
+static inline void
+svm_fifo_set_want_tx_evt (svm_fifo_t * f, u8 want_evt)
+{
+  f->want_tx_evt = want_evt;
+}
+
+static inline u8
+svm_fifo_want_tx_evt (svm_fifo_t * f)
+{
+  return f->want_tx_evt;
+}
+
 svm_fifo_t *svm_fifo_create (u32 data_size_in_bytes);
 void svm_fifo_free (svm_fifo_t * f);
 
 int svm_fifo_enqueue_nowait (svm_fifo_t * f, u32 max_bytes,
-			     u8 * copy_from_here);
+			     const u8 * copy_from_here);
 int svm_fifo_enqueue_with_offset (svm_fifo_t * f, u32 offset,
 				  u32 required_bytes, u8 * copy_from_here);
 int svm_fifo_dequeue_nowait (svm_fifo_t * f, u32 max_bytes, u8 * copy_here);
 
 int svm_fifo_peek (svm_fifo_t * f, u32 offset, u32 max_bytes, u8 * copy_here);
 int svm_fifo_dequeue_drop (svm_fifo_t * f, u32 max_bytes);
+void svm_fifo_dequeue_drop_all (svm_fifo_t * f);
+int svm_fifo_segments (svm_fifo_t * f, svm_fifo_segment_t * fs);
+void svm_fifo_segments_free (svm_fifo_t * f, svm_fifo_segment_t * fs);
 u32 svm_fifo_number_ooo_segments (svm_fifo_t * f);
 ooo_segment_t *svm_fifo_first_ooo_segment (svm_fifo_t * f);
 void svm_fifo_init_pointers (svm_fifo_t * f, u32 pointer);
+void svm_fifo_overwrite_head (svm_fifo_t * f, u8 * data, u32 len);
 
 format_function_t format_svm_fifo;
 
@@ -165,6 +217,49 @@ svm_fifo_newest_ooo_segment_reset (svm_fifo_t * f)
   f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
 }
 
+/**
+ * Max contiguous chunk of data that can be read
+ */
+always_inline u32
+svm_fifo_max_read_chunk (svm_fifo_t * f)
+{
+  return ((f->tail > f->head) ? (f->tail - f->head) : (f->nitems - f->head));
+}
+
+/**
+ * Max contiguous chunk of data that can be written
+ */
+always_inline u32
+svm_fifo_max_write_chunk (svm_fifo_t * f)
+{
+  return ((f->tail >= f->head) ? (f->nitems - f->tail) : (f->head - f->tail));
+}
+
+/**
+ * Advance tail pointer
+ *
+ * Useful for moving tail pointer after external enqueue.
+ */
+always_inline void
+svm_fifo_enqueue_nocopy (svm_fifo_t * f, u32 bytes)
+{
+  ASSERT (bytes <= svm_fifo_max_enqueue (f));
+  f->tail = (f->tail + bytes) % f->nitems;
+  f->cursize += bytes;
+}
+
+always_inline u8 *
+svm_fifo_head (svm_fifo_t * f)
+{
+  return (f->data + f->head);
+}
+
+always_inline u8 *
+svm_fifo_tail (svm_fifo_t * f)
+{
+  return (f->data + f->tail);
+}
+
 always_inline u32
 ooo_segment_distance_from_tail (svm_fifo_t * f, u32 pos)
 {
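
/* Editor's sketch -- not part of the patch.  The chunk helpers above
 * enable an external (zero-copy) enqueue: a caller writes into the
 * fifo's linear region directly and only then publishes the bytes with
 * svm_fifo_enqueue_nocopy().  A wrapped fifo exposes at most two linear
 * regions, so at most two writes are needed per pass.  The sketch
 * assumes a non-blocking POSIX fd; fifo_fill_from_fd() is illustrative,
 * not part of this API. */

#include <unistd.h>

static int
fifo_fill_from_fd (svm_fifo_t * f, int fd)
{
  int total = 0;

  while (svm_fifo_max_enqueue (f) > 0)
    {
      u32 chunk = svm_fifo_max_write_chunk (f);
      ssize_t n_read = read (fd, svm_fifo_tail (f), chunk);

      if (n_read <= 0)
	break;					/* would-block, EOF or error */
      svm_fifo_enqueue_nocopy (f, n_read);	/* advance tail and cursize */
      total += n_read;
      if ((u32) n_read < chunk)
	break;					/* fd drained for now */
    }
  return total;
}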