X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fsvm%2Fsvm_fifo.h;h=4239e9d78ea10aa4ee2d26d16a89f46d3807b971;hb=c95cfa218;hp=390e1170e27c18c70cb790ea7fb862249a226473;hpb=29a59c3ae18573043d9f9baa2796ab0b841bf6aa;p=vpp.git diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h index 390e1170e27..4239e9d78ea 100644 --- a/src/svm/svm_fifo.h +++ b/src/svm/svm_fifo.h @@ -23,104 +23,30 @@ #include #include #include -#include +#include -/** Out-of-order segment */ -typedef struct -{ - u32 next; /**< Next linked-list element pool index */ - u32 prev; /**< Previous linked-list element pool index */ - u32 start; /**< Start of segment, normalized*/ - u32 length; /**< Length of segment */ -} ooo_segment_t; - -#define SVM_FIFO_TRACE (0) #define OOO_SEGMENT_INVALID_INDEX ((u32)~0) #define SVM_FIFO_INVALID_SESSION_INDEX ((u32)~0) #define SVM_FIFO_INVALID_INDEX ((u32)~0) -#define SVM_FIFO_MAX_EVT_SUBSCRIBERS 7 -typedef enum svm_fifo_tx_ntf_ +typedef enum svm_fifo_deq_ntf_ { - SVM_FIFO_NO_TX_NOTIF = 0, - SVM_FIFO_WANT_TX_NOTIF = 1, - SVM_FIFO_WANT_TX_NOTIF_IF_FULL = 2, -} svm_fifo_tx_ntf_t; - -typedef struct -{ - u32 offset; - u32 len; - u32 action; -} svm_fifo_trace_elem_t; - -typedef struct svm_fifo_chunk_ -{ - u32 start_byte; /**< chunk start byte */ - u32 length; /**< length of chunk in bytes */ - struct svm_fifo_chunk_ *next; /**< pointer to next chunk in linked-lists */ - u8 data[0]; /**< start of chunk data */ -} svm_fifo_chunk_t; + SVM_FIFO_NO_DEQ_NOTIF = 0, /**< No notification requested */ + SVM_FIFO_WANT_DEQ_NOTIF = 1, /**< Notify on dequeue */ + SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL = 2, /**< Notify on transition from full */ + SVM_FIFO_WANT_DEQ_NOTIF_IF_EMPTY = 4, /**< Notify on transition to empty */ +} svm_fifo_deq_ntf_t; typedef enum svm_fifo_flag_ { - SVM_FIFO_F_SIZE_UPDATE = 1 << 0, - SVM_FIFO_F_MULTI_CHUNK = 1 << 1, - SVM_FIFO_F_LL_TRACKED = 1 << 2, + SVM_FIFO_F_LL_TRACKED = 1 << 0, } svm_fifo_flag_t; -typedef struct _svm_fifo -{ - CLIB_CACHE_LINE_ALIGN_MARK (shared_first); - u32 size; /**< size of the fifo in bytes */ - u32 nitems; /**< usable size (size-1) */ - u8 flags; /**< fifo flags */ - svm_fifo_chunk_t *start_chunk;/**< first chunk in fifo chunk list */ - svm_fifo_chunk_t *end_chunk; /**< end chunk in fifo chunk list */ - svm_fifo_chunk_t *new_chunks; /**< chunks yet to be added to list */ - rb_tree_t chunk_lookup; - - CLIB_CACHE_LINE_ALIGN_MARK (shared_second); - volatile u32 has_event; /**< non-zero if deq event exists */ - u32 master_session_index; /**< session layer session index */ - u32 client_session_index; /**< app session index */ - u8 master_thread_index; /**< session layer thread index */ - u8 client_thread_index; /**< app worker index */ - u32 segment_manager; /**< session layer segment manager index */ - u32 segment_index; /**< segment index in segment manager */ - u32 freelist_index; /**< aka log2(allocated_size) - const. 
*/ - i8 refcnt; /**< reference count */ - struct _svm_fifo *next; /**< next in freelist/active chain */ - struct _svm_fifo *prev; /**< prev in active chain */ - - CLIB_CACHE_LINE_ALIGN_MARK (consumer); - u32 head; /**< fifo head position/byte */ - svm_fifo_chunk_t *head_chunk; /**< tracks chunk where head lands */ - svm_fifo_chunk_t *ooo_deq; /**< last chunk used for ooo dequeue */ - volatile u32 want_tx_ntf; /**< producer wants nudge */ - volatile u32 has_tx_ntf; - - CLIB_CACHE_LINE_ALIGN_MARK (producer); - u32 tail; /**< fifo tail position/byte */ - u32 ooos_list_head; /**< Head of out-of-order linked-list */ - svm_fifo_chunk_t *tail_chunk; /**< tracks chunk where tail lands */ - svm_fifo_chunk_t *ooo_enq; /**< last chunk used for ooo enqueue */ - ooo_segment_t *ooo_segments; /**< Pool of ooo segments */ - u32 ooos_newest; /**< Last segment to have been updated */ - volatile u8 n_subscribers; /**< Number of subscribers for io events */ - u8 subscribers[SVM_FIFO_MAX_EVT_SUBSCRIBERS]; - -#if SVM_FIFO_TRACE - svm_fifo_trace_elem_t *trace; -#endif - - svm_fifo_chunk_t default_chunk; -} svm_fifo_t; - typedef enum { SVM_FIFO_EFULL = -2, SVM_FIFO_EEMPTY = -3, + SVM_FIFO_EGROW = -4, } svm_fifo_err_t; typedef struct svm_fifo_seg_ @@ -172,7 +98,8 @@ f_load_head_tail_prod (svm_fifo_t * f, u32 * head, u32 * tail) *head = clib_atomic_load_acq_n (&f->head); } -/* Load head and tail independent of producer/consumer role +/** + * Load head and tail independent of producer/consumer role * * Internal function. */ @@ -186,47 +113,62 @@ f_load_head_tail_all_acq (svm_fifo_t * f, u32 * head, u32 * tail) } /** - * Distance to a from b, i.e., a - b in the fifo + * Fifo current size, i.e., number of bytes enqueued * * Internal function. */ static inline u32 -f_distance_to (svm_fifo_t * f, u32 a, u32 b) +f_cursize (svm_fifo_t * f, u32 head, u32 tail) { - return ((f->size + a - b) % f->size); + return tail - head; } /** - * Distance from a to b, i.e., b - a in the fifo + * Fifo free bytes, i.e., number of free bytes * - * Internal function. + * Internal function */ static inline u32 -f_distance_from (svm_fifo_t * f, u32 a, u32 b) +f_free_count (svm_fifo_t * f, u32 head, u32 tail) { - return ((f->size + b - a) % f->size); + return (f->size - f_cursize (f, head, tail)); } -/** - * Fifo current size, i.e., number of bytes enqueued - * - * Internal function. - */ -static inline u32 -f_cursize (svm_fifo_t * f, u32 head, u32 tail) +always_inline u32 +f_chunk_end (svm_fifo_chunk_t * c) { - return (head <= tail ? tail - head : f->size + tail - head); + return c->start_byte + c->length; } -/** - * Fifo free bytes, i.e., number of free bytes - * - * Internal function - */ -static inline u32 -f_free_count (svm_fifo_t * f, u32 head, u32 tail) +always_inline int +f_pos_lt (u32 a, u32 b) { - return (f->nitems - f_cursize (f, head, tail)); + return ((i32) (a - b) < 0); +} + +always_inline int +f_pos_leq (u32 a, u32 b) +{ + return ((i32) (a - b) <= 0); +} + +always_inline int +f_pos_gt (u32 a, u32 b) +{ + return ((i32) (a - b) > 0); +} + +always_inline int +f_pos_geq (u32 a, u32 b) +{ + return ((i32) (a - b) >= 0); +} + +always_inline u8 +f_chunk_includes_pos (svm_fifo_chunk_t * c, u32 pos) +{ + return (f_pos_geq (pos, c->start_byte) + && f_pos_lt (pos, c->start_byte + c->length)); } /** @@ -238,10 +180,11 @@ f_free_count (svm_fifo_t * f, u32 head, u32 tail) * rounded to the next highest power-of-two value. 
* @return pointer to new fifo */ -svm_fifo_t *svm_fifo_create (u32 size); +svm_fifo_t *svm_fifo_alloc (u32 size); /** * Initialize fifo * + * @param f fifo * @param size size for fifo */ void svm_fifo_init (svm_fifo_t * f, u32 size); @@ -257,15 +200,20 @@ void svm_fifo_init (svm_fifo_t * f, u32 size); */ svm_fifo_chunk_t *svm_fifo_chunk_alloc (u32 size); /** - * Grow fifo size by adding chunk to chunk list + * Ensure the whole fifo size is writeable * - * If fifos are allocated on a segment, this should be called with - * the segment's heap pushed. + * Allocates enough chunks to cover the whole fifo size. * - * @param f fifo to be extended - * @param c chunk or linked list of chunks to be added + * @param f fifo */ -void svm_fifo_add_chunk (svm_fifo_t * f, svm_fifo_chunk_t * c); +int svm_fifo_fill_chunk_list (svm_fifo_t * f); +/** + * Initialize rbtrees used for ooo lookups + * + * @param f fifo + * @param ooo_type type of ooo operation (0 enqueue, 1 dequeue) + */ +void svm_fifo_init_ooo_lookup (svm_fifo_t * f, u8 ooo_type); /** * Free fifo and associated state * @@ -333,11 +281,32 @@ int svm_fifo_enqueue (svm_fifo_t * f, u32 len, const u8 * src); */ int svm_fifo_enqueue_with_offset (svm_fifo_t * f, u32 offset, u32 len, u8 * src); + +/** + * Advance tail pointer + * + * Useful for moving tail pointer after external enqueue. + * + * @param f fifo + * @param len number of bytes to add to tail + */ +void svm_fifo_enqueue_nocopy (svm_fifo_t * f, u32 len); +/** + * Enqueue array of @ref svm_fifo_seg_t in order + * + * @param f fifo + * @param segs array of segments to enqueue + * @param n_segs number of segments + * @param allow_partial if set partial enqueues are allowed + * @return len if enqueue was successful, error otherwise + */ +int svm_fifo_enqueue_segments (svm_fifo_t * f, const svm_fifo_seg_t segs[], + u32 n_segs, u8 allow_partial); /** * Overwrite fifo head with new data * * This should be typically used by dgram transport protocols that need - * to update the dgram header after dequeueing a chunk of data. It assumes + * to update the dgram header after dequeuing a chunk of data. It assumes * that the dgram header is at most spread over two chunks. * * @param f fifo @@ -349,7 +318,9 @@ void svm_fifo_overwrite_head (svm_fifo_t * f, u8 * src, u32 len); * Dequeue data from fifo * * Data is dequeued to consumer provided buffer and head is atomically - * updated. + * updated. This should not be used in combination with ooo lookups. If + * ooo peeking of data is needed in combination with dequeuing use @ref + * svm_fifo_dequeue_drop. * * @param f fifo * @param len length of data to dequeue @@ -388,8 +359,23 @@ int svm_fifo_dequeue_drop (svm_fifo_t * f, u32 len); * @param f fifo */ void svm_fifo_dequeue_drop_all (svm_fifo_t * f); -int svm_fifo_segments (svm_fifo_t * f, svm_fifo_seg_t * fs); -void svm_fifo_segments_free (svm_fifo_t * f, svm_fifo_seg_t * fs); +/** + * Get pointers to fifo chunks data in @ref svm_fifo_seg_t array + * + * Populates fifo segment array with pointers to fifo chunk data and lengths. + * Because this returns pointers to data, it must be paired with + * @ref svm_fifo_dequeue_drop to actually release the fifo chunks after the + * data is consumed. 
+ *
+ * @param f fifo
+ * @param offset offset from where to retrieve segments
+ * @param fs array of fifo segments allocated by caller
+ * @param n_segs number of fifo segments in array
+ * @param max_bytes max bytes to be mapped to fifo segments
+ * @return number of bytes in fifo segments or SVM_FIFO_EEMPTY
+ */
+int svm_fifo_segments (svm_fifo_t * f, u32 offset, svm_fifo_seg_t * fs,
+                       u32 n_segs, u32 max_bytes);
 /**
  * Add io events subscriber to list
  *
@@ -411,13 +397,27 @@ void svm_fifo_del_subscriber (svm_fifo_t * f, u8 subscriber);
  * @return number of out of order segments
  */
 u32 svm_fifo_n_ooo_segments (svm_fifo_t * f);
-/*
+/**
  * First out-of-order segment for fifo
  *
  * @param f fifo
  * @return first out-of-order segment for fifo
  */
 ooo_segment_t *svm_fifo_first_ooo_segment (svm_fifo_t * f);
+/**
+ * Check if fifo is sane. Debug only.
+ *
+ * @param f fifo
+ * @return 1 if sane, 0 otherwise
+ */
+u8 svm_fifo_is_sane (svm_fifo_t * f);
+/**
+ * Number of chunks linked into the fifo
+ *
+ * @param f fifo
+ * @return number of chunks in fifo linked list
+ */
+u32 svm_fifo_n_chunks (svm_fifo_t * f);
 format_function_t format_svm_fifo;
 
 /**
@@ -472,7 +472,7 @@ svm_fifo_max_dequeue (svm_fifo_t * f)
 static inline int
 svm_fifo_is_full_prod (svm_fifo_t * f)
 {
-  return (svm_fifo_max_dequeue_prod (f) == f->nitems);
+  return (svm_fifo_max_dequeue_prod (f) == f->size);
 }
 
 /* Check if fifo is full.
@@ -484,7 +484,7 @@ svm_fifo_is_full_prod (svm_fifo_t * f)
 static inline int
 svm_fifo_is_full (svm_fifo_t * f)
 {
-  return (svm_fifo_max_dequeue (f) == f->nitems);
+  return (svm_fifo_max_dequeue (f) == f->size);
 }
 
 /**
@@ -569,53 +569,64 @@ svm_fifo_max_enqueue (svm_fifo_t * f)
 }
 
 /**
- * Max contiguous chunk of data that can be read
+ * Max contiguous chunk of data that can be read.
+ *
+ * Should only be called by consumers.
  */
-static inline u32
-svm_fifo_max_read_chunk (svm_fifo_t * f)
-{
-  u32 head, tail;
-  f_load_head_tail_cons (f, &head, &tail);
-  return tail >= head ? (tail - head) : (f->size - head);
-}
+u32 svm_fifo_max_read_chunk (svm_fifo_t * f);
 
 /**
  * Max contiguous chunk of data that can be written
+ *
+ * Should only be called by producers.
  */
-static inline u32
-svm_fifo_max_write_chunk (svm_fifo_t * f)
-{
-  u32 head, tail;
-  f_load_head_tail_prod (f, &head, &tail);
-  return tail > head ? f->size - tail : f_free_count (f, head, tail);
-}
+u32 svm_fifo_max_write_chunk (svm_fifo_t * f);
 
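To illustrate how the contiguous-chunk helpers above are typically combined, here is a minimal zero-copy enqueue sketch: it copies directly into fifo memory at the tail and then publishes the bytes with svm_fifo_enqueue_nocopy () instead of going through svm_fifo_enqueue (). This is only an illustration, not code from this header; it assumes a single producer, that svm_fifo_fill_chunk_list () has already made the requested range writeable, and example_zero_copy_enqueue () is a made-up name.

/* Illustrative sketch only: single-producer zero-copy enqueue */
static int
example_zero_copy_enqueue (svm_fifo_t * f, const u8 * data, u32 len)
{
  u32 n_contig, to_copy;

  /* Bail out early if the producer's view of free space is insufficient */
  if (svm_fifo_max_enqueue_prod (f) < len)
    return SVM_FIFO_EFULL;

  while (len)
    {
      /* Largest region that can be written without crossing a chunk boundary */
      n_contig = svm_fifo_max_write_chunk (f);
      to_copy = clib_min (len, n_contig);
      clib_memcpy_fast (svm_fifo_tail (f), data, to_copy);
      /* Publish the bytes by advancing the tail */
      svm_fifo_enqueue_nocopy (f, to_copy);
      data += to_copy;
      len -= to_copy;
    }
  return 0;
}
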
 /**
- * Advance tail pointer
- *
- * Useful for moving tail pointer after external enqueue.
+ * Fifo head chunk getter
  *
- * @param f fifo
- * @param len number of bytes to add to tail
+ * @param f fifo
+ * @return head chunk pointer
  */
-static inline void
-svm_fifo_enqueue_nocopy (svm_fifo_t * f, u32 len)
+static inline svm_fifo_chunk_t *
+svm_fifo_head_chunk (svm_fifo_t * f)
 {
-  ASSERT (len <= svm_fifo_max_enqueue_prod (f));
-  /* load-relaxed: producer owned index */
-  u32 tail = f->tail;
-  tail = (tail + len) % f->size;
-  /* store-rel: producer owned index (paired with load-acq in consumer) */
-  clib_atomic_store_rel_n (&f->tail, tail);
+  return f->head_chunk;
 }
 
+/**
+ * Fifo head pointer getter
+ *
+ * @param f fifo
+ * @return head pointer
+ */
 static inline u8 *
 svm_fifo_head (svm_fifo_t * f)
 {
+  if (!f->head_chunk)
+    return 0;
   /* load-relaxed: consumer owned index */
   return (f->head_chunk->data + (f->head - f->head_chunk->start_byte));
 }
 
+/**
+ * Fifo tail chunk getter
+ *
+ * @param f fifo
+ * @return tail chunk pointer
+ */
+static inline svm_fifo_chunk_t *
+svm_fifo_tail_chunk (svm_fifo_t * f)
+{
+  return f->tail_chunk;
+}
+
+/**
+ * Fifo tail pointer getter
+ *
+ * @param f fifo
+ * @return tail pointer
+ */
 static inline u8 *
 svm_fifo_tail (svm_fifo_t * f)
 {
@@ -623,6 +634,12 @@ svm_fifo_tail (svm_fifo_t * f)
   return (f->tail_chunk->data + (f->tail - f->tail_chunk->start_byte));
 }
 
+/**
+ * Fifo number of subscribers getter
+ *
+ * @param f fifo
+ * @return number of subscribers
+ */
 static inline u8
 svm_fifo_n_subscribers (svm_fifo_t * f)
 {
@@ -662,7 +679,7 @@ ooo_segment_offset_prod (svm_fifo_t * f, ooo_segment_t * s)
   /* load-relaxed: producer owned index */
   tail = f->tail;
 
-  return f_distance_to (f, s->start, tail);
+  return (s->start - tail);
 }
 
 static inline u32
@@ -671,6 +688,21 @@ ooo_segment_length (svm_fifo_t * f, ooo_segment_t * s)
   return s->length;
 }
 
+static inline u32
+svm_fifo_size (svm_fifo_t * f)
+{
+  return f->size;
+}
+
+static inline void
+svm_fifo_set_size (svm_fifo_t * f, u32 size)
+{
+  if (size > (1 << f->fs_hdr->max_log2_chunk_size))
+    return;
+  fsh_virtual_mem_update (f->fs_hdr, f->slice_index, (int) f->size - size);
+  f->size = size;
+}
+
 /**
  * Check if fifo has io event
  *
@@ -711,95 +743,97 @@ svm_fifo_unset_event (svm_fifo_t * f)
 }
 
 /**
- * Set specific want tx notification flag
+ * Set specific want notification flag
  *
- * For list of flags see @ref svm_fifo_tx_ntf_t
+ * For list of flags see @ref svm_fifo_deq_ntf_t
  *
  * @param f fifo
  * @param ntf_type type of notification requested
  */
 static inline void
-svm_fifo_add_want_tx_ntf (svm_fifo_t * f, u8 ntf_type)
+svm_fifo_add_want_deq_ntf (svm_fifo_t * f, u8 ntf_type)
 {
-  f->want_tx_ntf |= ntf_type;
+  f->want_deq_ntf |= ntf_type;
 }
 
 /**
- * Clear specific want tx notification flag
+ * Clear specific want notification flag
  *
- * For list of flags see @ref svm_fifo_tx_ntf_t
+ * For list of flags see @ref svm_fifo_deq_ntf_t
  *
  * @param f fifo
  * @param ntf_type type of notification to be cleared
  */
 static inline void
-svm_fifo_del_want_tx_ntf (svm_fifo_t * f, u8 ntf_type)
+svm_fifo_del_want_deq_ntf (svm_fifo_t * f, u8 ntf_type)
 {
-  f->want_tx_ntf &= ~ntf_type;
+  f->want_deq_ntf &= ~ntf_type;
 }
 
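As a usage sketch for the flag helpers above (illustrative only, example_try_send () is not part of this API): a producer that cannot fit all of its data would typically arm SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL and retry once the consumer drains the fifo.

/* Illustrative sketch only: producer arms a dequeue notification on full */
static int
example_try_send (svm_fifo_t * f, u8 * data, u32 len)
{
  int rv;

  rv = svm_fifo_enqueue (f, len, data);
  if (rv < (int) len)
    /* Ask to be notified when the consumer dequeues from the full fifo */
    svm_fifo_add_want_deq_ntf (f, SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL);
  return rv;
}
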
 /**
- * Clear the want tx notification flag and set has tx notification
+ * Clear the want notification flag and set has notification
  *
- * Should be used after enqueuing a tx event. This clears the
- * SVM_FIFO_WANT_TX_NOTIF flag but it does not clear
- * SVM_FIFO_WANT_TX_NOTIF_IF_FULL. If the latter was set, has_tx_ntf is
- * set to avoid enqueueing tx events for for all dequeue operations until
+ * Should be used after enqueuing an event. This clears the
+ * SVM_FIFO_WANT_DEQ_NOTIF flag but it does not clear
+ * SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL. If the latter was set, has_deq_ntf is
+ * set to avoid enqueueing events for all dequeue operations until
  * it is manually cleared.
  *
  * @param f fifo
  */
 static inline void
-svm_fifo_clear_tx_ntf (svm_fifo_t * f)
+svm_fifo_clear_deq_ntf (svm_fifo_t * f)
 {
-  /* Set the flag if want_tx_notif_if_full was the only ntf requested */
-  f->has_tx_ntf = f->want_tx_ntf == SVM_FIFO_WANT_TX_NOTIF_IF_FULL;
-  svm_fifo_del_want_tx_ntf (f, SVM_FIFO_WANT_TX_NOTIF);
+  /* Set the flag if want_notif_if_full was the only ntf requested */
+  f->has_deq_ntf = f->want_deq_ntf == SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL;
+  svm_fifo_del_want_deq_ntf (f, SVM_FIFO_WANT_DEQ_NOTIF);
 }
 
 /**
- * Clear has tx notification flag
+ * Clear has notification flag
  *
- * The fifo generates only one event per SVM_FIFO_WANT_TX_NOTIF_IF_FULL
- * request and sets has_tx_ntf. To received new events the flag must be
+ * The fifo generates only one event per SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL
+ * request and sets has_deq_ntf. To receive new events the flag must be
  * cleared using this function.
  *
  * @param f fifo
  */
 static inline void
-svm_fifo_reset_tx_ntf (svm_fifo_t * f)
+svm_fifo_reset_has_deq_ntf (svm_fifo_t * f)
 {
-  f->has_tx_ntf = 0;
+  f->has_deq_ntf = 0;
 }
 
 /**
- * Check if fifo needs tx notification
+ * Check if fifo needs dequeue notification
  *
- * Determines based on tx notification request flags and state of the fifo if
- * a tx io event should be generated.
+ * Determines based on notification request flags and state of the fifo if
+ * an event should be generated.
  *
  * @param f fifo
  * @param n_last_deq number of bytes last dequeued
- * @return 1 if tx io event should be generated, 0 otherwise
+ * @return 1 if event should be generated, 0 otherwise
  */
 static inline u8
-svm_fifo_needs_tx_ntf (svm_fifo_t * f, u32 n_last_deq)
+svm_fifo_needs_deq_ntf (svm_fifo_t * f, u32 n_last_deq)
 {
-  u8 want_ntf = f->want_tx_ntf;
+  u8 want_ntf = f->want_deq_ntf;
 
-  if (PREDICT_TRUE (want_ntf == SVM_FIFO_NO_TX_NOTIF))
+  if (PREDICT_TRUE (want_ntf == SVM_FIFO_NO_DEQ_NOTIF))
     return 0;
-  else if (want_ntf & SVM_FIFO_WANT_TX_NOTIF)
+  else if (want_ntf & SVM_FIFO_WANT_DEQ_NOTIF)
     return 1;
-  else if (want_ntf & SVM_FIFO_WANT_TX_NOTIF_IF_FULL)
+  if (want_ntf & SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL)
     {
       u32 max_deq = svm_fifo_max_dequeue_cons (f);
-      u32 nitems = f->nitems;
-      if (!f->has_tx_ntf && max_deq < nitems
-	  && max_deq + n_last_deq >= nitems)
+      u32 size = f->size;
+      if (!f->has_deq_ntf && max_deq < size && max_deq + n_last_deq >= size)
+	return 1;
+    }
+  if (want_ntf & SVM_FIFO_WANT_DEQ_NOTIF_IF_EMPTY)
+    {
+      if (!f->has_deq_ntf && svm_fifo_is_empty (f))
 	return 1;
-
-      return 0;
     }
   return 0;
 }
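For completeness, here is a consumer-side sketch of the notification flow documented above: dequeue, ask svm_fifo_needs_deq_ntf () whether the bytes just dequeued crossed a threshold the producer cares about, and if so clear the request and signal the producer. This is illustrative only; send_io_evt_to_producer () is a hypothetical hook rather than something defined in this header, and example_dequeue_and_notify () is likewise made up.

/* Illustrative sketch only: consumer generates dequeue notifications */
static void
example_dequeue_and_notify (svm_fifo_t * f, u8 * buf, u32 max_len)
{
  int n_deq;

  n_deq = svm_fifo_dequeue (f, max_len, buf);
  if (n_deq <= 0)
    return;

  if (svm_fifo_needs_deq_ntf (f, n_deq))
    {
      svm_fifo_clear_deq_ntf (f);
      send_io_evt_to_producer (f);	/* hypothetical notification hook */
    }
}

Note that for SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL the fifo sets has_deq_ntf after the first event, so only one notification is generated until svm_fifo_reset_has_deq_ntf () is called.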