X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fsvm%2Fsvm_fifo.c;h=6b4ea68ca17290b58670d4c38a83ca6fa74012b9;hb=f2922422d972644e67d1ca989e40cd0100ecb06d;hp=d56387772909e0851098e8268331009f2745fbda;hpb=29a59c3ae18573043d9f9baa2796ab0b841bf6aa;p=vpp.git diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index d5638777290..6b4ea68ca17 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -135,7 +135,7 @@ svm_fifo_free_ooo_data (svm_fifo_t * f) } static inline ooo_segment_t * -ooo_segment_get_prev (svm_fifo_t * f, ooo_segment_t * s) +ooo_segment_prev (svm_fifo_t * f, ooo_segment_t * s) { if (s->prev == OOO_SEGMENT_INVALID_INDEX) return 0; @@ -222,7 +222,7 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 head, u32 tail, u32 length) s = pool_elt_at_index (f->ooo_segments, s->next); /* If we have a previous and we overlap it, use it as starting point */ - prev = ooo_segment_get_prev (f, s); + prev = ooo_segment_prev (f, s); if (prev && position_leq (f, offset_pos, ooo_segment_end_pos (f, prev), tail)) { @@ -374,6 +374,20 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued, u32 * tail) return bytes; } +static ooo_segment_t * +ooo_segment_last (svm_fifo_t * f) +{ + ooo_segment_t *s; + + if (f->ooos_list_head == OOO_SEGMENT_INVALID_INDEX) + return 0; + + s = svm_fifo_first_ooo_segment (f); + while (s->next != OOO_SEGMENT_INVALID_INDEX) + s = pool_elt_at_index (f->ooo_segments, s->next); + return s; +} + void svm_fifo_init (svm_fifo_t * f, u32 size) { @@ -386,9 +400,7 @@ svm_fifo_init (svm_fifo_t * f, u32 size) f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX; f->segment_index = SVM_FIFO_INVALID_INDEX; f->refcnt = 1; - f->default_chunk.start_byte = 0; - f->default_chunk.length = f->size; - f->default_chunk.next = f->start_chunk = f->end_chunk = &f->default_chunk; + f->flags = 0; f->head_chunk = f->tail_chunk = f->ooo_enq = f->ooo_deq = f->start_chunk; } @@ -398,17 +410,31 @@ svm_fifo_init (svm_fifo_t * f, u32 size) svm_fifo_t * svm_fifo_create (u32 data_size_in_bytes) { - svm_fifo_t *f; u32 rounded_data_size; + svm_fifo_chunk_t *c; + svm_fifo_t *f; - /* always round fifo data size to the next highest power-of-two */ - rounded_data_size = (1 << (max_log2 (data_size_in_bytes))); - f = clib_mem_alloc_aligned_or_null (sizeof (*f) + rounded_data_size, - CLIB_CACHE_LINE_BYTES); + f = clib_mem_alloc_aligned_or_null (sizeof (*f), CLIB_CACHE_LINE_BYTES); if (f == 0) return 0; clib_memset (f, 0, sizeof (*f)); + + /* always round fifo data size to the next highest power-of-two */ + rounded_data_size = (1 << (max_log2 (data_size_in_bytes))); + c = clib_mem_alloc_aligned_or_null (sizeof (*c) + rounded_data_size, + CLIB_CACHE_LINE_BYTES); + if (!c) + { + clib_mem_free (f); + return 0; + } + + c->next = c; + c->start_byte = 0; + c->length = data_size_in_bytes; + f->start_chunk = f->end_chunk = c; + svm_fifo_init (f, data_size_in_bytes); return f; } @@ -434,8 +460,63 @@ svm_fifo_chunk_alloc (u32 size) return c; } +static inline u8 +svm_fifo_chunk_includes_pos (svm_fifo_chunk_t * c, u32 pos) +{ + return (pos >= c->start_byte && pos < c->start_byte + c->length); +} + +/** + * Find chunk for given byte position + * + * @param f fifo + * @param pos normalized position in fifo + * + * @return chunk that includes given position or 0 + */ +static svm_fifo_chunk_t * +svm_fifo_find_chunk (svm_fifo_t * f, u32 pos) +{ + rb_tree_t *rt = &f->chunk_lookup; + rb_node_t *cur, *prev; + svm_fifo_chunk_t *c; + + cur = rb_node (rt, rt->root); + while (pos != cur->key) + { + prev = cur; + if (pos < 
cur->key) + cur = rb_node_left (rt, cur); + else + cur = rb_node_right (rt, cur); + + if (rb_node_is_tnil (rt, cur)) + { + /* Hit tnil as a left child. Find predecessor */ + if (pos < prev->key) + { + cur = rb_tree_predecessor (rt, prev); + c = uword_to_pointer (cur->opaque, svm_fifo_chunk_t *); + if (svm_fifo_chunk_includes_pos (c, pos)) + return c; + return 0; + } + /* Hit tnil as a right child. Check if this is the one */ + c = uword_to_pointer (prev->opaque, svm_fifo_chunk_t *); + if (svm_fifo_chunk_includes_pos (c, pos)) + return c; + + return 0; + } + } + + if (!rb_node_is_tnil (rt, cur)) + return uword_to_pointer (cur->opaque, svm_fifo_chunk_t *); + return 0; +} + static inline void -svm_fifo_size_update (svm_fifo_t * f, svm_fifo_chunk_t * c) +svm_fifo_grow (svm_fifo_t * f, svm_fifo_chunk_t * c) { svm_fifo_chunk_t *prev; u32 add_bytes = 0; @@ -458,13 +539,13 @@ svm_fifo_size_update (svm_fifo_t * f, svm_fifo_chunk_t * c) } static void -svm_fifo_try_size_update (svm_fifo_t * f, u32 new_head) +svm_fifo_try_grow (svm_fifo_t * f, u32 new_head) { if (new_head > f->tail) return; - svm_fifo_size_update (f, f->new_chunks); - f->flags &= ~SVM_FIFO_F_SIZE_UPDATE; + svm_fifo_grow (f, f->new_chunks); + f->flags &= ~SVM_FIFO_F_GROW; } void @@ -472,7 +553,8 @@ svm_fifo_add_chunk (svm_fifo_t * f, svm_fifo_chunk_t * c) { svm_fifo_chunk_t *cur, *prev; - /* Initialize rbtree if needed and add default chunk to it */ + /* Initialize rbtree if needed and add default chunk to it. Expectation is + * that this is called with the heap where the rbtree's pool is pushed. */ if (!(f->flags & SVM_FIFO_F_MULTI_CHUNK)) { rb_tree_init (&f->chunk_lookup); @@ -480,8 +562,7 @@ svm_fifo_add_chunk (svm_fifo_t * f, svm_fifo_chunk_t * c) f->flags |= SVM_FIFO_F_MULTI_CHUNK; } - /* Initialize chunks and add to lookup rbtree. Expectation is that this is - * called with the heap where the rbtree's pool is pushed. 
*/ + /* Initialize chunks and add to lookup rbtree */ cur = c; if (f->new_chunks) { @@ -506,7 +587,7 @@ svm_fifo_add_chunk (svm_fifo_t * f, svm_fifo_chunk_t * c) if (!svm_fifo_is_wrapped (f)) { ASSERT (!f->new_chunks); - svm_fifo_size_update (f, c); + svm_fifo_grow (f, c); return; } @@ -514,63 +595,132 @@ svm_fifo_add_chunk (svm_fifo_t * f, svm_fifo_chunk_t * c) if (!f->new_chunks) { f->new_chunks = c; - f->flags |= SVM_FIFO_F_SIZE_UPDATE; + f->flags |= SVM_FIFO_F_GROW; } } -static inline u8 -svm_fifo_chunk_includes_pos (svm_fifo_chunk_t * c, u32 pos) +/** + * Removes chunks that are after fifo end byte + */ +svm_fifo_chunk_t * +svm_fifo_collect_chunks (svm_fifo_t * f) { - return (pos >= c->start_byte && pos < c->start_byte + c->length); + svm_fifo_chunk_t *list, *cur; + + f->flags &= ~SVM_FIFO_F_COLLECT_CHUNKS; + + list = f->new_chunks; + f->new_chunks = 0; + cur = list; + while (cur) + { + rb_tree_del (&f->chunk_lookup, cur->start_byte); + cur = cur->next; + } + + return list; +} + +void +svm_fifo_try_shrink (svm_fifo_t * f, u32 head, u32 tail) +{ + u32 len_to_shrink = 0, tail_pos, len; + svm_fifo_chunk_t *cur, *prev, *next, *start; + + tail_pos = tail; + if (f->ooos_list_head != OOO_SEGMENT_INVALID_INDEX) + { + ooo_segment_t *last = ooo_segment_last (f); + tail_pos = ooo_segment_end_pos (f, last); + } + + if (f->size_decrement) + { + /* Figure out available free space considering that there may be + * ooo segments */ + len = clib_min (f->size_decrement, f_free_count (f, head, tail_pos)); + f->nitems -= len; + f->size_decrement -= len; + } + + /* Remove tail chunks if the following hold: + * - not wrapped + * - last used byte less than start of last chunk + */ + if (tail_pos >= head && tail_pos <= f->end_chunk->start_byte) + { + /* Lookup the last position not to be removed. Since size still needs + * to be nitems + 1, nitems must fall within the usable space */ + tail_pos = tail_pos > 0 ? 
tail_pos - 1 : tail_pos; + prev = svm_fifo_find_chunk (f, clib_max (f->nitems, tail_pos)); + next = prev->next; + while (next != f->start_chunk) + { + cur = next; + next = cur->next; + len_to_shrink += cur->length; + } + if (len_to_shrink) + { + f->size -= len_to_shrink; + start = prev->next; + prev->next = f->start_chunk; + f->end_chunk = prev; + cur->next = f->new_chunks; + f->new_chunks = start; + } + } + + if (!f->size_decrement && f->size == f->nitems + 1) + { + f->flags &= ~SVM_FIFO_F_SHRINK; + f->flags |= SVM_FIFO_F_COLLECT_CHUNKS; + if (f->start_chunk == f->start_chunk->next) + f->flags &= ~SVM_FIFO_F_MULTI_CHUNK; + } } /** - * Find chunk for given byte position - * - * @param f fifo - * @param pos normalized position in fifo - * - * @return chunk that includes given position or 0 + * Request to reduce fifo size by amount of bytes */ -static svm_fifo_chunk_t * -svm_fifo_find_chunk (svm_fifo_t * f, u32 pos) +int +svm_fifo_reduce_size (svm_fifo_t * f, u32 len, u8 try_shrink) { - rb_tree_t *rt = &f->chunk_lookup; - rb_node_t *cur, *prev; - svm_fifo_chunk_t *c; + svm_fifo_chunk_t *cur; + u32 actual_len = 0; - cur = rb_node (rt, rt->root); - while (pos != cur->key) + /* Abort if trying to reduce by more than fifo size or if + * fifo is undergoing resizing already */ + if (len >= f->size || f->size > f->nitems + 1 + || (f->flags & SVM_FIFO_F_SHRINK) || (f->flags & SVM_FIFO_F_GROW)) + return 0; + + /* last chunk that will not be removed */ + cur = svm_fifo_find_chunk (f, f->nitems - len); + + /* sum length of chunks that will be removed */ + cur = cur->next; + while (cur != f->start_chunk) { - prev = cur; - if (pos < cur->key) - cur = rb_node_left (rt, cur); - else - cur = rb_node_right (rt, cur); + actual_len += cur->length; + cur = cur->next; + } - if (rb_node_is_tnil (rt, cur)) - { - /* Hit tnil as a left child. Find predecessor */ - if (pos < prev->key) - { - cur = rb_tree_predecessor (rt, prev); - c = uword_to_pointer (cur->opaque, svm_fifo_chunk_t *); - if (svm_fifo_chunk_includes_pos (c, pos)) - return c; - return 0; - } - /* Hit tnil as a right child. 
Check if this is the one */ - c = uword_to_pointer (prev->opaque, svm_fifo_chunk_t *); - if (svm_fifo_chunk_includes_pos (c, pos)) - return c; + ASSERT (actual_len <= len); + if (!actual_len) + return 0; - return 0; - } + f->size_decrement = actual_len; + f->flags |= SVM_FIFO_F_SHRINK; + + if (try_shrink) + { + u32 head, tail; + f_load_head_tail_prod (f, &head, &tail); + svm_fifo_try_shrink (f, head, tail); } - if (!rb_node_is_tnil (rt, cur)) - return uword_to_pointer (cur->opaque, svm_fifo_chunk_t *); - return 0; + return actual_len; } void @@ -660,6 +810,9 @@ svm_fifo_enqueue_with_offset (svm_fifo_t * f, u32 offset, u32 len, u8 * src) f_load_head_tail_prod (f, &head, &tail); + if (PREDICT_FALSE (f->flags & SVM_FIFO_F_SHRINK)) + svm_fifo_try_shrink (f, head, tail); + /* free space in fifo can only increase during enqueue: SPSC */ free_count = f_free_count (f, head, tail); @@ -680,6 +833,22 @@ svm_fifo_enqueue_with_offset (svm_fifo_t * f, u32 offset, u32 len, u8 * src) return 0; } +/** + * Advance tail + */ +void +svm_fifo_enqueue_nocopy (svm_fifo_t * f, u32 len) +{ + u32 tail; + + ASSERT (len <= svm_fifo_max_enqueue_prod (f)); + /* load-relaxed: producer owned index */ + tail = f->tail; + tail = (tail + len) % f->size; + /* store-rel: producer owned index (paired with load-acq in consumer) */ + clib_atomic_store_rel_n (&f->tail, tail); +} + int svm_fifo_dequeue (svm_fifo_t * f, u32 len, u8 * dst) { @@ -697,8 +866,8 @@ svm_fifo_dequeue (svm_fifo_t * f, u32 len, u8 * dst) svm_fifo_copy_from_chunk (f, f->head_chunk, head, dst, len, &f->head_chunk); head = (head + len) % f->size; - if (PREDICT_FALSE (f->flags & SVM_FIFO_F_SIZE_UPDATE)) - svm_fifo_try_size_update (f, head); + if (PREDICT_FALSE (f->flags & SVM_FIFO_F_GROW)) + svm_fifo_try_grow (f, head); /* store-rel: consumer owned index (paired with load-acq in producer) */ clib_atomic_store_rel_n (&f->head, head);
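
Reviewer note (not part of the patch): below is a minimal sketch of how a fifo owner might drive the grow/shrink API introduced above. The helper name fifo_resize_example and the 4096-byte sizes are illustrative assumptions only; as the comment in svm_fifo_add_chunk notes, the caller is expected to run with the heap where the fifo's rbtree pool is pushed, and in practice chunk memory would come from the owning fifo segment rather than being freed directly with clib_mem_free.

/* Illustrative sketch only -- not part of this change. Assumes the
 * SVM_FIFO_F_* flags and the prototypes added by this patch are
 * exported through svm_fifo.h. */
#include <svm/svm_fifo.h>

static void
fifo_resize_example (void)
{
  svm_fifo_chunk_t *c;
  svm_fifo_t *f;
  int shrunk;

  /* 4kB fifo backed by a single chunk (see svm_fifo_create above) */
  f = svm_fifo_create (4096);
  if (!f)
    return;

  /* Grow by one more 4kB chunk. If the fifo is wrapped, the chunk is
   * parked on f->new_chunks and applied later by the consumer via
   * svm_fifo_try_grow (SVM_FIFO_F_GROW). */
  c = svm_fifo_chunk_alloc (4096);
  if (c)
    svm_fifo_add_chunk (f, c);

  /* Request that 4kB be given back. Returns the number of bytes that
   * will actually be removed (0 if a resize is already in progress);
   * with try_shrink set, an immediate shrink attempt is made. */
  shrunk = svm_fifo_reduce_size (f, 4096, 1 /* try_shrink */);
  (void) shrunk;

  /* Once the shrink completes, the removed chunks are handed back to
   * the owner. Here they are simply freed; a real owner (e.g. the
   * fifo segment) would typically recycle them. */
  if (f->flags & SVM_FIFO_F_COLLECT_CHUNKS)
    {
      svm_fifo_chunk_t *next, *cur = svm_fifo_collect_chunks (f);
      while (cur)
	{
	  next = cur->next;
	  clib_mem_free (cur);
	  cur = next;
	}
    }

  svm_fifo_free (f);
}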