diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c
index 3d538293c70..975a82026f7 100644
--- a/src/svm/svm_fifo.c
+++ b/src/svm/svm_fifo.c
@@ -400,10 +400,35 @@ svm_fifo_init (svm_fifo_t * f, u32 size)
   f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX;
   f->segment_index = SVM_FIFO_INVALID_INDEX;
   f->refcnt = 1;
-  f->flags = 0;
+  f->head = f->tail = f->flags = 0;
   f->head_chunk = f->tail_chunk = f->ooo_enq = f->ooo_deq = f->start_chunk;
 }
 
+void
+svm_fifo_init_chunks (svm_fifo_t * f)
+{
+  svm_fifo_chunk_t *c, *prev;
+
+  if (f->start_chunk->next == f->start_chunk)
+    return;
+
+  f->flags |= SVM_FIFO_F_MULTI_CHUNK;
+  rb_tree_init (&f->chunk_lookup);
+  rb_tree_add2 (&f->chunk_lookup, 0, pointer_to_uword (f->start_chunk));
+
+  f->start_chunk->start_byte = 0;
+  prev = f->start_chunk;
+  c = prev->next;
+
+  while (c != f->start_chunk)
+    {
+      c->start_byte = prev->start_byte + prev->length;
+      rb_tree_add2 (&f->chunk_lookup, c->start_byte, pointer_to_uword (c));
+      prev = c;
+      c = c->next;
+    }
+}
+
 /**
  * Creates a fifo in the current heap. Fails vs blow up the process
  */
@@ -559,11 +584,101 @@ svm_fifo_add_chunk (svm_fifo_t * f, svm_fifo_chunk_t * c)
    * that this is called with the heap where the rbtree's pool is pushed. */
   if (!(f->flags & SVM_FIFO_F_MULTI_CHUNK))
     {
+      ASSERT (f->start_chunk->next == f->start_chunk);
       rb_tree_init (&f->chunk_lookup);
       rb_tree_add2 (&f->chunk_lookup, 0, pointer_to_uword (f->start_chunk));
       f->flags |= SVM_FIFO_F_MULTI_CHUNK;
     }
 
+  /* If fifo is not wrapped, update the size now */
+  if (!svm_fifo_is_wrapped (f))
+    {
+      /* Initialize chunks and add to lookup rbtree */
+      cur = c;
+      if (f->new_chunks)
+        {
+          prev = f->new_chunks;
+          while (prev->next)
+            prev = prev->next;
+          prev->next = c;
+        }
+      else
+        prev = f->end_chunk;
+
+      while (cur)
+        {
+          cur->start_byte = prev->start_byte + prev->length;
+          rb_tree_add2 (&f->chunk_lookup, cur->start_byte,
+                        pointer_to_uword (cur));
+          prev = cur;
+          cur = cur->next;
+        }
+
+      ASSERT (!f->new_chunks);
+      svm_fifo_grow (f, c);
+      return;
+    }
+
+  /* Wrapped */
+  if (f->flags & SVM_FIFO_F_SINGLE_THREAD_OWNED)
+    {
+      ASSERT (f->master_thread_index == os_get_thread_index ());
+
+      if (!f->new_chunks && f->head_chunk != f->tail_chunk)
+        {
+          u32 head = 0, tail = 0;
+          f_load_head_tail_cons (f, &head, &tail);
+
+          svm_fifo_chunk_t *tmp = f->tail_chunk->next;
+
+          prev = f->tail_chunk;
+          u32 add_bytes = 0;
+          cur = prev->next;
+          while (cur != f->start_chunk)
+            {
+              /* remove any existing rb_tree entry */
+              rb_tree_del (&f->chunk_lookup, cur->start_byte);
+              cur = cur->next;
+            }
+
+          /* insert new chunk after the tail_chunk */
+          f->tail_chunk->next = c;
+          while (c)
+            {
+              add_bytes += c->length;
+              c->start_byte = prev->start_byte + prev->length;
+              rb_tree_add2 (&f->chunk_lookup, c->start_byte,
+                            pointer_to_uword (c));
+
+              prev = c;
+              c = c->next;
+            }
+          prev->next = tmp;
+
+          /* shift existing chunks along */
+          cur = tmp;
+          while (cur != f->start_chunk)
+            {
+              cur->start_byte = prev->start_byte + prev->length;
+              rb_tree_add2 (&f->chunk_lookup, cur->start_byte,
+                            pointer_to_uword (cur));
+              prev = cur;
+              cur = cur->next;
+            }
+
+          f->size += add_bytes;
+          f->nitems = f->size - 1;
+          f->new_chunks = 0;
+          head += add_bytes;
+
+          clib_atomic_store_rel_n (&f->head, head);
+          ASSERT (svm_fifo_is_sane (f));
+
+          return;
+        }
+    }
+
+  /* Wrapped, and optimization of single-thread-owned fifo cannot be applied */
   /* Initialize chunks and add to lookup rbtree */
   cur = c;
   if (f->new_chunks)
@@ -585,14 +700,6 @@ svm_fifo_add_chunk (svm_fifo_t * f, svm_fifo_chunk_t * c)
       cur = cur->next;
     }
 
-  /* If fifo is not wrapped, update the size now */
-  if (!svm_fifo_is_wrapped (f))
-    {
-      ASSERT (!f->new_chunks);
-      svm_fifo_grow (f, c);
-      return;
-    }
-
   /* Postpone size update */
   if (!f->new_chunks)
     {
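Note on the chunk bookkeeping introduced above: svm_fifo_init_chunks () and every branch of svm_fifo_add_chunk () maintain the same invariant, namely that each chunk's start_byte equals the previous chunk's start_byte plus its length, and f->chunk_lookup keys every chunk by that offset so any fifo position can be mapped back to the chunk holding it. The standalone sketch below illustrates the invariant; chunk_t and the linear find_chunk walk are simplified stand-ins (assumptions, not VPP code) for svm_fifo_chunk_t and the rbtree-backed svm_fifo_find_chunk ().

/* Standalone sketch (not VPP code): models the start_byte invariant that
 * svm_fifo_init_chunks () establishes over a circular chunk list. The real
 * code indexes chunks with an rbtree (f->chunk_lookup); a linear walk is
 * used here only to keep the example self-contained. */
#include <stdio.h>
#include <stdint.h>

typedef struct chunk_
{
  struct chunk_ *next;
  uint32_t start_byte;		/* offset of first byte in fifo space */
  uint32_t length;		/* usable bytes in this chunk */
} chunk_t;

/* Assign start_byte so each chunk begins where the previous one ends */
static void
init_chunks (chunk_t * start)
{
  chunk_t *prev = start, *c = start->next;

  start->start_byte = 0;
  while (c != start)
    {
      c->start_byte = prev->start_byte + prev->length;
      prev = c;
      c = c->next;
    }
}

/* Map a fifo position to the chunk that holds it (rbtree in the real code) */
static chunk_t *
find_chunk (chunk_t * start, uint32_t pos)
{
  chunk_t *c = start;

  do
    {
      if (pos >= c->start_byte && pos < c->start_byte + c->length)
	return c;
      c = c->next;
    }
  while (c != start);
  return 0;
}

int
main (void)
{
  chunk_t c3 = { 0, 0, 4096 }, c2 = { &c3, 0, 8192 }, c1 = { &c2, 0, 4096 };

  c3.next = &c1;		/* close the circular list */
  init_chunks (&c1);
  /* position 5000 falls in the second chunk, which starts at 4096 */
  printf ("pos 5000 lives in chunk starting at %u\n",
	  find_chunk (&c1, 5000)->start_byte);
  return 0;
}

The rbtree buys O(log n) lookups once a fifo accumulates many chunks; the invariant itself is what both grow paths above and svm_fifo_is_sane () later in this patch rely on.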
@@ -626,7 +733,7 @@ svm_fifo_collect_chunks (svm_fifo_t * f)
 void
 svm_fifo_try_shrink (svm_fifo_t * f, u32 head, u32 tail)
 {
-  u32 len_to_shrink = 0, tail_pos, len;
+  u32 len_to_shrink = 0, tail_pos, len, last_pos;
   svm_fifo_chunk_t *cur, *prev, *next, *start;
 
   tail_pos = tail;
@@ -649,13 +756,24 @@ svm_fifo_try_shrink (svm_fifo_t * f, u32 head, u32 tail)
    * - not wrapped
    * - last used byte less than start of last chunk */
-  if (tail_pos >= head && tail_pos <= f->end_chunk->start_byte)
+  if (tail_pos >= head && tail_pos < f->end_chunk->start_byte)
     {
       /* Lookup the last position not to be removed. Since size still needs
-       * to be nitems + 1, nitems must fall within the usable space */
-      tail_pos = tail_pos > 0 ? tail_pos - 1 : tail_pos;
-      prev = svm_fifo_find_chunk (f, clib_max (f->nitems, tail_pos));
+       * to be nitems + 1, nitems must fall within the usable space. Also,
+       * the first segment is not removable, so tail_pos can be 0. */
+      last_pos = tail_pos > 0 ? tail_pos - 1 : tail_pos;
+      prev = svm_fifo_find_chunk (f, clib_max (f->nitems, last_pos));
       next = prev->next;
+      /* If tail_pos is the first position in next, skip the chunk. Otherwise
+       * we would have to update the tail and, if fifo size is 0, even the
+       * head. We should not invalidate the tail for the caller and must not
+       * change consumer owned variables from code that's typically called by
+       * the producer */
+      if (next->start_byte == tail_pos)
+        {
+          prev = next;
+          next = next->next;
+        }
 
       while (next != f->start_chunk)
         {
           cur = next;
@@ -790,7 +908,11 @@ svm_fifo_enqueue (svm_fifo_t * f, u32 len, const u8 * src)
 
   /* collect out-of-order segments */
   if (PREDICT_FALSE (f->ooos_list_head != OOO_SEGMENT_INVALID_INDEX))
-    len += ooo_segment_try_collect (f, len, &tail);
+    {
+      len += ooo_segment_try_collect (f, len, &tail);
+      if (!svm_fifo_chunk_includes_pos (f->tail_chunk, tail))
+        f->tail_chunk = svm_fifo_find_chunk (f, tail);
+    }
 
   /* store-rel: producer owned index (paired with load-acq in consumer) */
   clib_atomic_store_rel_n (&f->tail, tail);
@@ -847,6 +969,10 @@ svm_fifo_enqueue_nocopy (svm_fifo_t * f, u32 len)
   /* load-relaxed: producer owned index */
   tail = f->tail;
   tail = (tail + len) % f->size;
+
+  if (!svm_fifo_chunk_includes_pos (f->tail_chunk, tail))
+    f->tail_chunk = svm_fifo_find_chunk (f, tail);
+
   /* store-rel: producer owned index (paired with load-acq in consumer) */
   clib_atomic_store_rel_n (&f->tail, tail);
 }
@@ -919,6 +1045,12 @@ svm_fifo_dequeue_drop (svm_fifo_t * f, u32 len)
   /* move head */
   head = (head + total_drop_bytes) % f->size;
 
+  if (!svm_fifo_chunk_includes_pos (f->head_chunk, head))
+    f->head_chunk = svm_fifo_find_chunk (f, head);
+
+  if (PREDICT_FALSE (f->flags & SVM_FIFO_F_GROW))
+    svm_fifo_try_grow (f, head);
+
   /* store-rel: consumer owned index (paired with load-acq in producer) */
   clib_atomic_store_rel_n (&f->head, head);
 
@@ -930,6 +1062,13 @@ svm_fifo_dequeue_drop_all (svm_fifo_t * f)
 {
   /* consumer foreign index */
   u32 tail = clib_atomic_load_acq_n (&f->tail);
+
+  if (!svm_fifo_chunk_includes_pos (f->head_chunk, tail))
+    f->head_chunk = svm_fifo_find_chunk (f, tail);
+
+  if (PREDICT_FALSE (f->flags & SVM_FIFO_F_GROW))
+    svm_fifo_try_grow (f, tail);
+
   /* store-rel: consumer owned index (paired with load-acq in producer) */
   clib_atomic_store_rel_n (&f->head, tail);
 }
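The enqueue and dequeue hunks above all apply one pattern: after an index moves (including the out-of-order collect and the drop paths), the cached f->tail_chunk or f->head_chunk pointer is recomputed only when the index has left the chunk it currently points to, so the common single-chunk case pays no lookup cost. A minimal sketch of that pattern, reusing chunk_t and find_chunk from the earlier sketch; chunk_includes_pos here is an assumption mirroring how svm_fifo_chunk_includes_pos () is used in these hunks, not the patch's implementation.

/* Cheap test: does pos fall inside c's [start_byte, start_byte + length)? */
static inline int
chunk_includes_pos (chunk_t * c, uint32_t pos)
{
  return pos >= c->start_byte && pos < c->start_byte + c->length;
}

/* Revalidate a cached chunk pointer only when the index crossed out of it;
 * the fallback lookup is the expensive path (an rbtree walk in the real
 * code, a list walk in this sketch) */
static void
update_cached_chunk (chunk_t ** cached, chunk_t * start, uint32_t pos)
{
  if (!chunk_includes_pos (*cached, pos))
    *cached = find_chunk (start, pos);
}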
@@ -1055,6 +1194,64 @@ svm_fifo_del_subscriber (svm_fifo_t * f, u8 subscriber)
     }
 }
 
+u8
+svm_fifo_is_sane (svm_fifo_t * f)
+{
+  if (f->size - 1 != f->nitems && !(f->flags & SVM_FIFO_F_SHRINK))
+    return 0;
+  if (!svm_fifo_chunk_includes_pos (f->head_chunk, f->head))
+    return 0;
+  if (!svm_fifo_chunk_includes_pos (f->tail_chunk, f->tail))
+    return 0;
+
+  if (f->start_chunk->next != f->start_chunk)
+    {
+      svm_fifo_chunk_t *c, *prev = 0, *tmp;
+      u32 size = 0;
+
+      if (!(f->flags & SVM_FIFO_F_MULTI_CHUNK))
+        return 0;
+
+      c = f->start_chunk;
+      do
+        {
+          tmp = svm_fifo_find_chunk (f, c->start_byte);
+          if (tmp != c)
+            return 0;
+          if (prev && (prev->start_byte + prev->length != c->start_byte))
+            return 0;
+          size += c->length;
+          prev = c;
+          c = c->next;
+        }
+      while (c != f->start_chunk);
+
+      if (size != f->size)
+        return 0;
+    }
+
+  return 1;
+}
+
+u8
+svm_fifo_set_single_thread_owned (svm_fifo_t * f)
+{
+  if (f->flags & SVM_FIFO_F_SINGLE_THREAD_OWNED)
+    {
+      if (f->master_thread_index == os_get_thread_index ())
+        {
+          /* just a duplicate call */
+          return 0;
+        }
+
+      /* already owned by another thread */
+      return 1;
+    }
+
+  f->flags |= SVM_FIFO_F_SINGLE_THREAD_OWNED;
+  return 0;
+}
+
 u8 *
 format_ooo_segment (u8 * s, va_list * args)
 {
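The two helpers added above complete the picture: svm_fifo_set_single_thread_owned () lets a caller that knows a fifo is touched by a single thread opt into the in-place grow path in svm_fifo_add_chunk () even while the fifo is wrapped, and svm_fifo_is_sane () checks the size, head/tail, and chunk-list invariants after such surgery. A hedged caller-side sketch follows; grow_owned_fifo is hypothetical, and only the svm_fifo_* calls and ASSERT come from this patch and VPP.

/* Hypothetical caller (not part of this patch): grow a fifo, claiming
 * single-thread ownership first so add_chunk may take its fast path */
static void
grow_owned_fifo (svm_fifo_t * f, svm_fifo_chunk_t * c)
{
  /* claim ownership; 0 means we now own the fifo (or this is a repeat
   * call from the owning thread), 1 means another thread claimed it */
  if (svm_fifo_set_single_thread_owned (f) != 0)
    return;			/* owned elsewhere; let the owner grow it */

  /* with ownership set, add_chunk may resize in place even when wrapped */
  svm_fifo_add_chunk (f, c);
  ASSERT (svm_fifo_is_sane (f));
}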