+ /* If fifo is not wrapped, i.e., the occupied region does not wrap around
+ * the end of the chunk list, chunks can be appended and the size updated now */
+ if (!svm_fifo_is_wrapped (f))
+ {
+ /* Initialize chunks and add to lookup rbtree */
+ cur = c;
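+ /* append after any chunks already queued on f->new_chunks,
+ * otherwise after the current end chunk */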
+ if (f->new_chunks)
+ {
+ prev = f->new_chunks;
+ while (prev->next)
+ prev = prev->next;
+ prev->next = c;
+ }
+ else
+ prev = f->end_chunk;
+
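+ /* assign contiguous start bytes to the new chunks and mark them
+ * as absent from the ooo lookup trees */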
+ while (cur)
+ {
+ cur->start_byte = prev->start_byte + prev->length;
+ cur->rb_index = RBTREE_TNIL_INDEX;
+ prev = cur;
+ cur = cur->next;
+ }
+
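+ /* no chunks should be pending collection here; grow links the new
+ * chunks in and updates the fifo size */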
+ ASSERT (!f->new_chunks);
+ svm_fifo_grow (f, c);
+ return;
+ }
+
+ /* Wrapped: the occupied region spans the end of the chunk list,
+ * so chunks cannot simply be appended at the end */
+ if (f->flags & SVM_FIFO_F_SINGLE_THREAD_OWNED)
+ {
+ ASSERT (f->master_thread_index == os_get_thread_index ());
+
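+ /* With no chunks pending and head and tail in different chunks, the new
+ * chunks can be spliced in right after the tail chunk and the head offset
+ * shifted in place, since only this thread touches the fifo */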
+ if (!f->new_chunks && f->head_chunk != f->tail_chunk)
+ {
+ u32 head = 0, tail = 0;
+ f_load_head_tail_cons (f, &head, &tail);
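+ /* snapshot head and tail; head is repositioned below */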
+
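+ /* remember the chunk that follows the tail so the ring can be
+ * relinked once the new chunks are spliced in */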
+ svm_fifo_chunk_t *tmp = f->tail_chunk->next;
+
+ prev = f->tail_chunk;
+ u32 add_bytes = 0;
+ cur = prev->next;
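+ /* the chunks between tail and start are about to be shifted, so their
+ * stale lookup entries must be dropped */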
+ while (cur != f->start_chunk)
+ {
+ /* remove any existing rb_tree entries */
+ if (cur->rb_index != RBTREE_TNIL_INDEX)
+ {
+ rb_tree_del (&f->ooo_enq_lookup, cur->start_byte);
+ rb_tree_del (&f->ooo_deq_lookup, cur->start_byte);
+ }
+ cur->rb_index = RBTREE_TNIL_INDEX;
+ cur = cur->next;
+ }
+
+ /* insert the new chunks after the tail_chunk */
+ f->tail_chunk->next = c;
+ while (c)
+ {
+ add_bytes += c->length;
+ c->start_byte = prev->start_byte + prev->length;
+ c->rb_index = RBTREE_TNIL_INDEX;
+
+ prev = c;
+ c = c->next;
+ }
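+ /* reconnect the displaced part of the ring after the new chunks */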
+ prev->next = tmp;
+
+ /* shift existing chunks along */
+ cur = tmp;
+ while (cur != f->start_chunk)
+ {
+ cur->start_byte = prev->start_byte + prev->length;
+ prev = cur;
+ cur = cur->next;
+ }
+
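+ /* head must advance by add_bytes as well, since the head chunk's
+ * start_byte was shifted by that amount */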
+ f->size += add_bytes;
+ f->nitems = f->size - 1;
+ f->new_chunks = 0;
+ head += add_bytes;
+
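+ /* publish the shifted head position */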
+ clib_atomic_store_rel_n (&f->head, head);
+ ASSERT (svm_fifo_is_sane (f));
+
+ return;
+ }
+ }
+
+ /* Wrapped, and the single-thread-owned fifo optimization cannot be applied */