+/**
+ * Advance tail
+ *
+ * Moves the producer index forward by @a len bytes after data was written
+ * into the fifo in place, i.e., without going through the copying enqueue
+ * functions.
+ */
+void
+svm_fifo_enqueue_nocopy (svm_fifo_t * f, u32 len)
+{
+  u32 tail;
+
+  ASSERT (len <= svm_fifo_max_enqueue_prod (f));
+  /* load-relaxed: producer owned index */
+  tail = f->shr->tail;
+  tail = tail + len;
+
+  if (rb_tree_is_init (&f->ooo_enq_lookup))
+    {
+      f->shr->tail_chunk =
+        f_csptr (f, f_lookup_clear_enq_chunks (f, f_tail_cptr (f), tail));
+      f->ooo_enq = 0;
+    }
+  else
+    {
+      f->shr->tail_chunk =
+        f_csptr (f, svm_fifo_find_next_chunk (f, f_tail_cptr (f), tail));
+    }
+
+  /* store-rel: producer owned index (paired with load-acq in consumer) */
+  clib_atomic_store_rel_n (&f->shr->tail, tail);
+}
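+
+/*
+ * Usage sketch (not part of the original patch): how a producer that writes
+ * data into the fifo out of band, e.g., via a zero-copy or DMA path, might
+ * advance the tail afterwards. my_oob_write () is a hypothetical helper;
+ * only svm_fifo_max_enqueue_prod () and svm_fifo_enqueue_nocopy (), both
+ * used above, are assumed to be real APIs.
+ *
+ *   u32 max = svm_fifo_max_enqueue_prod (f);
+ *   u32 written = my_oob_write (f, max);
+ *   if (written)
+ *     svm_fifo_enqueue_nocopy (f, written);
+ */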
+
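+/**
+ * Enqueue a batch of segments
+ *
+ * @param f             fifo
+ * @param segs          array of segments to enqueue
+ * @param n_segs        number of segments in @a segs
+ * @param allow_partial if set, enqueue as much as currently fits instead of
+ *                      failing
+ * @return              number of bytes enqueued, or SVM_FIFO_EFULL /
+ *                      SVM_FIFO_EGROW on error
+ */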
+int
+svm_fifo_enqueue_segments (svm_fifo_t * f, const svm_fifo_seg_t segs[],
+                           u32 n_segs, u8 allow_partial)
+{
+  u32 tail, head, free_count, len = 0, i;
+  svm_fifo_chunk_t *old_tail_c;
+
+  f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
+
+  f_load_head_tail_prod (f, &head, &tail);
+
+  /* free space in fifo can only increase during enqueue: SPSC */
+  free_count = f_free_count (f, head, tail);
+
+  if (PREDICT_FALSE (free_count == 0))
+    return SVM_FIFO_EFULL;
+
+  for (i = 0; i < n_segs; i++)
+    len += segs[i].len;
+
+  old_tail_c = f_tail_cptr (f);
+
+  if (!allow_partial)
+    {
+      if (PREDICT_FALSE (free_count < len))
+        return SVM_FIFO_EFULL;
+
+      if (f_pos_gt (tail + len, f_chunk_end (f_end_cptr (f))))
+        {
+          if (PREDICT_FALSE (f_try_chunk_alloc (f, head, tail, len)))
+            return SVM_FIFO_EGROW;
+        }
+
+      for (i = 0; i < n_segs; i++)
+        {
+          svm_fifo_copy_to_chunk (f, f_tail_cptr (f), tail, segs[i].data,
+                                  segs[i].len, &f->shr->tail_chunk);
+          tail += segs[i].len;
+        }
+    }
+  else
+    {
+      u32 n_left = clib_min (free_count, len);
+
+      if (f_pos_gt (tail + n_left, f_chunk_end (f_end_cptr (f))))
+        {
+          if (PREDICT_FALSE (f_try_chunk_alloc (f, head, tail, n_left)))
+            {
+              n_left = f_chunk_end (f_end_cptr (f)) - tail;
+              if (!n_left)
+                return SVM_FIFO_EGROW;
+            }
+        }
+
+      len = n_left;
+      i = 0;
+      while (n_left)
+        {
+          u32 to_copy = clib_min (segs[i].len, n_left);
+          svm_fifo_copy_to_chunk (f, f_tail_cptr (f), tail, segs[i].data,
+                                  to_copy, &f->shr->tail_chunk);
+          n_left -= to_copy;
+          tail += to_copy;
+          i++;
+        }
+    }
+
+  /* collect out-of-order segments */
+  if (PREDICT_FALSE (f->ooos_list_head != OOO_SEGMENT_INVALID_INDEX))
+    {
+      len += ooo_segment_try_collect (f, len, &tail);
+      /* Tail chunk might've changed even if nothing was collected */
+      f->shr->tail_chunk =
+        f_csptr (f, f_lookup_clear_enq_chunks (f, old_tail_c, tail));
+      f->ooo_enq = 0;
+    }
+
+  /* store-rel: producer owned index (paired with load-acq in consumer) */
+  clib_atomic_store_rel_n (&f->shr->tail, tail);
+
+  return len;
+}
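+
+/*
+ * Usage sketch (not part of the original patch): enqueue a header and a
+ * payload buffer as one batch, accepting a partial write when the fifo is
+ * nearly full. hdr/hdr_len, data/data_len and handle_enqueue_error () are
+ * hypothetical caller-side names.
+ *
+ *   svm_fifo_seg_t segs[2] = { { .data = hdr, .len = hdr_len },
+ *                              { .data = data, .len = data_len } };
+ *   int rv = svm_fifo_enqueue_segments (f, segs, 2, 1);
+ *   if (rv < 0)
+ *     handle_enqueue_error (rv);   (SVM_FIFO_EFULL or SVM_FIFO_EGROW)
+ *   else
+ *     bytes_sent += rv;            (may be less than hdr_len + data_len)
+ */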
+
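+/**
+ * Unlink chunks that precede the chunk holding @a end_pos
+ *
+ * Walks the chunk list from the current start chunk, removing dequeue
+ * rb-tree entries for the walked chunks when @a maybe_ooo is set, and
+ * advances the fifo start chunk to the chunk that includes @a end_pos.
+ * Returns the head of the unlinked chain, or 0 if nothing was unlinked.
+ */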
+always_inline svm_fifo_chunk_t *
+f_unlink_chunks (svm_fifo_t * f, u32 end_pos, u8 maybe_ooo)
+{
+  svm_fifo_chunk_t *start, *prev = 0, *c;
+  rb_tree_t *rt;
+  rb_node_t *n;
+
+  if (maybe_ooo)
+    rt = &f->ooo_deq_lookup;
+
+  c = f_start_cptr (f);
+  ASSERT (!f_chunk_includes_pos (c, end_pos));
+
+  do
+    {
+      if (maybe_ooo && c->deq_rb_index != RBTREE_TNIL_INDEX)
+        {
+          n = rb_node (rt, c->deq_rb_index);
+          ASSERT (n == f_find_node_rbtree (rt, c->start_byte));
+          rb_tree_del_node (rt, n);
+          c->deq_rb_index = RBTREE_TNIL_INDEX;
+        }
+      if (!c->next)
+        break;
+      prev = c;
+      c = f_cptr (f, c->next);
+    }
+  while (!f_chunk_includes_pos (c, end_pos));
+
+  if (maybe_ooo)
+    {
+      if (f->ooo_deq && f_pos_lt (f->ooo_deq->start_byte, f_chunk_end (c)))
+        f->ooo_deq = 0;
+    }
+  else
+    {
+      if (PREDICT_FALSE (f->ooo_deq != 0))
+        f->ooo_deq = 0;
+    }
+
+  /* Avoid unlinking the last chunk */
+  if (!prev)
+    return 0;
+
+  prev->next = 0;
+  start = f_start_cptr (f);
+  f->shr->start_chunk = f_csptr (f, c);
+
+  return start;
+}
+