+ /* store-rel: producer owned index (paired with load-acq in consumer) */
+ clib_atomic_store_rel_n (&f->tail, tail);
+
+ return len;
+}
+
+/**
+ * Enqueue a future segment, i.e., data that lands @a offset bytes ahead
+ * of the current tail (out-of-order enqueue). Tail itself is not moved
+ * by this function; the segment is recorded via ooo_segment_add.
+ *
+ * Two choices: either copies the entire segment, or copies nothing.
+ * Returns 0 if the entire segment was copied
+ * Returns SVM_FIFO_EFULL if none of the segment was copied due to lack
+ * of space, SVM_FIFO_EGROW if the fifo could not be grown to fit it
+ */
+int
+svm_fifo_enqueue_with_offset (svm_fifo_t * f, u32 offset, u32 len, u8 * src)
+{
+ u32 tail, head, free_count, enq_pos;
+
+ f_load_head_tail_prod (f, &head, &tail);
+
+ /* free space in fifo can only increase during enqueue: SPSC */
+ free_count = f_free_count (f, head, tail);
+ /* invalidate cached newest-ooo-segment hint */
+ f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
+
+ /* will this request fit? */
+ if ((len + offset) > free_count)
+ return SVM_FIFO_EFULL;
+
+ /* absolute fifo position where the segment starts */
+ enq_pos = tail + offset;
+
+ /* segment would end past the last allocated chunk: try to grow fifo */
+ if (f_pos_gt (enq_pos + len, f_chunk_end (f->end_chunk)))
+ {
+ if (PREDICT_FALSE (f_try_chunk_alloc (f, head, tail, offset + len)))
+ return SVM_FIFO_EGROW;
+ }
+
+ svm_fifo_trace_add (f, offset, len, 1);
+ /* track the data as an out-of-order segment */
+ ooo_segment_add (f, offset, head, tail, len);
+
+ /* refresh cached ooo enqueue chunk if it does not cover enq_pos */
+ if (!f->ooo_enq || !f_chunk_includes_pos (f->ooo_enq, enq_pos))
+ f_update_ooo_enq (f, enq_pos, enq_pos + len);
+
+ svm_fifo_copy_to_chunk (f, f->ooo_enq, enq_pos, src, len, &f->ooo_enq);
+
+ return 0;
+}
+
+/**
+ * Advance tail by @a len bytes without copying any data into the fifo
+ * (data is presumed already written in place by the producer — confirm
+ * against callers).
+ *
+ * Caller must ensure @a len does not exceed free space (asserted).
+ */
+void
+svm_fifo_enqueue_nocopy (svm_fifo_t * f, u32 len)
+{
+ u32 tail;
+
+ ASSERT (len <= svm_fifo_max_enqueue_prod (f));
+ /* load-relaxed: producer owned index */
+ tail = f->tail;
+ tail = tail + len;
+
+ if (rb_tree_is_init (&f->ooo_enq_lookup))
+ {
+ /* drop ooo enqueue lookup entries covered by the new tail and
+ * invalidate the cached ooo enqueue chunk */
+ f->tail_chunk = f_lookup_clear_enq_chunks (f, f->tail_chunk, tail);
+ f->ooo_enq = 0;
+ }
+ else
+ {
+ /* no ooo lookup tree: walk the chunk list to find the new tail chunk */
+ f->tail_chunk = svm_fifo_find_next_chunk (f, f->tail_chunk, tail);
+ }
+
+ /* store-rel: producer owned index (paired with load-acq in consumer) */
+ clib_atomic_store_rel_n (&f->tail, tail);
+}
+
+/**
+ * Unlink the chunks that precede the chunk containing @a end_pos and
+ * return them as a list; f->start_chunk is advanced to the chunk that
+ * includes @a end_pos.
+ *
+ * @param f		fifo
+ * @param end_pos	position up to which data has been consumed
+ * @param maybe_ooo	if set, unlinked chunks may have entries in the
+ *			ooo dequeue lookup rbtree that must be removed
+ * @return	head of the unlinked chunk list, or 0 if nothing was
+ *		unlinked (the last chunk is never unlinked)
+ */
+always_inline svm_fifo_chunk_t *
+f_unlink_chunks (svm_fifo_t * f, u32 end_pos, u8 maybe_ooo)
+{
+ svm_fifo_chunk_t *start, *prev = 0, *c;
+ rb_tree_t *rt;
+ rb_node_t *n;
+
+ ASSERT (!f_chunk_includes_pos (f->start_chunk, end_pos));
+
+ if (maybe_ooo)
+ rt = &f->ooo_deq_lookup;
+
+ c = f->start_chunk;
+
+ /* walk forward to the chunk containing end_pos, deleting any ooo
+ * dequeue rbtree entries for chunks traversed along the way */
+ do
+ {
+ if (maybe_ooo && c->deq_rb_index != RBTREE_TNIL_INDEX)
+ {
+ n = rb_node (rt, c->deq_rb_index);
+ ASSERT (n == f_find_node_rbtree (rt, c->start_byte));
+ rb_tree_del_node (rt, n);
+ c->deq_rb_index = RBTREE_TNIL_INDEX;
+ }
+ if (!c->next)
+ break;
+ prev = c;
+ c = c->next;
+ }
+ while (!f_chunk_includes_pos (c, end_pos));
+
+ if (maybe_ooo)
+ {
+ /* invalidate cached ooo dequeue chunk if it starts before the end
+ * of the new start chunk, i.e., it may be in the unlinked range */
+ if (f->ooo_deq && f_pos_lt (f->ooo_deq->start_byte, f_chunk_end (c)))
+ f->ooo_deq = 0;
+ }
+ else
+ {
+ /* no ooo dequeues expected; defensively clear a stale cache */
+ if (PREDICT_FALSE (f->ooo_deq != 0))
+ f->ooo_deq = 0;
+ }
+
+ /* Avoid unlinking the last chunk */
+ if (!prev)
+ return 0;
+
+ /* sever the consumed chunks from the list and advance start_chunk */
+ prev->next = 0;
+ start = f->start_chunk;
+ f->start_chunk = c;
+
+ return start;
+}
+
+/**
+ * Dequeue and copy out up to @a len bytes from the fifo into @a dst.
+ *
+ * @param f	fifo
+ * @param len	max number of bytes to dequeue
+ * @param dst	buffer to copy the data into
+ * @return	number of bytes dequeued, or SVM_FIFO_EEMPTY if the fifo
+ *		is empty
+ */
+int
+svm_fifo_dequeue (svm_fifo_t * f, u32 len, u8 * dst)
+{
+ u32 tail, head, cursize;
+
+ f_load_head_tail_cons (f, &head, &tail);
+
+ /* current size of fifo can only increase during dequeue: SPSC */
+ cursize = f_cursize (f, head, tail);
+
+ if (PREDICT_FALSE (cursize == 0))
+ return SVM_FIFO_EEMPTY;
+
+ len = clib_min (cursize, len);
+
+ /* cached head chunk may have been cleared; look it up again */
+ if (!f->head_chunk)
+ f->head_chunk = svm_fifo_find_chunk (f, head);
+
+ svm_fifo_copy_from_chunk (f, f->head_chunk, head, dst, len, &f->head_chunk);
+ head = head + len;
+
+ /* head moved past the first chunk: unlink consumed chunks and return
+ * them to the fifo segment allocator */
+ if (f_pos_geq (head, f_chunk_end (f->start_chunk)))
+ fsh_collect_chunks (f->fs_hdr, f->slice_index,
+ f_unlink_chunks (f, head, 0));
+
+ /* store-rel: consumer owned index (paired with load-acq in producer) */
+ clib_atomic_store_rel_n (&f->head, head);
+
+ return len;
+}
+
+/**
+ * Peek fifo data starting at @a offset bytes past head, without moving
+ * head.
+ *
+ * @param f	fifo
+ * @param offset	offset from head at which to start reading
+ * @param len	max number of bytes to read
+ * @param dst	buffer to copy the data into
+ * @return	number of bytes copied (0 is possible when offset equals
+ *		the bytes available), or SVM_FIFO_EEMPTY if fewer than
+ *		@a offset bytes are available
+ */
+int
+svm_fifo_peek (svm_fifo_t * f, u32 offset, u32 len, u8 * dst)
+{
+ u32 tail, head, cursize, head_idx;
+
+ f_load_head_tail_cons (f, &head, &tail);
+
+ /* current size of fifo can only increase during peek: SPSC */
+ cursize = f_cursize (f, head, tail);
+
+ if (PREDICT_FALSE (cursize < offset))
+ return SVM_FIFO_EEMPTY;
+
+ len = clib_min (cursize - offset, len);
+ /* absolute fifo position to start reading from */
+ head_idx = head + offset;
+
+ /* refresh cached ooo dequeue chunk if it does not cover head_idx */
+ if (!f->ooo_deq || !f_chunk_includes_pos (f->ooo_deq, head_idx))
+ f_update_ooo_deq (f, head_idx, head_idx + len);
+
+ svm_fifo_copy_from_chunk (f, f->ooo_deq, head_idx, dst, len, &f->ooo_deq);
+ return len;
+}
+
+int
+svm_fifo_dequeue_drop (svm_fifo_t * f, u32 len)
+{
+ u32 total_drop_bytes, tail, head, cursize;
+
+ f_load_head_tail_cons (f, &head, &tail);
+
+ /* number of bytes available */
+ cursize = f_cursize (f, head, tail);
+ if (PREDICT_FALSE (cursize == 0))
+ return SVM_FIFO_EEMPTY;
+
+ /* number of bytes we're going to drop */
+ total_drop_bytes = clib_min (cursize, len);
+
+ svm_fifo_trace_add (f, tail, total_drop_bytes, 3);
+
+ /* move head */
+ head = head + total_drop_bytes;
+
+ if (f_pos_geq (head, f_chunk_end (f->start_chunk)))
+ {
+ fsh_collect_chunks (f->fs_hdr, f->slice_index,
+ f_unlink_chunks (f, head, 1));
+ f->head_chunk =
+ f_chunk_includes_pos (f->start_chunk, head) ? f->start_chunk : 0;
+ }
+
+ /* store-rel: consumer owned index (paired with load-acq in producer) */
+ clib_atomic_store_rel_n (&f->head, head);