+ * Creates a fifo in the current heap. Fails (returns 0) instead of blowing
+ * up the process
+ */
+svm_fifo_t *
+svm_fifo_alloc (u32 data_size_in_bytes)
+{
+ u32 rounded_data_size;
+ svm_fifo_chunk_t *c;
+ svm_fifo_t *f;
+
+ f = clib_mem_alloc_aligned_or_null (sizeof (*f), CLIB_CACHE_LINE_BYTES);
+ if (f == 0)
+ return 0;
+
+ clib_memset (f, 0, sizeof (*f));
+
+ /* always round fifo data size to the next highest power-of-two */
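+ /* e.g. a request for 5000 bytes yields an 8192 byte data allocation */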
+ rounded_data_size = (1 << (max_log2 (data_size_in_bytes)));
+ c = clib_mem_alloc_aligned_or_null (sizeof (*c) + rounded_data_size,
+ CLIB_CACHE_LINE_BYTES);
+ if (!c)
+ {
+ clib_mem_free (f);
+ return 0;
+ }
+
+ clib_memset (c, 0, sizeof (*c));
+ c->start_byte = 0;
+ c->length = data_size_in_bytes;
+ c->enq_rb_index = RBTREE_TNIL_INDEX;
+ c->deq_rb_index = RBTREE_TNIL_INDEX;
+ f->shr->start_chunk = f->shr->end_chunk = f_csptr (f, c);
+
+ return f;
+}
+
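+/*
+ * Illustrative use, not part of this change: allocate a fifo and fall
+ * back gracefully when the heap is exhausted.
+ *
+ *   svm_fifo_t *f = svm_fifo_alloc (4096);
+ *   if (!f)
+ *     return 0;   // allocation failed, caller decides how to recover
+ */
+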
+/**
+ * Creates a fifo chunk in the current heap
+ */
+svm_fifo_chunk_t *
+svm_fifo_chunk_alloc (u32 size)
+{
+ svm_fifo_chunk_t *c;
+ u32 rounded_size;
+
+ /* round chunk size to the next highest power-of-two */
+ rounded_size = (1 << (max_log2 (size)));
+ c = clib_mem_alloc_aligned_or_null (sizeof (*c) + rounded_size,
+ CLIB_CACHE_LINE_BYTES);
+ if (c == 0)
+ return 0;
+
+ clib_memset (c, 0, sizeof (*c));
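+ /* note: unlike svm_fifo_alloc, length advertises the rounded size */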
+ c->length = rounded_size;
+ return c;
+}
+
+/**
+ * Find chunk for given byte position
+ *
+ * @param f fifo
+ * @param pos normalized position in fifo
+ *
+ * @return chunk that includes given position or 0
+ */
+static svm_fifo_chunk_t *
+svm_fifo_find_chunk (svm_fifo_t * f, u32 pos)
+{
+ svm_fifo_chunk_t *c;
+
+ c = f_start_cptr (f);
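+ /* linear walk from the first chunk towards pos */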
+ while (c && !f_chunk_includes_pos (c, pos))
+ c = f_cptr (f, c->next);
+
+ return c;
+}
+
+static svm_fifo_chunk_t *
+svm_fifo_find_next_chunk (svm_fifo_t * f, svm_fifo_chunk_t * start, u32 pos)
+{
+ svm_fifo_chunk_t *c;
+
+ ASSERT (start != 0);
+
+ c = start;
+ while (c && !f_chunk_includes_pos (c, pos))
+ c = f_cptr (f, c->next);
+
+ return c;
+}
+
+u32
+svm_fifo_max_read_chunk (svm_fifo_t * f)
+{
+ u32 head, tail, end_chunk;
+
+ f_load_head_tail_cons (f, &head, &tail);
+ ASSERT (!f->shr->head_chunk || f_chunk_includes_pos (f_head_cptr (f), head));
+
+ if (!f->shr->head_chunk)
+ {
+ f->shr->head_chunk = f_csptr (f, svm_fifo_find_chunk (f, head));
+ if (PREDICT_FALSE (!f->shr->head_chunk))
+ return 0;
+ }
+
+ end_chunk = f_chunk_end (f_head_cptr (f));
+
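+ /* contiguous bytes that can be read from the head chunk */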
+ return f_pos_lt (end_chunk, tail) ? end_chunk - head : tail - head;
+}
+
+u32
+svm_fifo_max_write_chunk (svm_fifo_t * f)
+{
+ svm_fifo_chunk_t *tail_chunk;
+ u32 head, tail;
+
+ f_load_head_tail_prod (f, &head, &tail);
+ tail_chunk = f_tail_cptr (f);
+
+ ASSERT (!tail_chunk || f_chunk_includes_pos (tail_chunk, tail));
+
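+ /* contiguous space left between tail and the end of its chunk */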
+ return tail_chunk ? f_chunk_end (tail_chunk) - tail : 0;
+}
+
+static rb_node_t *
+f_find_node_rbtree (rb_tree_t * rt, u32 pos)
+{
+ rb_node_t *cur, *prev;
+
+ cur = rb_node (rt, rt->root);
+ if (PREDICT_FALSE (rb_node_is_tnil (rt, cur)))
+ return 0;
+
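+ /* descend towards pos; on a miss, stop at the closest key below it */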
+ while (pos != cur->key)
+ {
+ prev = cur;
+ if (f_pos_lt (pos, cur->key))
+ {
+ cur = rb_node_left (rt, cur);
+ if (rb_node_is_tnil (rt, cur))
+ {
+ cur = rb_tree_predecessor (rt, prev);
+ break;
+ }
+ }
+ else
+ {
+ cur = rb_node_right (rt, cur);
+ if (rb_node_is_tnil (rt, cur))
+ {
+ cur = prev;
+ break;
+ }
+ }
+ }
+
+ if (rb_node_is_tnil (rt, cur))
+ return 0;
+
+ return cur;
+}
+
+static svm_fifo_chunk_t *
+f_find_chunk_rbtree (rb_tree_t * rt, u32 pos)
+{
+ svm_fifo_chunk_t *c;
+ rb_node_t *n;
+
+ if (!rb_tree_is_init (rt))
+ return 0;
+
+ n = f_find_node_rbtree (rt, pos);
+ if (!n)
+ return 0;
+ c = uword_to_pointer (n->opaque, svm_fifo_chunk_t *);
+ if (f_chunk_includes_pos (c, pos))
+ return c;
+
+ return 0;
+}
+
+static void
+f_update_ooo_enq (svm_fifo_t * f, u32 start_pos, u32 end_pos)
+{
+ rb_tree_t *rt = &f->ooo_enq_lookup;
+ svm_fifo_chunk_t *c;
+ rb_node_t *cur;
+
+ /* Use linear search if rbtree is not initialized */
+ if (PREDICT_FALSE (!rb_tree_is_init (rt)))
+ {
+ f->ooo_enq = svm_fifo_find_next_chunk (f, f_tail_cptr (f), start_pos);
+ return;
+ }
+
+ if (rt->root == RBTREE_TNIL_INDEX)
+ {
+ c = f_tail_cptr (f);
+ ASSERT (c->enq_rb_index == RBTREE_TNIL_INDEX);
+ c->enq_rb_index = rb_tree_add_custom (rt, c->start_byte,
+ pointer_to_uword (c), f_pos_lt);
+ }
+ else
+ {
+ cur = f_find_node_rbtree (rt, start_pos);
+ c = uword_to_pointer (cur->opaque, svm_fifo_chunk_t *);
+ ASSERT (f_pos_leq (c->start_byte, start_pos));
+ }
+
+ if (f_chunk_includes_pos (c, start_pos))
+ f->ooo_enq = c;
+
+ if (f_chunk_includes_pos (c, end_pos))
+ return;
+
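+ /* track the chunks between start_pos and end_pos in the lookup tree */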
+ do
+ {
+ c = f_cptr (f, c->next);
+ if (!c || c->enq_rb_index != RBTREE_TNIL_INDEX)
+ break;
+
+ c->enq_rb_index = rb_tree_add_custom (rt, c->start_byte,
+ pointer_to_uword (c), f_pos_lt);
+
+ if (f_chunk_includes_pos (c, start_pos))
+ f->ooo_enq = c;
+ }
+ while (!f_chunk_includes_pos (c, end_pos));
+}
+
+static void
+f_update_ooo_deq (svm_fifo_t * f, u32 start_pos, u32 end_pos)
+{
+ rb_tree_t *rt = &f->ooo_deq_lookup;
+ rb_node_t *cur;
+ svm_fifo_chunk_t *c;
+
+ /* Use linear search if rbtree is not initialized */
+ if (PREDICT_FALSE (!rb_tree_is_init (rt)))
+ {
+ f->ooo_deq = svm_fifo_find_chunk (f, start_pos);
+ return;
+ }
+
+ if (rt->root == RBTREE_TNIL_INDEX)
+ {
+ c = f_start_cptr (f);
+ ASSERT (c->deq_rb_index == RBTREE_TNIL_INDEX);
+ c->deq_rb_index = rb_tree_add_custom (rt, c->start_byte,
+ pointer_to_uword (c), f_pos_lt);
+ }
+ else
+ {
+ cur = f_find_node_rbtree (rt, start_pos);
+ c = uword_to_pointer (cur->opaque, svm_fifo_chunk_t *);
+ ASSERT (f_pos_leq (c->start_byte, start_pos));
+ }
+
+ if (f_chunk_includes_pos (c, start_pos))
+ f->ooo_deq = c;
+
+ if (f_chunk_includes_pos (c, end_pos))
+ return;
+
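+ /* mirror of the enq case: track chunks up to end_pos in the tree */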
+ do
+ {
+ c = f_cptr (f, c->next);
+ if (!c || c->deq_rb_index != RBTREE_TNIL_INDEX)
+ break;
+
+ c->deq_rb_index = rb_tree_add_custom (rt, c->start_byte,
+ pointer_to_uword (c), f_pos_lt);
+
+ if (f_chunk_includes_pos (c, start_pos))
+ f->ooo_deq = c;
+ }
+ while (!f_chunk_includes_pos (c, end_pos));
+}
+
+static svm_fifo_chunk_t *
+f_lookup_clear_enq_chunks (svm_fifo_t * f, svm_fifo_chunk_t * start,
+ u32 end_pos)
+{
+ rb_tree_t *rt = &f->ooo_enq_lookup;
+ svm_fifo_chunk_t *c;
+ rb_node_t *n;
+
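+ /* untrack chunks that end_pos has already moved past */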
+ c = start;
+ while (c && !f_chunk_includes_pos (c, end_pos))
+ {
+ if (c->enq_rb_index != RBTREE_TNIL_INDEX)
+ {
+ n = rb_node (rt, c->enq_rb_index);
+ rb_tree_del_node (rt, n);
+ c->enq_rb_index = RBTREE_TNIL_INDEX;
+ }
+
+ c = f_cptr (f, c->next);
+ }
+
+ /* No ooo segments left, so make sure the current chunk
+ * is not tracked in the enq rbtree */
+ if (f->ooos_list_head == OOO_SEGMENT_INVALID_INDEX
+ && c && c->enq_rb_index != RBTREE_TNIL_INDEX)
+ {
+ n = rb_node (rt, c->enq_rb_index);
+ rb_tree_del_node (rt, n);
+ c->enq_rb_index = RBTREE_TNIL_INDEX;
+ }
+
+ return c;
+}
+
+static svm_fifo_chunk_t *
+f_lookup_clear_deq_chunks (svm_fifo_t * f, svm_fifo_chunk_t * start,
+ u32 end_pos)
+{
+ rb_tree_t *rt = &f->ooo_deq_lookup;
+ svm_fifo_chunk_t *c;
+ rb_node_t *n;
+
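+ /* same walk as the enq variant, against the dequeue lookup tree */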
+ c = start;
+ while (c && !f_chunk_includes_pos (c, end_pos))
+ {
+ if (c->deq_rb_index != RBTREE_TNIL_INDEX)
+ {
+ n = rb_node (rt, c->deq_rb_index);
+ rb_tree_del_node (rt, n);
+ c->deq_rb_index = RBTREE_TNIL_INDEX;
+ }
+
+ c = f_cptr (f, c->next);
+ }
+
+ return c;
+}
+
+void
+svm_fifo_free_chunk_lookup (svm_fifo_t * f)
+{
+ rb_tree_free_nodes (&f->ooo_enq_lookup);
+ rb_tree_free_nodes (&f->ooo_deq_lookup);
+}
+
+void
+svm_fifo_free (svm_fifo_t * f)
+{
+ ASSERT (f->refcnt > 0);
+
+ if (--f->refcnt == 0)
+ {
+ /* ooo data is not allocated on segment heap */
+ svm_fifo_free_chunk_lookup (f);
+ clib_mem_free (f);
+ }
+}
+
+void
+svm_fifo_overwrite_head (svm_fifo_t * f, u8 * src, u32 len)
+{
+ u32 n_chunk;
+ u32 head, tail, head_idx;
+ svm_fifo_chunk_t *c;
+
+ ASSERT (len <= f->shr->size);
+
+ f_load_head_tail_cons (f, &head, &tail);
+
+ if (!f->shr->head_chunk)
+ f->shr->head_chunk = f_csptr (f, svm_fifo_find_chunk (f, head));
+
+ c = f_head_cptr (f);
+ head_idx = head - c->start_byte;
+ n_chunk = c->length - head_idx;
+ if (len <= n_chunk)
+ clib_memcpy_fast (&c->data[head_idx], src, len);
+ else
+ {
+ ASSERT (len - n_chunk <= f_cptr (f, c->next)->length);
+ clib_memcpy_fast (&c->data[head_idx], src, n_chunk);
+ clib_memcpy_fast (&f_cptr (f, c->next)->data[0], src + n_chunk,
+ len - n_chunk);
+ }
+}
+
+static int
+f_try_chunk_alloc (svm_fifo_t * f, u32 head, u32 tail, u32 len)
+{
+ svm_fifo_chunk_t *c, *cur, *prev;
+ u32 alloc_size, free_alloced;
+
+ prev = f_end_cptr (f);
+ free_alloced = f_chunk_end (prev) - tail;
+
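+ /* try to allocate min_alloc bytes, capped by the fifo's configured
+ * size, but never less than what len still needs */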
+ alloc_size = clib_min (f->shr->min_alloc, f->shr->size - (tail - head));
+ alloc_size = clib_max (alloc_size, len - free_alloced);
+
+ c = fsh_alloc_chunk (f->fs_hdr, f->shr->slice_index, alloc_size);
+ if (PREDICT_FALSE (!c))
+ return -1;
+
+ cur = c;
+
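+ /* stamp start offsets on the newly allocated chain of chunks */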
+ while (cur)
+ {
+ cur->start_byte = prev->start_byte + prev->length;
+ cur->enq_rb_index = RBTREE_TNIL_INDEX;
+ cur->deq_rb_index = RBTREE_TNIL_INDEX;
+
+ prev = cur;
+ cur = f_cptr (f, cur->next);
+ }
+
+ f_csptr_link (f, f->shr->end_chunk, c);
+ prev->next = 0;
+ f->shr->end_chunk = f_csptr (f, prev);
+
+ if (!f->shr->tail_chunk)
+ f->shr->tail_chunk = f_csptr (f, c);
+
+ return 0;
+}
+
+int
+svm_fifo_enqueue (svm_fifo_t * f, u32 len, const u8 * src)
+{
+ u32 tail, head, free_count;
+ svm_fifo_chunk_t *old_tail_c;
+
+ f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
+
+ f_load_head_tail_prod (f, &head, &tail);
+
+ /* SPSC: only the consumer moves head, so free space can only grow here */
+ free_count = f_free_count (f, head, tail);
+
+ if (PREDICT_FALSE (free_count == 0))
+ return SVM_FIFO_EFULL;
+
+ /* number of bytes we're going to copy */
+ len = clib_min (free_count, len);
+
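+ /* if the write would run past the last allocated chunk, try to grow */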
+ if (f_pos_gt (tail + len, f_chunk_end (f_end_cptr (f))))
+ {
+ if (PREDICT_FALSE (f_try_chunk_alloc (f, head, tail, len)))
+ {
+ len = f_chunk_end (f_end_cptr (f)) - tail;
+ if (!len)
+ return SVM_FIFO_EGROW;
+ }
+ }
+
+ old_tail_c = f_tail_cptr (f);
+
+ svm_fifo_copy_to_chunk (f, old_tail_c, tail, src, len, &f->shr->tail_chunk);
+ tail = tail + len;
+
+ svm_fifo_trace_add (f, head, len, 2);
+
+ /* collect out-of-order segments */
+ if (PREDICT_FALSE (f->ooos_list_head != OOO_SEGMENT_INVALID_INDEX))
+ {
+ len += ooo_segment_try_collect (f, len, &tail);
+ /* Tail chunk might've changed even if nothing was collected */
+ f->shr->tail_chunk =
+ f_csptr (f, f_lookup_clear_enq_chunks (f, old_tail_c, tail));
+ f->ooo_enq = 0;
+ }
+
+ /* store-rel: producer owned index (paired with load-acq in consumer) */
+ clib_atomic_store_rel_n (&f->shr->tail, tail);
+
+ return len;
+}
+
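+/*
+ * Illustrative producer-side call, a sketch rather than part of this
+ * change:
+ *
+ *   u8 buf[64];
+ *   int rv = svm_fifo_enqueue (f, sizeof (buf), buf);
+ *   if (rv == SVM_FIFO_EFULL)
+ *     ;   // no space at all, retry later
+ *   else if (rv > 0)
+ *     ;   // rv bytes in the fifo; can differ from the request if ooo
+ *         // data was collected or chunk allocation came up short
+ */
+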
+/**
+ * Enqueue a future segment.
+ *