+/* Return a pointer to the shared-header slice at @slice_index.
+ * No bounds checking is done here; callers pass a valid index. */
+static inline fifo_segment_slice_t *
+fsh_slice_get (fifo_segment_header_t * fsh, u32 slice_index)
+{
+ fifo_segment_slice_t *fss = fsh->slices + slice_index;
+ return fss;
+}
+
+/* Return the process-private per-slice state for @slice_index.
+ * Asserts the index is within the segment's slice count. */
+static inline fifo_slice_private_t *
+fs_slice_private_get (fifo_segment_t *fs, u32 slice_index)
+{
+ fifo_slice_private_t *pfss;
+
+ ASSERT (slice_index < fs->n_slices);
+ pfss = fs->slices + slice_index;
+ return pfss;
+}
+
+/* Human-readable names for segment memory status values, generated from
+ * foreach_segment_mem_status so the table stays in sync with the enum. */
+static char *fifo_segment_mem_status_strings[] = {
+#define _(sym,str) str,
+ foreach_segment_mem_status
+#undef _
+};
+
+/* Number of bytes still available in the segment, i.e., the distance
+ * between the (relaxed-loaded) current byte index and the maximum.
+ * NOTE(review): the assert is strict (>), so a segment whose byte_index
+ * has reached max_byte_index exactly would trip it in debug images —
+ * confirm allocators never let the two become equal. */
+static inline uword
+fsh_n_free_bytes (fifo_segment_header_t * fsh)
+{
+ uword cur_pos = clib_atomic_load_relax_n (&fsh->byte_index);
+ ASSERT (fsh->max_byte_index > cur_pos);
+ return fsh->max_byte_index - cur_pos;
+}
+
+/* Atomically (release) add @size bytes to the segment's cached-bytes
+ * counter. */
+static inline void
+fsh_cached_bytes_add (fifo_segment_header_t * fsh, uword size)
+{
+ clib_atomic_fetch_add_rel (&fsh->n_cached_bytes, size);
+}
+
+/* Atomically (release) subtract @size bytes from the segment's
+ * cached-bytes counter. Counterpart of fsh_cached_bytes_add. */
+static inline void
+fsh_cached_bytes_sub (fifo_segment_header_t * fsh, uword size)
+{
+ clib_atomic_fetch_sub_rel (&fsh->n_cached_bytes, size);
+}
+
+/* Relaxed read of the segment's cached-bytes counter. */
+static inline uword
+fsh_n_cached_bytes (fifo_segment_header_t * fsh)
+{
+ return clib_atomic_load_relax_n (&fsh->n_cached_bytes);
+}
+
+/* Atomically (release) adjust the active fifo count by @inc, which is
+ * signed and may be negative (fifo freed). */
+static inline void
+fsh_active_fifos_update (fifo_segment_header_t * fsh, int inc)
+{
+ clib_atomic_fetch_add_rel (&fsh->n_active_fifos, inc);
+}
+
+/* Relaxed read of the number of active fifos in the segment. */
+static inline u32
+fsh_n_active_fifos (fifo_segment_header_t * fsh)
+{
+ u32 n_active = clib_atomic_load_relax_n (&fsh->n_active_fifos);
+ return n_active;
+}
+
+/* Sum the virtual memory accounted by every slice of the segment.
+ * Per-slice counters are read with relaxed atomics, so the total is a
+ * snapshot, not an instantaneous consistent value. */
+static inline uword
+fs_virtual_mem (fifo_segment_t *fs)
+{
+ fifo_segment_header_t *fsh = fs->h;
+ uword total_vm = 0;
+ int slice;
+
+ for (slice = 0; slice < fs->n_slices; slice++)
+ {
+ fifo_segment_slice_t *fss = fsh_slice_get (fsh, slice);
+ total_vm += clib_atomic_load_relax_n (&fss->virtual_mem);
+ }
+
+ return total_vm;
+}
+
+/* Adjust a slice's virtual-memory accounting by @n_bytes (signed, may
+ * be negative). Plain, non-atomic update. */
+void
+fsh_virtual_mem_update (fifo_segment_header_t * fsh, u32 slice_index,
+ int n_bytes)
+{
+ fsh_slice_get (fsh, slice_index)->virtual_mem += n_bytes;
+}
+
+/* Check that a chunk free-list index fits in the per-slice free-list
+ * vector. The @fss argument is unused here. */
+static inline int
+fss_chunk_fl_index_is_valid (fifo_segment_slice_t *fss, u32 fl_index)
+{
+ return fl_index < FS_CHUNK_VEC_LEN;
+}
+
+/* Chunk free-list heads pack a 48-bit chunk offset (HEAD_MASK) and a
+ * 16-bit ABA tag in the top bits (TMASK); TINC bumps the tag by one.
+ * See the comment in fss_chunk_free_list_pop for the rationale. */
+#define FS_CL_HEAD_MASK 0xFFFFFFFFFFFF
+#define FS_CL_HEAD_TMASK 0xFFFF000000000000
+#define FS_CL_HEAD_TINC (1ULL << 48)
+
+/* Peek at the head chunk of free list @fl_index. The tag bits are
+ * masked off before converting the shared offset to a pointer. */
+static svm_fifo_chunk_t *
+fss_chunk_free_list_head (fifo_segment_header_t *fsh,
+ fifo_segment_slice_t *fss, u32 fl_index)
+{
+ fs_sptr_t head;
+
+ head = clib_atomic_load_relax_n (&fss->free_chunks[fl_index]);
+ return fs_chunk_ptr (fsh, head & FS_CL_HEAD_MASK);
+}
+
+/* Push chunk @c onto lock-free free list @fl_index. The list head packs
+ * the chunk's shared offset in the low 48 bits and an ABA tag in the
+ * top 16 bits (see fss_chunk_free_list_pop); each successful CAS bumps
+ * the tag. Release on success publishes c->next to concurrent poppers;
+ * acquire on failure refreshes old_head for the retry. */
+static void
+fss_chunk_free_list_push (fifo_segment_header_t *fsh,
+ fifo_segment_slice_t *fss, u32 fl_index,
+ svm_fifo_chunk_t *c)
+{
+ fs_sptr_t old_head, new_head, csp;
+
+ csp = fs_chunk_sptr (fsh, c);
+ /* Offset must fit in the 48 low bits reserved for it */
+ ASSERT (csp <= FS_CL_HEAD_MASK);
+ old_head = clib_atomic_load_acq_n (&fss->free_chunks[fl_index]);
+
+ do
+ {
+ c->next = old_head & FS_CL_HEAD_MASK;
+ new_head = csp + ((old_head + FS_CL_HEAD_TINC) & FS_CL_HEAD_TMASK);
+ }
+ while (!__atomic_compare_exchange (&fss->free_chunks[fl_index], &old_head,
+ &new_head, 0 /* weak */, __ATOMIC_RELEASE,
+ __ATOMIC_ACQUIRE));
+}
+
+/* Push a chain of chunks [head..tail] onto lock-free free list
+ * @fl_index in one CAS. Only tail->next is rewritten here, so the
+ * intermediate next links are assumed to already be in place —
+ * callers build the chain before pushing (TODO confirm at call sites).
+ * Same offset+ABA-tag head encoding as fss_chunk_free_list_push. */
+static void
+fss_chunk_free_list_push_list (fifo_segment_header_t *fsh,
+ fifo_segment_slice_t *fss, u32 fl_index,
+ svm_fifo_chunk_t *head, svm_fifo_chunk_t *tail)
+{
+ fs_sptr_t old_head, new_head, headsp;
+
+ headsp = fs_chunk_sptr (fsh, head);
+ ASSERT (headsp <= FS_CL_HEAD_MASK);
+ old_head = clib_atomic_load_acq_n (&fss->free_chunks[fl_index]);
+
+ do
+ {
+ tail->next = old_head & FS_CL_HEAD_MASK;
+ new_head = headsp + ((old_head + FS_CL_HEAD_TINC) & FS_CL_HEAD_TMASK);
+ }
+ while (!__atomic_compare_exchange (&fss->free_chunks[fl_index], &old_head,
+ &new_head, 0 /* weak */, __ATOMIC_RELEASE,
+ __ATOMIC_ACQUIRE));
+}
+
+/* Pop the head chunk of free list @fl_index; returns 0 when the list
+ * is empty. Asserts the free-list index is valid. */
+static svm_fifo_chunk_t *
+fss_chunk_free_list_pop (fifo_segment_header_t *fsh, fifo_segment_slice_t *fss,
+ u32 fl_index)
+{
+ fs_sptr_t old_head, new_head;
+ svm_fifo_chunk_t *c;
+
+ ASSERT (fss_chunk_fl_index_is_valid (fss, fl_index));
+
+ old_head = clib_atomic_load_acq_n (&fss->free_chunks[fl_index]);
+
+ /* Lock-free stacks are affected by ABA if a side allocates a chunk and
+ * shortly thereafter frees it. To circumvent that, reuse the upper bits
+ * of the head of the list shared pointer, i.e., offset to where the chunk
+ * is, as a tag. The tag is incremented with each push/pop operation and
+ * therefore collisions can only happen if an element is popped and pushed
+ * exactly after a complete wrap of the tag (16 bits). It's unlikely either
+ * of the sides will be descheduled for that long */
+ do
+ {
+ /* Low 48 bits all zero means the list is empty */
+ if (!(old_head & FS_CL_HEAD_MASK))
+ return 0;
+ c = fs_chunk_ptr (fsh, old_head & FS_CL_HEAD_MASK);
+ new_head = c->next + ((old_head + FS_CL_HEAD_TINC) & FS_CL_HEAD_TMASK);
+ }
+ while (!__atomic_compare_exchange (&fss->free_chunks[fl_index], &old_head,
+ &new_head, 0 /* weak */, __ATOMIC_RELEASE,
+ __ATOMIC_ACQUIRE));
+
+ return c;
+}
+
+/* Push fifo @sf onto the slice's fifo free list. Plain, non-atomic
+ * update — unlike the chunk free lists this is not lock-free, so
+ * NOTE(review): callers presumably serialize access to free_fifos;
+ * confirm locking at call sites. */
+static void
+fss_fifo_free_list_push (fifo_segment_header_t *fsh, fifo_segment_slice_t *fss,
+ svm_fifo_shared_t *sf)
+{
+ sf->next = fss->free_fifos;
+ fss->free_fifos = fs_sptr (fsh, sf);
+}
+
+/* Push a pre-linked chain of fifos [head..tail] onto the slice's fifo
+ * free list. Only tail->next is rewritten; intermediate links are
+ * assumed already set by the caller. Non-atomic, like
+ * fss_fifo_free_list_push. */
+static void
+fss_fifo_free_list_push_list (fifo_segment_header_t *fsh,
+ fifo_segment_slice_t *fss,
+ svm_fifo_shared_t *head, svm_fifo_shared_t *tail)
+{
+ tail->next = fss->free_fifos;
+ fss->free_fifos = fs_sptr (fsh, head);
+}
+
+/* Pop the head of the slice's fifo free list. @sf is dereferenced
+ * without a null check, so callers must ensure the list is non-empty
+ * before calling. Non-atomic, like the other fifo free-list ops. */
+svm_fifo_shared_t *
+fss_fifo_free_list_pop (fifo_segment_header_t *fsh, fifo_segment_slice_t *fss)
+{
+ svm_fifo_shared_t *sf;
+ sf = fs_ptr (fsh, fss->free_fifos);
+ fss->free_fifos = sf->next;
+ return sf;
+}
+
+/* Insert fifo @f at the head of the slice's doubly-linked list of
+ * active fifos. */
+static inline void
+pfss_fifo_add_active_list (fifo_slice_private_t *pfss, svm_fifo_t *f)
+{
+ svm_fifo_t *head = pfss->active_fifos;
+
+ if (head)
+ {
+ head->prev = f;
+ f->next = head;
+ }
+ pfss->active_fifos = f;
+}
+
+/* Unlink fifo @f from the slice's active-fifo list, but only if it is
+ * actually tracked (SVM_FIFO_F_LL_TRACKED set); otherwise a no-op. */
+static inline void
+pfss_fifo_del_active_list (fifo_slice_private_t *pfss, svm_fifo_t *f)
+{
+ if (!(f->flags & SVM_FIFO_F_LL_TRACKED))
+ return;
+
+ if (f->prev)
+ f->prev->next = f->next;
+ else
+ pfss->active_fifos = f->next;
+
+ if (f->next)
+ f->next->prev = f->prev;
+}
+
+/* Relaxed read of the number of bytes sitting on the slice's chunk
+ * free lists. */
+static inline uword
+fss_fl_chunk_bytes (fifo_segment_slice_t * fss)
+{
+ uword n_bytes = clib_atomic_load_relax_n (&fss->n_fl_chunk_bytes);
+ return n_bytes;
+}
+
+/* Atomically (relaxed) add @size bytes to the slice's free-list chunk
+ * byte counter. */
+static inline void
+fss_fl_chunk_bytes_add (fifo_segment_slice_t * fss, uword size)
+{
+ clib_atomic_fetch_add_relax (&fss->n_fl_chunk_bytes, size);
+}
+
+static inline void
+fss_fl_chunk_bytes_sub (fifo_segment_slice_t * fss, uword size)
+{
+ clib_atomic_fetch_sub_relax (&fss->n_fl_chunk_bytes, size);