+static inline void *
+fsh_alloc (fifo_segment_header_t *fsh, uword size)
+{
+ return fsh_alloc_aligned (fsh, size, 8);
+}
+
+static inline fifo_segment_slice_t *
+fsh_slice_get (fifo_segment_header_t * fsh, u32 slice_index)
+{
+ return &fsh->slices[slice_index];
+}
+
+static inline fifo_slice_private_t *
+fs_slice_private_get (fifo_segment_t *fs, u32 slice_index)
+{
+ ASSERT (slice_index < fs->n_slices);
+ return &fs->slices[slice_index];
+}
+
+static char *fifo_segment_mem_status_strings[] = {
+#define _(sym,str) str,
+ foreach_segment_mem_status
+#undef _
+};
+
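+/* Remaining allocatable bytes in the segment. byte_index is loaded relaxed,
+ * so the result may be slightly stale under concurrent allocations. */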
+static inline uword
+fsh_n_free_bytes (fifo_segment_header_t * fsh)
+{
+ uword cur_pos = clib_atomic_load_relax_n (&fsh->byte_index);
+ ASSERT (fsh->max_byte_index > cur_pos);
+ return fsh->max_byte_index - cur_pos;
+}
+
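+/* Segment-level accounting of cached bytes (bytes parked in free lists and
+ * available for reuse); updates use release ordering, reads are relaxed. */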
+static inline void
+fsh_cached_bytes_add (fifo_segment_header_t * fsh, uword size)
+{
+ clib_atomic_fetch_add_rel (&fsh->n_cached_bytes, size);
+}
+
+static inline void
+fsh_cached_bytes_sub (fifo_segment_header_t * fsh, uword size)
+{
+ clib_atomic_fetch_sub_rel (&fsh->n_cached_bytes, size);
+}
+
+static inline uword
+fsh_n_cached_bytes (fifo_segment_header_t * fsh)
+{
+ uword n_cached = clib_atomic_load_relax_n (&fsh->n_cached_bytes);
+ return n_cached;
+}
+
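+/* inc may be negative, so the same helper both adds to and subtracts from
+ * the active fifo count. */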
+static inline void
+fsh_active_fifos_update (fifo_segment_header_t * fsh, int inc)
+{
+ clib_atomic_fetch_add_rel (&fsh->n_active_fifos, inc);
+}
+
+static inline u32
+fsh_n_active_fifos (fifo_segment_header_t * fsh)
+{
+ return clib_atomic_load_relax_n (&fsh->n_active_fifos);
+}
+
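+/* Sum of per-slice virtual memory; relaxed loads, so the total is a snapshot
+ * that may lag concurrent updates. */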
+static inline uword
+fsh_virtual_mem (fifo_segment_header_t * fsh)
+{
+ fifo_segment_slice_t *fss;
+ uword total_vm = 0;
+ int i;
+
+ for (i = 0; i < fsh->n_slices; i++)
+ {
+ fss = fsh_slice_get (fsh, i);
+ total_vm += clib_atomic_load_relax_n (&fss->virtual_mem);
+ }
+ return total_vm;
+}
+
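+/* Non-atomic update; presumably only the thread owning the slice calls this,
+ * while readers (fsh_virtual_mem) use relaxed atomic loads. */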
+void
+fsh_virtual_mem_update (fifo_segment_header_t * fsh, u32 slice_index,
+ int n_bytes)
+{
+ fifo_segment_slice_t *fss = fsh_slice_get (fsh, slice_index);
+ fss->virtual_mem += n_bytes;
+}
+
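+/* Minimal test-and-test-and-set spinlock protecting a slice's chunk free
+ * lists: acquire semantics on lock, release on unlock. */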
+static inline void
+fss_chunk_freelist_lock (fifo_segment_slice_t *fss)
+{
+ u32 free = 0;
+ while (!clib_atomic_cmp_and_swap_acq_relax_n (&fss->chunk_lock, &free, 1, 0))
+ {
+ /* atomic load limits number of compare_exchange executions */
+ while (clib_atomic_load_relax_n (&fss->chunk_lock))
+ CLIB_PAUSE ();
+ /* on failure, compare_exchange writes the observed chunk_lock value into free */
+ free = 0;
+ }
+}
+
+static inline void
+fss_chunk_freelist_unlock (fifo_segment_slice_t *fss)
+{
+ /* Make sure all reads/writes are complete before releasing the lock */
+ clib_atomic_release (&fss->chunk_lock);
+}
+
+static inline int
+fss_chunk_fl_index_is_valid (fifo_segment_slice_t * fss, u32 fl_index)
+{
+ return (fl_index < FS_CHUNK_VEC_LEN);
+}
+
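+/* Push a chunk at the head of the free list for its size bucket. The lists
+ * are linked through shared-memory offsets (fs_chunk_sptr / fs_chunk_ptr)
+ * rather than raw pointers. */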
+static void
+fss_chunk_free_list_push (fifo_segment_header_t *fsh,
+ fifo_segment_slice_t *fss, u32 fl_index,
+ svm_fifo_chunk_t *c)
+{
+ fss_chunk_freelist_lock (fss);
+ c->next = fss->free_chunks[fl_index];
+ fss->free_chunks[fl_index] = fs_chunk_sptr (fsh, c);
+ fss_chunk_freelist_unlock (fss);
+}
+
+static void
+fss_chunk_free_list_push_list (fifo_segment_header_t *fsh,
+ fifo_segment_slice_t *fss, u32 fl_index,
+ svm_fifo_chunk_t *head, svm_fifo_chunk_t *tail)
+{
+ fss_chunk_freelist_lock (fss);
+ tail->next = fss->free_chunks[fl_index];
+ fss->free_chunks[fl_index] = fs_chunk_sptr (fsh, head);
+ fss_chunk_freelist_unlock (fss);
+}
+
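+/* Pop a chunk from the size bucket's free list; returns 0 if the list is
+ * empty. */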
+static svm_fifo_chunk_t *
+fss_chunk_free_list_pop (fifo_segment_header_t *fsh, fifo_segment_slice_t *fss,
+ u32 fl_index)
+{
+ svm_fifo_chunk_t *c;
+
+ ASSERT (fss_chunk_fl_index_is_valid (fss, fl_index));
+
+ fss_chunk_freelist_lock (fss);
+
+ if (!fss->free_chunks[fl_index])
+ {
+ fss_chunk_freelist_unlock (fss);
+ return 0;
+ }
+
+ c = fs_chunk_ptr (fsh, fss->free_chunks[fl_index]);
+ fss->free_chunks[fl_index] = c->next;
+
+ fss_chunk_freelist_unlock (fss);
+
+ return c;
+}
+
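+/* Prepend fifo to the slice's doubly linked list of active fifos. */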
+static inline void
+pfss_fifo_add_active_list (fifo_slice_private_t *pfss, svm_fifo_t *f)
+{
+ if (pfss->active_fifos)
+ {
+ pfss->active_fifos->prev = f;
+ f->next = pfss->active_fifos;
+ }
+ pfss->active_fifos = f;
+}
+
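+/* Unlink fifo from the active list, but only if it was actually tracked
+ * (SVM_FIFO_F_LL_TRACKED). */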
+static inline void
+pfss_fifo_del_active_list (fifo_slice_private_t *pfss, svm_fifo_t *f)
+{
+ if (f->flags & SVM_FIFO_F_LL_TRACKED)
+ {
+ if (f->prev)
+ f->prev->next = f->next;
+ else
+ pfss->active_fifos = f->next;
+ if (f->next)
+ f->next->prev = f->prev;
+ }
+}
+
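+/* n_fl_chunk_bytes: bytes currently held in this slice's chunk free lists. */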
+static inline uword
+fss_fl_chunk_bytes (fifo_segment_slice_t * fss)
+{
+ return clib_atomic_load_relax_n (&fss->n_fl_chunk_bytes);
+}
+
+static inline void
+fss_fl_chunk_bytes_add (fifo_segment_slice_t * fss, uword size)
+{
+ clib_atomic_fetch_add_relax (&fss->n_fl_chunk_bytes, size);
+}
+
+static inline void
+fss_fl_chunk_bytes_sub (fifo_segment_slice_t * fss, uword size)
+{
+ clib_atomic_fetch_sub_relax (&fss->n_fl_chunk_bytes, size);