+          fsh_check_mem (fsh);
+          n_free = fsh_n_free_bytes (fsh);
+        }
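+      /* If the free lists hold enough bytes to cover the request, try
+       * to compose it out of multiple smaller free chunks */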
+      if (chunk_size <= fss->n_fl_chunk_bytes)
+        {
+          c = fs_try_alloc_multi_chunk (fsh, fss, chunk_size);
+          if (c)
+            goto done;
+          batch = n_free / FIFO_SEGMENT_MIN_FIFO_SIZE;
+          if (!batch || fsh_try_alloc_chunk_batch (fsh, fss, 0, batch))
+            {
+              fsh_check_mem (fsh);
+              goto done;
+            }
+        }
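+      /* Free lists plus remaining segment memory can cover the request;
+       * top up the free lists with a batch of minimum-size chunks and
+       * retry the multi-chunk allocation */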
+      if (chunk_size <= fss->n_fl_chunk_bytes + n_free)
+        {
+          u32 min_size = FIFO_SEGMENT_MIN_FIFO_SIZE;
+
+          batch = (chunk_size - fss->n_fl_chunk_bytes) / min_size;
+          batch = clib_min (batch + 1, n_free / min_size);
+          if (fsh_try_alloc_chunk_batch (fsh, fss, 0, batch))
+            {
+              fsh_check_mem (fsh);
+              goto done;
+            }
+          c = fs_try_alloc_multi_chunk (fsh, fss, chunk_size);
+        }
+    }
+
+done:
+
+  clib_spinlock_unlock (&fss->chunk_lock);
+
+  return c;
+}
+
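+/**
+ * Return a chain of chunks to a slice's size-segregated free lists
+ *
+ * Pushes each chunk in the chain onto the free list matching its size,
+ * clears its enq/deq rb-tree indices and accounts the reclaimed bytes
+ * as cached.
+ */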
+static void
+fsh_slice_collect_chunks (fifo_segment_header_t * fsh,
+                          fifo_segment_slice_t * fss, svm_fifo_chunk_t * c)
+{
+  svm_fifo_chunk_t *next;
+  int fl_index;
+  u32 n_collect = 0;
+
+  clib_spinlock_lock (&fss->chunk_lock);
+
+  while (c)
+    {
+      next = c->next;
+      fl_index = fs_freelist_for_size (c->length);
+      c->next = fss->free_chunks[fl_index];
+      c->enq_rb_index = RBTREE_TNIL_INDEX;
+      c->deq_rb_index = RBTREE_TNIL_INDEX;
+      fss->free_chunks[fl_index] = c;
+      n_collect += fs_freelist_index_to_size (fl_index);
+      c = next;
+    }
+
+  fss->n_fl_chunk_bytes += n_collect;
+  fsh_cached_bytes_add (fsh, n_collect);
+
+  clib_spinlock_unlock (&fss->chunk_lock);
+}
+
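+/**
+ * Return a chain of chunks to the free lists of the owning slice
+ */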
+void
+fsh_collect_chunks (fifo_segment_header_t * fsh, u32 slice_index,
+                    svm_fifo_chunk_t * c)
+{
+  fifo_segment_slice_t *fss;
+
+  fss = fsh_slice_get (fsh, slice_index);
+  fsh_slice_collect_chunks (fsh, fss, c);
+}
+
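+/* Add fifo at the head of the slice's list of active fifos */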
+static inline void
+fss_fifo_add_active_list (fifo_segment_slice_t * fss, svm_fifo_t * f)
+{
+  if (fss->fifos)
+    {
+      fss->fifos->prev = f;
+      f->next = fss->fifos;
+    }
+  fss->fifos = f;
+}
+
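+/* Remove fifo from the slice's active fifos list, if tracked */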
+static inline void
+fss_fifo_del_active_list (fifo_segment_slice_t * fss, svm_fifo_t * f)
+{
+  if (f->flags & SVM_FIFO_F_LL_TRACKED)
+    {
+      if (f->prev)
+        f->prev->next = f->next;
+      else
+        fss->fifos = f->next;
+      if (f->next)
+        f->next->prev = f->prev;
+    }
+}
+
+/**
+ * Allocate fifo in fifo segment
+ *
+ * Returns 0 if the request exceeds the largest supported chunk size or
+ * if memory for the fifo cannot be found.
+ */
+svm_fifo_t *
+fifo_segment_alloc_fifo_w_slice (fifo_segment_t * fs, u32 slice_index,
+                                 u32 data_bytes, fifo_segment_ftype_t ftype)
+{
+  fifo_segment_header_t *fsh = fs->h;
+  fifo_segment_slice_t *fss;
+  svm_fifo_t *f = 0;
+
+  ASSERT (slice_index < fs->n_slices);
+
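+  /* Requests larger than the biggest chunk size supported by the
+   * segment cannot be satisfied */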
+  if (PREDICT_FALSE (data_bytes > 1 << fsh->max_log2_chunk_size))
+    return 0;
+
+  fss = fsh_slice_get (fsh, slice_index);
+  f = fs_try_alloc_fifo (fsh, fss, data_bytes);
+  if (!f)
+    goto done;
+
+  f->slice_index = slice_index;
+
+  svm_fifo_init (f, data_bytes);
+
+  /* If rx fifo type, add to active fifos list. When cleaning up the
+   * segment, we need a list of active sessions that should be
+   * disconnected. Since both rx and tx fifos keep pointers to the
+   * session, it's enough to track only one of them. */
+  if (ftype == FIFO_SEGMENT_RX_FIFO)
+    {
+      fss_fifo_add_active_list (fss, f);
+      f->flags |= SVM_FIFO_F_LL_TRACKED;
+
+      svm_fifo_init_ooo_lookup (f, 0 /* ooo enq */ );
+    }
+  else
+    {
+      svm_fifo_init_ooo_lookup (f, 1 /* ooo deq */ );
+    }
+
+  fsh_active_fifos_update (fsh, 1);
+  fss->virtual_mem += svm_fifo_size (f);
+
+done:
+  return (f);
+}
+
+/**
+ * Free fifo allocated in fifo segment