+ uword n_free_bytes;
+ u32 min_size;
+
+ min_size = clib_max ((fsh->pct_first_alloc * data_bytes) / 100, 4096);
+ fl_index = fs_freelist_for_size (min_size);
+
+ clib_spinlock_lock (&fss->chunk_lock);
+
+ if (fss->free_fifos && fss->free_chunks[fl_index])
+ {
+ f = fs_try_alloc_fifo_freelist (fss, fl_index);
+ if (f)
+ {
+ fsh_cached_bytes_sub (fsh, fs_freelist_index_to_size (fl_index));
+ goto done;
+ }
+ }
+
+ fifo_sz = sizeof (svm_fifo_t) + sizeof (svm_fifo_chunk_t);
+ fifo_sz += 1 << max_log2 (min_size);
+ n_free_bytes = fsh_n_free_bytes (fsh);
+
+ if (fifo_sz * FIFO_SEGMENT_ALLOC_BATCH_SIZE < n_free_bytes)
+ {
+ if (!fs_try_alloc_fifo_batch (fsh, fss, fl_index,
+ FIFO_SEGMENT_ALLOC_BATCH_SIZE))
+ {
+ f = fs_try_alloc_fifo_freelist (fss, fl_index);
+ if (f)
+ {
+ fsh_cached_bytes_sub (fsh,
+ fs_freelist_index_to_size (fl_index));
+ goto done;
+ }
+ }
+ else
+ {
+ fsh_check_mem (fsh);
+ n_free_bytes = fsh_n_free_bytes (fsh);
+ }
+ }
+ if (fifo_sz <= n_free_bytes)
+ {
+ void *oldheap = ssvm_push_heap (fsh->ssvm_sh);
+ f = svm_fifo_alloc (min_size);
+ ssvm_pop_heap (oldheap);
+ if (f)
+ {
+ clib_atomic_fetch_add_rel (&fss->num_chunks[fl_index], 1);
+ fsh_free_bytes_sub (fsh, fifo_sz);
+ goto done;
+ }
+ fsh_check_mem (fsh);
+ }
+ /* All failed, try to allocate min of data bytes and fifo sz */
+ fifo_sz = clib_min (fifo_sz, data_bytes);
+ if (fifo_sz <= fss->n_fl_chunk_bytes)
+ f = fs_try_alloc_fifo_freelist_multi_chunk (fsh, fss, fifo_sz);
+
+done:
+ clib_spinlock_unlock (&fss->chunk_lock);
+
+ if (f)
+ {
+ f->size = data_bytes;
+ f->fs_hdr = fsh;
+ }
+ return f;
+}
+
+/**
+ * Allocate one chunk of at least @a chunk_size bytes for a segment slice.
+ *
+ * The request is mapped to a free-list bin via fs_freelist_for_size () and
+ * satisfied, in order of preference, by:
+ *   1. popping the head of the slice's exact-size free list,
+ *   2. carving a fresh chunk out of the segment heap,
+ *   3. combining several smaller free-list chunks, optionally after
+ *      batch-allocating more minimum-size chunks into the free lists.
+ *
+ * All free-list and byte accounting is done under the slice's chunk_lock.
+ *
+ * @param fsh		fifo segment header
+ * @param slice_index	slice to allocate from
+ * @param chunk_size	requested payload size in bytes
+ * @return		chunk on success, 0 if the segment is out of memory
+ */
+svm_fifo_chunk_t *
+fsh_alloc_chunk (fifo_segment_header_t * fsh, u32 slice_index, u32 chunk_size)
+{
+  fifo_segment_slice_t *fss;
+  svm_fifo_chunk_t *c;
+  int fl_index;
+
+  fl_index = fs_freelist_for_size (chunk_size);
+  fss = fsh_slice_get (fsh, slice_index);
+
+  clib_spinlock_lock (&fss->chunk_lock);
+
+  c = fss->free_chunks[fl_index];
+
+  if (c)
+    {
+      /* Fast path: pop the head of the exact-size free list and update
+       * the slice's free-list and the segment's cached byte counters. */
+      fss->free_chunks[fl_index] = c->next;
+      c->next = 0;
+      fss->n_fl_chunk_bytes -= fs_freelist_index_to_size (fl_index);
+      fsh_cached_bytes_sub (fsh, fs_freelist_index_to_size (fl_index));
+    }
+  else
+    {
+      void *oldheap;
+      uword n_free;
+      u32 batch;
+
+      /* Round the request up to the size actually served by this bin so
+       * all subsequent size checks use the bin size, not the raw request */
+      chunk_size = fs_freelist_index_to_size (fl_index);
+      n_free = fsh_n_free_bytes (fsh);
+
+      if (chunk_size <= n_free)
+	{
+	  /* Enough raw heap space: allocate a brand new chunk directly
+	   * from the segment's shared-memory heap */
+	  oldheap = ssvm_push_heap (fsh->ssvm_sh);
+	  c = svm_fifo_chunk_alloc (chunk_size);
+	  ssvm_pop_heap (oldheap);
+
+	  if (c)
+	    {
+	      clib_atomic_fetch_add_rel (&fss->num_chunks[fl_index], 1);
+	      /* Charge payload plus the chunk header */
+	      fsh_free_bytes_sub (fsh, chunk_size + sizeof (*c));
+	      goto done;
+	    }
+
+	  /* Heap alloc failed despite the byte count; re-check segment
+	   * memory state and refresh our view of free bytes */
+	  fsh_check_mem (fsh);
+	  n_free = fsh_n_free_bytes (fsh);
+	}
+      if (chunk_size <= fss->n_fl_chunk_bytes)
+	{
+	  /* Free lists hold enough bytes in aggregate: try to stitch the
+	   * request together from multiple smaller chunks */
+	  c = fs_try_alloc_multi_chunk (fsh, fss, chunk_size);
+	  if (c)
+	    goto done;
+	  /* Multi-chunk failed; grow the free lists with as many min-size
+	   * chunks as the heap can hold, then fall through to retry below.
+	   * If nothing can be batched, give up (c is still 0). */
+	  batch = n_free / FIFO_SEGMENT_MIN_FIFO_SIZE;
+	  if (!batch || fsh_try_alloc_chunk_batch (fsh, fss, 0, batch))
+	    {
+	      fsh_check_mem (fsh);
+	      goto done;
+	    }
+	}
+      if (chunk_size <= fss->n_fl_chunk_bytes + n_free)
+	{
+	  u32 min_size = FIFO_SEGMENT_MIN_FIFO_SIZE;
+
+	  /* Combined free-list + heap bytes can cover the request: batch
+	   * just enough min-size chunks to make up the free-list shortfall
+	   * (capped by what the heap can provide), then retry multi-chunk */
+	  batch = (chunk_size - fss->n_fl_chunk_bytes) / min_size;
+	  batch = clib_min (batch + 1, n_free / min_size);
+	  if (fsh_try_alloc_chunk_batch (fsh, fss, 0, batch))
+	    {
+	      fsh_check_mem (fsh);
+	      goto done;
+	    }
+	  c = fs_try_alloc_multi_chunk (fsh, fss, chunk_size);
+	}
+    }
+
+done:
+
+  clib_spinlock_unlock (&fss->chunk_lock);
+
+  return c;
+}
+
+static void
+fsh_slice_collect_chunks (fifo_segment_header_t * fsh,
+ fifo_segment_slice_t * fss, svm_fifo_chunk_t * c)
+{
+ svm_fifo_chunk_t *next;