csp = fs_chunk_sptr (fsh, c);
ASSERT (csp <= FS_CL_HEAD_MASK);
- old_head = clib_atomic_load_relax_n (&fss->free_chunks[fl_index]);
+ old_head = clib_atomic_load_acq_n (&fss->free_chunks[fl_index]);
do
{
headsp = fs_chunk_sptr (fsh, head);
ASSERT (headsp <= FS_CL_HEAD_MASK);
- old_head = clib_atomic_load_relax_n (&fss->free_chunks[fl_index]);
+ old_head = clib_atomic_load_acq_n (&fss->free_chunks[fl_index]);
do
{
ASSERT (fss_chunk_fl_index_is_valid (fss, fl_index));
- old_head = clib_atomic_load_relax_n (&fss->free_chunks[fl_index]);
+ old_head = clib_atomic_load_acq_n (&fss->free_chunks[fl_index]);
/* Lock-free stacks are affected by ABA if a side allocates a chunk and
 * shortly thereafter frees it. To circumvent that, reuse the upper bits
 * of the free list head as a version counter. */
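
Reviewer note: the relax-to-acquire changes above and the versioned-head trick described in this comment work together. The acquire load pairs with the release CAS that publishes a new head, and the counter packed into the head's upper bits keeps a recycled chunk offset from being mistaken for an unchanged head. Below is a minimal, self-contained sketch of that push path using plain GCC atomics; vstack_push, chunk_t, HEAD_MASK and HEAD_INC are illustrative names, not this file's actual symbols, and offsets here are in chunk_t-sized units for simplicity.

#include <stdint.h>

#define HEAD_MASK 0x0000ffffffffffffULL /* low 48 bits: offset of top chunk */
#define HEAD_INC  0x0001000000000000ULL /* +1 in the upper 16-bit counter */

typedef struct
{
  uint64_t next; /* offset of the next free chunk */
} chunk_t;

/* Push the chunk at offset csp (csp <= HEAD_MASK) onto the free list whose
 * head word is *head. base is the start of the region offsets refer to. */
static void
vstack_push (uint64_t *head, chunk_t *base, uint64_t csp)
{
  uint64_t old_head, new_head;
  chunk_t *c = base + csp;

  /* Acquire pairs with the release CAS of concurrent pushers, so the chunk
   * the current head refers to is fully published before we link behind it. */
  old_head = __atomic_load_n (head, __ATOMIC_ACQUIRE);
  do
    {
      c->next = old_head & HEAD_MASK;
      /* New offset in the low bits; the upper-bit counter is bumped on every
       * push, so a head that was popped and re-pushed with the same offset
       * still compares unequal (the ABA case). */
      new_head = csp | ((old_head + HEAD_INC) & ~HEAD_MASK);
    }
  while (!__atomic_compare_exchange_n (head, &old_head, new_head,
				       1 /* weak */, __ATOMIC_RELEASE,
				       __ATOMIC_RELAXED));
}
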
if (data_bytes <= fss_fl_chunk_bytes (fss) + n_free)
{
u32 min_size = FIFO_SEGMENT_MIN_FIFO_SIZE;
-
+ if (n_free < min_size)
+ goto done;
batch = (data_bytes - fss_fl_chunk_bytes (fss)) / min_size;
batch = clib_min (batch + 1, n_free / min_size);
if (fsh_try_alloc_chunk_batch (fsh, fss, 0, batch))
return (u8 *) f->shr - (u8 *) f->fs_hdr;
}
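
For context on the sizing math in this hunk: the shortfall between the requested bytes and what the free lists already hold is covered in minimum-fifo-size chunks, rounded up by one, and capped by how many such chunks the remaining free space can back; the added guard bails out early when not even one minimum-size chunk fits. The helper below restates that computation with made-up numbers; batch_size and its arguments are illustrative names, not symbols from this file.

#include <stdint.h>

static uint32_t
batch_size (uint32_t data_bytes, uint32_t fl_bytes, uint32_t n_free,
	    uint32_t min_size)
{
  uint32_t batch, cap;

  if (n_free < min_size) /* the added guard: no room for even one chunk */
    return 0;
  batch = (data_bytes - fl_bytes) / min_size + 1; /* cover the shortfall */
  cap = n_free / min_size;		/* what the free space can back */
  return batch < cap ? batch : cap;
}

/* Example: data_bytes = 26 << 10, fl_bytes = 16 << 10, n_free = 64 << 10,
 * min_size = 4 << 10 gives (10240 / 4096) + 1 = 3 chunks, capped at 16. */
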
+svm_fifo_chunk_t *
+fifo_segment_alloc_chunk_w_slice (fifo_segment_t *fs, u32 slice_index,
+ u32 chunk_size)
+{
+ fifo_segment_header_t *fsh = fs->h;
+ fifo_segment_slice_t *fss;
+
+ fss = fsh_slice_get (fsh, slice_index);
+ return fsh_try_alloc_chunk (fsh, fss, chunk_size);
+}
+
+void
+fifo_segment_collect_chunk (fifo_segment_t *fs, u32 slice_index,
+ svm_fifo_chunk_t *c)
+{
+ fsh_collect_chunks (fs->h, slice_index, c);
+}
+
+uword
+fifo_segment_chunk_offset (fifo_segment_t *fs, svm_fifo_chunk_t *c)
+{
+ return (u8 *) c - (u8 *) fs->h;
+}
+
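
The three exported helpers added above let code outside this file allocate slice chunks directly, pass them around as header-relative offsets, and hand them back, without going through a fifo. The sketch below is only a usage illustration under the assumption that the allocator returns 0 on failure; share_chunk_example and the 4096-byte size are hypothetical.

static int
share_chunk_example (fifo_segment_t *fs, u32 slice_index)
{
  svm_fifo_chunk_t *c;
  uword off;

  /* Allocate a chunk straight from the slice's free lists / heap. */
  c = fifo_segment_alloc_chunk_w_slice (fs, slice_index, 4096);
  if (!c)
    return -1;

  /* Chunks typically live in a shared memory segment, so peers exchange
   * offsets relative to the segment header rather than raw pointers. */
  off = fifo_segment_chunk_offset (fs, c);

  /* A process that mapped the same segment recovers the pointer by
   * inverting the offset computation shown above. */
  c = (svm_fifo_chunk_t *) ((u8 *) fs->h + off);

  /* Return the chunk to the slice when done. */
  fifo_segment_collect_chunk (fs, slice_index, c);
  return 0;
}
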
svm_msg_q_t *
fifo_segment_msg_q_alloc (fifo_segment_t *fs, u32 mq_index,
svm_msg_q_cfg_t *cfg)