csp = fs_chunk_sptr (fsh, c);
ASSERT (csp <= FS_CL_HEAD_MASK);
- old_head = clib_atomic_load_relax_n (&fss->free_chunks[fl_index]);
+ old_head = clib_atomic_load_acq_n (&fss->free_chunks[fl_index]);
do
{
c->next = old_head & FS_CL_HEAD_MASK;
new_head = csp + ((old_head + FS_CL_HEAD_TINC) & FS_CL_HEAD_TMASK);
}
- while (!clib_atomic_cmp_and_swap_acq_relax (
- &fss->free_chunks[fl_index], &old_head, &new_head, 1 /* weak */));
+ while (!__atomic_compare_exchange (&fss->free_chunks[fl_index], &old_head,
+ &new_head, 0 /* weak */, __ATOMIC_RELEASE,
+ __ATOMIC_ACQUIRE));
}
static void
headsp = fs_chunk_sptr (fsh, head);
ASSERT (headsp <= FS_CL_HEAD_MASK);
- old_head = clib_atomic_load_relax_n (&fss->free_chunks[fl_index]);
+ old_head = clib_atomic_load_acq_n (&fss->free_chunks[fl_index]);
do
{
tail->next = old_head & FS_CL_HEAD_MASK;
new_head = headsp + ((old_head + FS_CL_HEAD_TINC) & FS_CL_HEAD_TMASK);
}
- while (!clib_atomic_cmp_and_swap_acq_relax (
- &fss->free_chunks[fl_index], &old_head, &new_head, 1 /* weak */));
+ while (!__atomic_compare_exchange (&fss->free_chunks[fl_index], &old_head,
+ &new_head, 0 /* weak */, __ATOMIC_RELEASE,
+ __ATOMIC_ACQUIRE));
}
static svm_fifo_chunk_t *
ASSERT (fss_chunk_fl_index_is_valid (fss, fl_index));
- old_head = clib_atomic_load_relax_n (&fss->free_chunks[fl_index]);
+ old_head = clib_atomic_load_acq_n (&fss->free_chunks[fl_index]);
/* Lock-free stacks are affected by ABA if a side allocates a chunk and
* shortly thereafter frees it. To circumvent that, reuse the upper bits
c = fs_chunk_ptr (fsh, old_head & FS_CL_HEAD_MASK);
new_head = c->next + ((old_head + FS_CL_HEAD_TINC) & FS_CL_HEAD_TMASK);
}
- while (!clib_atomic_cmp_and_swap_acq_relax (
- &fss->free_chunks[fl_index], &old_head, &new_head, 1 /* weak */));
+ while (!__atomic_compare_exchange (&fss->free_chunks[fl_index], &old_head,
+ &new_head, 0 /* weak */, __ATOMIC_RELEASE,
+ __ATOMIC_ACQUIRE));
return c;
}
if (data_bytes <= fss_fl_chunk_bytes (fss) + n_free)
{
u32 min_size = FIFO_SEGMENT_MIN_FIFO_SIZE;
-
+ if (n_free < min_size)
+ goto done;
batch = (data_bytes - fss_fl_chunk_bytes (fss)) / min_size;
batch = clib_min (batch + 1, n_free / min_size);
if (fsh_try_alloc_chunk_batch (fsh, fss, 0, batch))
return f;
}
+/**
+ * Duplicate a fifo header within a segment.
+ *
+ * Allocates a new fifo header and shallow-copies @a f into it, so the
+ * duplicate shares the original's shared-header and chunk pointers; the
+ * fifo's data chunks are NOT cloned.
+ *
+ * NOTE(review): assumes fs_fifo_alloc cannot return 0 here — confirm,
+ * since the result is written to by clib_memcpy without a null check.
+ *
+ * @param fs	fifo segment owning @a f
+ * @param f	fifo to duplicate
+ * @return	newly allocated fifo header copy of @a f
+ */
+svm_fifo_t *
+fifo_segment_duplicate_fifo (fifo_segment_t *fs, svm_fifo_t *f)
+{
+ svm_fifo_t *nf = fs_fifo_alloc (fs, 0);
+ clib_memcpy (nf, f, sizeof (*f));
+ return nf;
+}
+
/**
* Free fifo allocated in fifo segment
*/
return (u8 *) f->shr - (u8 *) f->fs_hdr;
}
+/**
+ * Allocate a chunk from a given slice's chunk stores.
+ *
+ * Thin public wrapper: resolves the slice from @a slice_index and
+ * delegates to fsh_try_alloc_chunk.
+ *
+ * @param fs		fifo segment to allocate from
+ * @param slice_index	index of the slice to allocate from
+ * @param chunk_size	requested chunk size in bytes
+ * @return		chunk on success, presumably 0 on failure —
+ *			fsh_try_alloc_chunk's failure value not visible here
+ */
+svm_fifo_chunk_t *
+fifo_segment_alloc_chunk_w_slice (fifo_segment_t *fs, u32 slice_index,
+ u32 chunk_size)
+{
+ fifo_segment_header_t *fsh = fs->h;
+ fifo_segment_slice_t *fss;
+
+ fss = fsh_slice_get (fsh, slice_index);
+ return fsh_try_alloc_chunk (fsh, fss, chunk_size);
+}
+
+/**
+ * Return a chunk to the owning slice's free stores.
+ *
+ * Thin public wrapper over fsh_collect_chunks. The chunk must have been
+ * allocated from @a slice_index of this segment — TODO confirm, the
+ * pairing with fifo_segment_alloc_chunk_w_slice suggests it but is not
+ * enforced here.
+ *
+ * @param fs		fifo segment the chunk belongs to
+ * @param slice_index	slice the chunk is returned to
+ * @param c		chunk to collect
+ */
+void
+fifo_segment_collect_chunk (fifo_segment_t *fs, u32 slice_index,
+ svm_fifo_chunk_t *c)
+{
+ fsh_collect_chunks (fs->h, slice_index, c);
+}
+
+/**
+ * Byte offset of a chunk relative to the segment header base.
+ *
+ * Useful for exchanging chunk references across address spaces where the
+ * segment may be mapped at different virtual addresses — presumably; only
+ * the pointer arithmetic is visible here.
+ *
+ * @param fs	fifo segment containing @a c
+ * @param c	chunk to compute the offset of
+ * @return	offset in bytes of @a c from fs->h
+ */
+uword
+fifo_segment_chunk_offset (fifo_segment_t *fs, svm_fifo_chunk_t *c)
+{
+ return (u8 *) c - (u8 *) fs->h;
+}
+
svm_msg_q_t *
fifo_segment_msg_q_alloc (fifo_segment_t *fs, u32 mq_index,
svm_msg_q_cfg_t *cfg)