   csp = fs_chunk_sptr (fsh, c);
   ASSERT (csp <= FS_CL_HEAD_MASK);
-  old_head = clib_atomic_load_relax_n (&fss->free_chunks[fl_index]);
+  old_head = clib_atomic_load_acq_n (&fss->free_chunks[fl_index]);
   do
     {
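       /* Retry loop: on CAS failure, old_head is refreshed and new_head is
        * recomputed from it. The CAS that closes this loop (not shown here)
        * presumably uses release ordering on success, so the acquire load
        * above pairs with the release of whichever thread last updated the
        * head. */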
   headsp = fs_chunk_sptr (fsh, head);
   ASSERT (headsp <= FS_CL_HEAD_MASK);
-  old_head = clib_atomic_load_relax_n (&fss->free_chunks[fl_index]);
+  old_head = clib_atomic_load_acq_n (&fss->free_chunks[fl_index]);
   do
     {
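       /* Same pattern for pushing an entire chain: presumably the tail
        * chunk is linked to the previous head inside this loop and one
        * release CAS publishes the whole list. The intended pairing, in
        * sketch form (hypothetical ordering arguments, not the actual
        * helper names):
        *
        *   push:  tail->next = old_head & FS_CL_HEAD_MASK;
        *          CAS head -> new_head with __ATOMIC_RELEASE on success
        *   pop:   old_head = load head with __ATOMIC_ACQUIRE;
        *          the popped chunk's fields are then safe to read
        */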
   ASSERT (fss_chunk_fl_index_is_valid (fss, fl_index));
-  old_head = clib_atomic_load_relax_n (&fss->free_chunks[fl_index]);
+  old_head = clib_atomic_load_acq_n (&fss->free_chunks[fl_index]);
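   /* On the pop side the acquire is what matters for correctness: the chunk
    * at the head is dereferenced to unlink it, so this load must synchronize
    * with the release CAS of the thread that pushed the chunk; with a
    * relaxed load, the popped chunk's next pointer could be read stale. */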
   /* Lock-free stacks are affected by ABA if a side allocates a chunk and
    * shortly thereafter frees it. To circumvent that, reuse the upper bits