X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fsvm%2Ffifo_segment.c;h=48d019c9aace4ffd286ac5a086b38f3211eab839;hb=2711ca710affe0c52bf63e08e0cf0588094e6198;hp=00fb023f3cbad9ddbe38fc44d3848cdc252f67db;hpb=8c517c8fb44e7003ecdb615bc03163536e24d4d4;p=vpp.git

diff --git a/src/svm/fifo_segment.c b/src/svm/fifo_segment.c
index 00fb023f3cb..48d019c9aac 100644
--- a/src/svm/fifo_segment.c
+++ b/src/svm/fifo_segment.c
@@ -105,13 +105,14 @@ fsh_n_active_fifos (fifo_segment_header_t * fsh)
 }
 
 static inline uword
-fsh_virtual_mem (fifo_segment_header_t * fsh)
+fs_virtual_mem (fifo_segment_t *fs)
 {
+  fifo_segment_header_t *fsh = fs->h;
   fifo_segment_slice_t *fss;
   uword total_vm = 0;
   int i;
 
-  for (i = 0; i < fsh->n_slices; i++)
+  for (i = 0; i < fs->n_slices; i++)
     {
       fss = fsh_slice_get (fsh, i);
       total_vm += clib_atomic_load_relax_n (&fss->virtual_mem);
@@ -127,31 +128,22 @@ fsh_virtual_mem_update (fifo_segment_header_t * fsh, u32 slice_index,
   fss->virtual_mem += n_bytes;
 }
 
-static inline void
-fss_chunk_freelist_lock (fifo_segment_slice_t *fss)
+static inline int
+fss_chunk_fl_index_is_valid (fifo_segment_slice_t *fss, u32 fl_index)
 {
-  u32 free = 0;
-  while (!clib_atomic_cmp_and_swap_acq_relax_n (&fss->chunk_lock, &free, 1, 0))
-    {
-      /* atomic load limits number of compare_exchange executions */
-      while (clib_atomic_load_relax_n (&fss->chunk_lock))
-	CLIB_PAUSE ();
-      /* on failure, compare_exchange writes (*p)->lock into free */
-      free = 0;
-    }
+  return (fl_index < FS_CHUNK_VEC_LEN);
 }
 
-static inline void
-fss_chunk_freelist_unlock (fifo_segment_slice_t *fss)
-{
-  /* Make sure all reads/writes are complete before releasing the lock */
-  clib_atomic_release (&fss->chunk_lock);
-}
+#define FS_CL_HEAD_MASK	 0xFFFFFFFFFFFF
+#define FS_CL_HEAD_TMASK 0xFFFF000000000000
+#define FS_CL_HEAD_TINC	 (1ULL << 48)
 
-static inline int
-fss_chunk_fl_index_is_valid (fifo_segment_slice_t * fss, u32 fl_index)
+static svm_fifo_chunk_t *
+fss_chunk_free_list_head (fifo_segment_header_t *fsh,
+			  fifo_segment_slice_t *fss, u32 fl_index)
 {
-  return (fl_index < FS_CHUNK_VEC_LEN);
+  fs_sptr_t headsp = clib_atomic_load_relax_n (&fss->free_chunks[fl_index]);
+  return fs_chunk_ptr (fsh, headsp & FS_CL_HEAD_MASK);
 }
 
 static void
@@ -159,10 +151,20 @@ fss_chunk_free_list_push (fifo_segment_header_t *fsh,
 			  fifo_segment_slice_t *fss, u32 fl_index,
 			  svm_fifo_chunk_t *c)
 {
-  fss_chunk_freelist_lock (fss);
-  c->next = fss->free_chunks[fl_index];
-  fss->free_chunks[fl_index] = fs_chunk_sptr (fsh, c);
-  fss_chunk_freelist_unlock (fss);
+  fs_sptr_t old_head, new_head, csp;
+
+  csp = fs_chunk_sptr (fsh, c);
+  ASSERT (csp <= FS_CL_HEAD_MASK);
+  old_head = clib_atomic_load_acq_n (&fss->free_chunks[fl_index]);
+
+  do
+    {
+      c->next = old_head & FS_CL_HEAD_MASK;
+      new_head = csp + ((old_head + FS_CL_HEAD_TINC) & FS_CL_HEAD_TMASK);
+    }
+  while (!__atomic_compare_exchange (&fss->free_chunks[fl_index], &old_head,
+				     &new_head, 0 /* weak */, __ATOMIC_RELEASE,
+				     __ATOMIC_ACQUIRE));
 }
 
 static void
@@ -170,32 +172,50 @@ fss_chunk_free_list_push_list (fifo_segment_header_t *fsh,
 			       fifo_segment_slice_t *fss, u32 fl_index,
 			       svm_fifo_chunk_t *head, svm_fifo_chunk_t *tail)
 {
-  fss_chunk_freelist_lock (fss);
-  tail->next = fss->free_chunks[fl_index];
-  fss->free_chunks[fl_index] = fs_chunk_sptr (fsh, head);
-  fss_chunk_freelist_unlock (fss);
+  fs_sptr_t old_head, new_head, headsp;
+
+  headsp = fs_chunk_sptr (fsh, head);
+  ASSERT (headsp <= FS_CL_HEAD_MASK);
+  old_head = clib_atomic_load_acq_n (&fss->free_chunks[fl_index]);
+
+  do
+    {
+      tail->next = old_head & FS_CL_HEAD_MASK;
+      new_head = headsp + ((old_head + FS_CL_HEAD_TINC) & FS_CL_HEAD_TMASK);
+    }
+  while (!__atomic_compare_exchange (&fss->free_chunks[fl_index], &old_head,
+				     &new_head, 0 /* weak */, __ATOMIC_RELEASE,
+				     __ATOMIC_ACQUIRE));
 }
 
 static svm_fifo_chunk_t *
 fss_chunk_free_list_pop (fifo_segment_header_t *fsh, fifo_segment_slice_t *fss,
 			 u32 fl_index)
 {
+  fs_sptr_t old_head, new_head;
   svm_fifo_chunk_t *c;
 
   ASSERT (fss_chunk_fl_index_is_valid (fss, fl_index));
 
-  fss_chunk_freelist_lock (fss);
+  old_head = clib_atomic_load_acq_n (&fss->free_chunks[fl_index]);
 
-  if (!fss->free_chunks[fl_index])
+  /* Lock-free stacks are affected by ABA if a side allocates a chunk and
+   * shortly thereafter frees it. To circumvent that, reuse the upper bits
+   * of the head of the list shared pointer, i.e., offset to where the chunk
+   * is, as a tag. The tag is incremented with each push/pop operation and
+   * therefore collisions can only happen if an element is popped and pushed
+   * exactly after a complete wrap of the tag (16 bits). It's unlikely either
+   * of the sides will be descheduled for that long */
+  do
     {
-      fss_chunk_freelist_unlock (fss);
-      return 0;
+      if (!(old_head & FS_CL_HEAD_MASK))
+	return 0;
+      c = fs_chunk_ptr (fsh, old_head & FS_CL_HEAD_MASK);
+      new_head = c->next + ((old_head + FS_CL_HEAD_TINC) & FS_CL_HEAD_TMASK);
    }
-
-  c = fs_chunk_ptr (fsh, fss->free_chunks[fl_index]);
-  fss->free_chunks[fl_index] = c->next;
-
-  fss_chunk_freelist_unlock (fss);
+  while (!__atomic_compare_exchange (&fss->free_chunks[fl_index], &old_head,
+				     &new_head, 0 /* weak */, __ATOMIC_RELEASE,
+				     __ATOMIC_ACQUIRE));
 
   return c;
 }
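The hunks above replace the spinlock-protected chunk free lists with lock-free stacks. Since a chunk can be popped and immediately pushed back by a peer (the classic ABA hazard), the list head packs the chunk's segment offset into the low 48 bits (FS_CL_HEAD_MASK) and a 16-bit generation tag into the high bits (FS_CL_HEAD_TMASK), bumped by FS_CL_HEAD_TINC on every push and pop so that a stale compare-exchange fails. The standalone sketch below illustrates the same tagged-head technique outside VPP; the arena, node_t, push and pop names are illustrative assumptions, not part of the fifo_segment API.

/* Minimal sketch of a tagged-head lock-free stack over a flat arena.
 * Offsets live in the low 48 bits of `head`, a generation tag in the
 * high 16 bits; offset 0 is reserved as "empty". */
#include <stdint.h>
#include <stdio.h>

#define HEAD_MASK 0xFFFFFFFFFFFFULL	 /* low 48 bits: offset of top node */
#define TAG_MASK  0xFFFF000000000000ULL /* high 16 bits: ABA tag */
#define TAG_INC	  (1ULL << 48)

typedef struct node_ { uint64_t next; int value; } node_t;

static node_t arena[8];	/* stand-in for the shared segment heap */
static uint64_t head;	/* tagged offset of the stack top */

static void
push (uint64_t off)
{
  uint64_t old = __atomic_load_n (&head, __ATOMIC_ACQUIRE), new_head;
  do
    {
      arena[off].next = old & HEAD_MASK;
      /* new head = this node's offset plus a bumped tag, so a racing
       * pop that read the old head loses its compare-exchange */
      new_head = off + ((old + TAG_INC) & TAG_MASK);
    }
  while (!__atomic_compare_exchange_n (&head, &old, new_head, 0 /* weak */,
				       __ATOMIC_RELEASE, __ATOMIC_ACQUIRE));
}

static node_t *
pop (void)
{
  uint64_t old = __atomic_load_n (&head, __ATOMIC_ACQUIRE), new_head;
  node_t *n;
  do
    {
      if (!(old & HEAD_MASK))
	return 0;		/* empty stack */
      n = &arena[old & HEAD_MASK];
      new_head = n->next + ((old + TAG_INC) & TAG_MASK);
    }
  while (!__atomic_compare_exchange_n (&head, &old, new_head, 0,
				       __ATOMIC_RELEASE, __ATOMIC_ACQUIRE));
  return n;
}

int
main (void)
{
  node_t *a, *b;
  arena[1].value = 41;
  arena[2].value = 42;
  push (1);
  push (2);
  a = pop ();
  b = pop ();
  printf ("%d %d\n", a->value, b->value); /* 42 41 */
  return 0;
}

With a weak compare-exchange the CAS may fail spuriously, which the retry loops absorb; as the committed comment notes, a collision now needs a full 16-bit tag wrap between one side's load and its compare-exchange.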
@@ -275,7 +295,7 @@ fss_fl_chunk_bytes_sub (fifo_segment_slice_t * fss, uword size)
 int
 fifo_segment_init (fifo_segment_t * fs)
 {
-  u32 align = 8, offset = 2 * 4096, slices_sz, i;
+  u32 align = 8, offset = FIFO_SEGMENT_ALLOC_OVERHEAD, slices_sz, i;
   uword max_fifo, seg_start, seg_sz;
   fifo_segment_header_t *fsh;
   ssvm_shared_header_t *sh;
@@ -678,7 +698,8 @@ free_list:
       if (data_bytes <= fss_fl_chunk_bytes (fss) + n_free)
 	{
 	  u32 min_size = FIFO_SEGMENT_MIN_FIFO_SIZE;
-
+	  if (n_free < min_size)
+	    goto done;
 	  batch = (data_bytes - fss_fl_chunk_bytes (fss)) / min_size;
 	  batch = clib_min (batch + 1, n_free / min_size);
 	  if (fsh_try_alloc_chunk_batch (fsh, fss, 0, batch))
@@ -883,6 +904,14 @@ fifo_segment_alloc_fifo_w_offset (fifo_segment_t *fs, uword offset)
   return f;
 }
 
+svm_fifo_t *
+fifo_segment_duplicate_fifo (fifo_segment_t *fs, svm_fifo_t *f)
+{
+  svm_fifo_t *nf = fs_fifo_alloc (fs, 0);
+  clib_memcpy (nf, f, sizeof (*f));
+  return nf;
+}
+
 /**
  * Free fifo allocated in fifo segment
  */
@@ -1022,6 +1051,30 @@ fifo_segment_fifo_offset (svm_fifo_t *f)
   return (u8 *) f->shr - (u8 *) f->fs_hdr;
 }
 
+svm_fifo_chunk_t *
+fifo_segment_alloc_chunk_w_slice (fifo_segment_t *fs, u32 slice_index,
+				  u32 chunk_size)
+{
+  fifo_segment_header_t *fsh = fs->h;
+  fifo_segment_slice_t *fss;
+
+  fss = fsh_slice_get (fsh, slice_index);
+  return fsh_try_alloc_chunk (fsh, fss, chunk_size);
+}
+
+void
+fifo_segment_collect_chunk (fifo_segment_t *fs, u32 slice_index,
+			    svm_fifo_chunk_t *c)
+{
+  fsh_collect_chunks (fs->h, slice_index, c);
+}
+
+uword
+fifo_segment_chunk_offset (fifo_segment_t *fs, svm_fifo_chunk_t *c)
+{
+  return (u8 *) c - (u8 *) fs->h;
+}
+
 svm_msg_q_t *
 fifo_segment_msg_q_alloc (fifo_segment_t *fs, u32 mq_index,
 			  svm_msg_q_cfg_t *cfg)
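The hunk at line 1051 exports chunk management to external callers: fifo_segment_alloc_chunk_w_slice pops a chunk from a slice's free lists (allocating from the heap if needed), fifo_segment_chunk_offset converts the pointer to a segment-relative offset that stays valid in any process mapping the segment, and fifo_segment_collect_chunk returns the chunk to the slice. A plausible caller sketch, assuming an attached segment; the share_chunk_with_peer name is illustrative and error handling is elided:

/* Hypothetical usage of the new chunk API above; not VPP code. */
#include <svm/fifo_segment.h>

static void
share_chunk_with_peer (fifo_segment_t *fs, u32 slice_index)
{
  svm_fifo_chunk_t *c;
  uword off;

  /* grab a chunk sized for 4kB of data from the slice free lists */
  c = fifo_segment_alloc_chunk_w_slice (fs, slice_index, 4096);
  if (!c)
    return;

  /* segment-relative offset, meaningful to any process mapping fs */
  off = fifo_segment_chunk_offset (fs, c);
  (void) off;	/* ... hand `off` to the peer, e.g., over a message queue ... */

  /* once both sides are done with the chunk, return it to the slice */
  fifo_segment_collect_chunk (fs, slice_index, c);
}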
@@ -1271,7 +1324,7 @@ fs_slice_num_free_chunks (fifo_segment_header_t *fsh,
     {
       for (i = 0; i < FS_CHUNK_VEC_LEN; i++)
 	{
-	  c = fs_chunk_ptr (fsh, fss->free_chunks[i]);
+	  c = fss_chunk_free_list_head (fsh, fss, i);
 	  if (c == 0)
 	    continue;
 
@@ -1290,7 +1343,7 @@ fs_slice_num_free_chunks (fifo_segment_header_t *fsh,
   if (fl_index >= FS_CHUNK_VEC_LEN)
     return 0;
 
-  c = fs_chunk_ptr (fsh, fss->free_chunks[fl_index]);
+  c = fss_chunk_free_list_head (fsh, fss, fl_index);
   if (c == 0)
     return 0;
 
@@ -1324,16 +1377,16 @@ fifo_segment_size (fifo_segment_t * fs)
   return fs->h->max_byte_index - fs->h->n_reserved_bytes;
 }
 
-u8
-fsh_has_reached_mem_limit (fifo_segment_header_t * fsh)
+static u8
+fs_has_reached_mem_limit (fifo_segment_t *fs)
 {
-  return (fsh->flags & FIFO_SEGMENT_F_MEM_LIMIT) ? 1 : 0;
+  return (fs->flags & FIFO_SEGMENT_F_MEM_LIMIT) ? 1 : 0;
 }
 
-void
-fsh_reset_mem_limit (fifo_segment_header_t * fsh)
+static void
+fs_reset_mem_limit (fifo_segment_t *fs)
 {
-  fsh->flags &= ~FIFO_SEGMENT_F_MEM_LIMIT;
+  fs->flags &= ~FIFO_SEGMENT_F_MEM_LIMIT;
 }
 
 void *
@@ -1408,26 +1461,26 @@ fifo_segment_get_mem_usage (fifo_segment_t * fs)
 }
 
 fifo_segment_mem_status_t
-fifo_segment_determine_status (fifo_segment_header_t * fsh, u8 usage)
+fifo_segment_determine_status (fifo_segment_t *fs, u8 usage)
 {
-  if (!fsh->high_watermark || !fsh->low_watermark)
+  if (!fs->high_watermark || !fs->low_watermark)
     return MEMORY_PRESSURE_NO_PRESSURE;
 
   /* once the no-memory is detected, the status continues
   * until memory usage gets below the high watermark
   */
-  if (fsh_has_reached_mem_limit (fsh))
+  if (fs_has_reached_mem_limit (fs))
     {
-      if (usage >= fsh->high_watermark)
+      if (usage >= fs->high_watermark)
	return MEMORY_PRESSURE_NO_MEMORY;
       else
-	fsh_reset_mem_limit (fsh);
+	fs_reset_mem_limit (fs);
     }
 
-  if (usage >= fsh->high_watermark)
+  if (usage >= fs->high_watermark)
     return MEMORY_PRESSURE_HIGH_PRESSURE;
 
-  else if (usage >= fsh->low_watermark)
+  else if (usage >= fs->low_watermark)
     return MEMORY_PRESSURE_LOW_PRESSURE;
 
   return MEMORY_PRESSURE_NO_PRESSURE;
@@ -1436,10 +1489,9 @@ fifo_segment_determine_status (fifo_segment_header_t * fsh, u8 usage)
 fifo_segment_mem_status_t
 fifo_segment_get_mem_status (fifo_segment_t * fs)
 {
-  fifo_segment_header_t *fsh = fs->h;
   u8 usage = fifo_segment_get_mem_usage (fs);
 
-  return fifo_segment_determine_status (fsh, usage);
+  return fifo_segment_determine_status (fs, usage);
 }
 
 u8 *
@@ -1519,7 +1571,7 @@ format_fifo_segment (u8 * s, va_list * args)
       fss = fsh_slice_get (fsh, slice_index);
       for (i = 0; i < FS_CHUNK_VEC_LEN; i++)
 	{
-	  c = fs_chunk_ptr (fsh, fss->free_chunks[i]);
+	  c = fss_chunk_free_list_head (fsh, fss, i);
 	  if (c == 0 && fss->num_chunks[i] == 0)
 	    continue;
 	  count = 0;
@@ -1546,7 +1598,7 @@ format_fifo_segment (u8 * s, va_list * args)
   in_use = fifo_segment_size (fs) - est_free_seg_bytes - tracked_cached_bytes;
   usage = (100.0 * in_use) / allocated;
   mem_st = fifo_segment_get_mem_status (fs);
-  virt = fsh_virtual_mem (fsh);
+  virt = fs_virtual_mem (fs);
   reserved = fsh->n_reserved_bytes;
 
   s = format (s, "\n%Useg free bytes: %U (%lu) estimated: %U (%lu) reserved:"
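The watermark hunks move the memory-pressure state from fifo_segment_header_t to fifo_segment_t but keep its hysteresis: once FIFO_SEGMENT_F_MEM_LIMIT is set, fifo_segment_determine_status keeps reporting MEMORY_PRESSURE_NO_MEMORY until usage falls back below the high watermark, at which point the flag is cleared and the plain thresholds apply again. A minimal standalone sketch of that state machine, assuming watermarks of 80 and 50 and with `reached_limit` standing in for the flag; the enum names are shortened from the MEMORY_PRESSURE_* constants:

/* Illustrative reduction of the watermark hysteresis; not VPP code. */
#include <stdio.h>

enum { NO_PRESSURE, LOW_PRESSURE, HIGH_PRESSURE, NO_MEMORY };

static int reached_limit;	/* stand-in for FIFO_SEGMENT_F_MEM_LIMIT */

static int
status (int usage)
{
  if (reached_limit)
    {
      if (usage >= 80)
	return NO_MEMORY;	/* sticky until below high watermark */
      reached_limit = 0;	/* usage recovered: clear the flag */
    }
  if (usage >= 80)
    return HIGH_PRESSURE;
  if (usage >= 50)
    return LOW_PRESSURE;
  return NO_PRESSURE;
}

int
main (void)
{
  reached_limit = 1;		/* e.g., an allocation just failed */
  printf ("%d %d %d\n", status (85), status (79), status (85));
  /* prints "3 1 2": NO_MEMORY while usage stays high, LOW_PRESSURE
   * once it dips to 79, then only HIGH_PRESSURE back at 85 */
  return 0;
}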