X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fsvm%2Ffifo_segment.c;h=0c3a79a613b009db039d181e26ba4e2607885552;hb=b462418890240b2e38dbf522f9dd0196b79e0fa8;hp=a5ffd4af2ed0d9ea0d1ae1f975323cecccbcaaae;hpb=8c4fa01d1360cd5315e671de96dfeff7dae246f5;p=vpp.git diff --git a/src/svm/fifo_segment.c b/src/svm/fifo_segment.c index a5ffd4af2ed..0c3a79a613b 100644 --- a/src/svm/fifo_segment.c +++ b/src/svm/fifo_segment.c @@ -14,6 +14,37 @@ */ #include +#include + +static inline void * +fsh_alloc_aligned (fifo_segment_header_t *fsh, uword size, uword align) +{ + uword cur_pos, cur_pos_align, new_pos; + + cur_pos = clib_atomic_load_relax_n (&fsh->byte_index); + cur_pos_align = round_pow2_u64 (cur_pos, align); + size = round_pow2_u64 (size, align); + new_pos = cur_pos_align + size; + + if (new_pos >= fsh->max_byte_index) + return 0; + + while (!clib_atomic_cmp_and_swap_acq_relax (&fsh->byte_index, &cur_pos, + &new_pos, 1 /* weak */)) + { + cur_pos_align = round_pow2_u64 (cur_pos, align); + new_pos = cur_pos_align + size; + if (new_pos >= fsh->max_byte_index) + return 0; + } + return uword_to_pointer ((u8 *) fsh + cur_pos_align, void *); +} + +static inline void * +fsh_alloc (fifo_segment_header_t *fsh, uword size) +{ + return fsh_alloc_aligned (fsh, size, 8); +} static inline fifo_segment_slice_t * fsh_slice_get (fifo_segment_header_t * fsh, u32 slice_index) @@ -21,55 +52,35 @@ fsh_slice_get (fifo_segment_header_t * fsh, u32 slice_index) return &fsh->slices[slice_index]; } +static inline fifo_slice_private_t * +fs_slice_private_get (fifo_segment_t *fs, u32 slice_index) +{ + ASSERT (slice_index < fs->n_slices); + return &fs->slices[slice_index]; +} + static char *fifo_segment_mem_status_strings[] = { #define _(sym,str) str, foreach_segment_mem_status #undef _ }; -/** - * Fifo segment free space - * - * Queries the underlying memory manager, dlmalloc, for free space. Since this - * ends up walking the internal data structures, it should not be called - * indiscriminately. - * - * @param fs fifo segment - * @return number of free bytes - */ -static uword -fsh_free_space (fifo_segment_header_t * fsh) -{ - return clib_mem_get_heap_free_space (fsh->ssvm_sh->heap); -} - -static inline void -fsh_free_bytes_sub (fifo_segment_header_t * fsh, int size) -{ - clib_atomic_fetch_sub_rel (&fsh->n_free_bytes, size); -} - static inline uword fsh_n_free_bytes (fifo_segment_header_t * fsh) { - uword n_free = clib_atomic_load_relax_n (&fsh->n_free_bytes); - return n_free > fsh->n_reserved_bytes ? 
n_free - fsh->n_reserved_bytes : 0; -} - -static inline void -fsh_update_free_bytes (fifo_segment_header_t * fsh) -{ - clib_atomic_store_rel_n (&fsh->n_free_bytes, fsh_free_space (fsh)); + uword cur_pos = clib_atomic_load_relax_n (&fsh->byte_index); + ASSERT (fsh->max_byte_index > cur_pos); + return fsh->max_byte_index - cur_pos; } static inline void -fsh_cached_bytes_add (fifo_segment_header_t * fsh, int size) +fsh_cached_bytes_add (fifo_segment_header_t * fsh, uword size) { clib_atomic_fetch_add_rel (&fsh->n_cached_bytes, size); } static inline void -fsh_cached_bytes_sub (fifo_segment_header_t * fsh, int size) +fsh_cached_bytes_sub (fifo_segment_header_t * fsh, uword size) { clib_atomic_fetch_sub_rel (&fsh->n_cached_bytes, size); } @@ -116,21 +127,120 @@ fsh_virtual_mem_update (fifo_segment_header_t * fsh, u32 slice_index, fss->virtual_mem += n_bytes; } +static inline void +fss_chunk_freelist_lock (fifo_segment_slice_t *fss) +{ + u32 free = 0; + while (!clib_atomic_cmp_and_swap_acq_relax_n (&fss->chunk_lock, &free, 1, 0)) + { + /* atomic load limits number of compare_exchange executions */ + while (clib_atomic_load_relax_n (&fss->chunk_lock)) + CLIB_PAUSE (); + /* on failure, compare_exchange writes (*p)->lock into free */ + free = 0; + } +} + +static inline void +fss_chunk_freelist_unlock (fifo_segment_slice_t *fss) +{ + /* Make sure all reads/writes are complete before releasing the lock */ + clib_atomic_release (&fss->chunk_lock); +} + +static inline int +fss_chunk_fl_index_is_valid (fifo_segment_slice_t * fss, u32 fl_index) +{ + return (fl_index < FS_CHUNK_VEC_LEN); +} + static void -fsh_check_mem (fifo_segment_header_t * fsh) +fss_chunk_free_list_push (fifo_segment_header_t *fsh, + fifo_segment_slice_t *fss, u32 fl_index, + svm_fifo_chunk_t *c) { - uword thresh; + fss_chunk_freelist_lock (fss); + c->next = fss->free_chunks[fl_index]; + fss->free_chunks[fl_index] = fs_chunk_sptr (fsh, c); + fss_chunk_freelist_unlock (fss); +} - if (fsh->flags & FIFO_SEGMENT_F_MEM_LIMIT) - return; +static void +fss_chunk_free_list_push_list (fifo_segment_header_t *fsh, + fifo_segment_slice_t *fss, u32 fl_index, + svm_fifo_chunk_t *head, svm_fifo_chunk_t *tail) +{ + fss_chunk_freelist_lock (fss); + tail->next = fss->free_chunks[fl_index]; + fss->free_chunks[fl_index] = fs_chunk_sptr (fsh, head); + fss_chunk_freelist_unlock (fss); +} - thresh = clib_max (0.01 * fsh->ssvm_sh->ssvm_size, - 2 * fsh->n_reserved_bytes); - if (fsh->n_free_bytes > thresh) - return; +static svm_fifo_chunk_t * +fss_chunk_free_list_pop (fifo_segment_header_t *fsh, fifo_segment_slice_t *fss, + u32 fl_index) +{ + svm_fifo_chunk_t *c; + + ASSERT (fss_chunk_fl_index_is_valid (fss, fl_index)); - fsh->flags |= FIFO_SEGMENT_F_MEM_LIMIT; - fsh_update_free_bytes (fsh); + fss_chunk_freelist_lock (fss); + + if (!fss->free_chunks[fl_index]) + { + fss_chunk_freelist_unlock (fss); + return 0; + } + + c = fs_chunk_ptr (fsh, fss->free_chunks[fl_index]); + fss->free_chunks[fl_index] = c->next; + + fss_chunk_freelist_unlock (fss); + + return c; +} + +static inline void +pfss_fifo_add_active_list (fifo_slice_private_t *pfss, svm_fifo_t *f) +{ + if (pfss->active_fifos) + { + pfss->active_fifos->prev = f; + f->next = pfss->active_fifos; + } + pfss->active_fifos = f; +} + +static inline void +pfss_fifo_del_active_list (fifo_slice_private_t *pfss, svm_fifo_t *f) +{ + if (f->flags & SVM_FIFO_F_LL_TRACKED) + { + if (f->prev) + f->prev->next = f->next; + else + pfss->active_fifos = f->next; + if (f->next) + f->next->prev = f->prev; + } +} + +static inline 
uword +fss_fl_chunk_bytes (fifo_segment_slice_t * fss) +{ + return clib_atomic_load_relax_n (&fss->n_fl_chunk_bytes); +} + +static inline void +fss_fl_chunk_bytes_add (fifo_segment_slice_t * fss, uword size) +{ + clib_atomic_fetch_add_relax (&fss->n_fl_chunk_bytes, size); +} + +static inline void +fss_fl_chunk_bytes_sub (fifo_segment_slice_t * fss, uword size) +{ + clib_atomic_fetch_sub_relax (&fss->n_fl_chunk_bytes, size); } /** @@ -139,54 +249,42 @@ fsh_check_mem (fifo_segment_header_t * fsh) int fifo_segment_init (fifo_segment_t * fs) { + u32 align = 8, offset = 2 * 4096, slices_sz, i; + uword max_fifo, seg_start, seg_sz; fifo_segment_header_t *fsh; - fifo_segment_slice_t *fss; ssvm_shared_header_t *sh; - u32 max_chunk_sz; - uword max_fifo; - void *oldheap; - int i; + void *seg_data; + /* TODO remove ssvm heap entirely */ sh = fs->ssvm.sh; - oldheap = ssvm_push_heap (sh); - /* - * Manually align the fifo segment header to sizeof(uword) = 8 bytes. - * Long story made short: the "process-private" fifo segment - * is allocated from the main heap, not mmapped. dlmalloc - * only guarantees 4-byte alignment, and on aarch64 - * the fsh can end up 4-byte but not 8-byte aligned. - * That eventually causes the atomic op in fifo_segment_update_free_bytes - * to backfire. - */ - fsh = clib_mem_alloc_aligned (sizeof (*fsh), sizeof (uword)); - clib_memset (fsh, 0, sizeof (*fsh)); - fs->h = sh->opaque[0] = fsh; + seg_data = (u8 *) sh + offset; + seg_sz = sh->ssvm_size - offset; + fs->n_slices = clib_max (fs->n_slices, 1); + slices_sz = sizeof (fifo_segment_slice_t) * fs->n_slices; + + seg_start = round_pow2_u64 (pointer_to_uword (seg_data), align); + fsh = uword_to_pointer (seg_start, void *); + memset (fsh, 0, sizeof (*fsh) + slices_sz); - fsh->ssvm_sh = fs->ssvm.sh; + fsh->byte_index = sizeof (*fsh) + slices_sz; + fsh->max_byte_index = seg_sz; fsh->n_slices = fs->n_slices; - max_fifo = clib_min ((fsh_free_space (fsh) - 4096) / 2, - FIFO_SEGMENT_MAX_FIFO_SIZE); - fsh->max_log2_chunk_size = max_log2 (max_fifo); + max_fifo = clib_min ((seg_sz - slices_sz) / 2, FIFO_SEGMENT_MAX_FIFO_SIZE); + fsh->max_log2_fifo_size = min_log2 (max_fifo); + fsh->n_cached_bytes = 0; + fsh->n_reserved_bytes = fsh->byte_index; + ASSERT (fsh->max_byte_index <= sh->ssvm_size - offset); - fsh->slices = clib_mem_alloc (sizeof (*fss) * fs->n_slices); - clib_memset (fsh->slices, 0, sizeof (*fss) * fs->n_slices); - max_chunk_sz = fsh->max_log2_chunk_size - FIFO_SEGMENT_MIN_LOG2_FIFO_SIZE; + fs->max_byte_index = fsh->max_byte_index; + fs->h = sh->opaque[0] = fsh; + vec_validate (fs->slices, fs->n_slices - 1); for (i = 0; i < fs->n_slices; i++) - { - fss = fsh_slice_get (fsh, i); - vec_validate_init_empty (fss->free_chunks, max_chunk_sz, 0); - vec_validate_init_empty (fss->num_chunks, max_chunk_sz, 0); - clib_spinlock_init (&fss->chunk_lock); - } + fs->slices[i].fifos = + clib_mem_bulk_init (sizeof (svm_fifo_t), CLIB_CACHE_LINE_BYTES, 32); - ssvm_pop_heap (oldheap); - - fsh->n_free_bytes = fsh_free_space (fsh); - fsh->n_cached_bytes = 0; - fsh->n_reserved_bytes = clib_min (0.01 * fsh->n_free_bytes, 256 << 10); sh->ready = 1; return (0); } @@ -231,6 +329,7 @@ fifo_segment_create (fifo_segment_main_t * sm, fifo_segment_create_args_t * a) int fifo_segment_attach (fifo_segment_main_t * sm, fifo_segment_create_args_t * a) { + fifo_segment_header_t *fsh; fifo_segment_t *fs; int rv; @@ -252,8 +351,18 @@ fifo_segment_attach (fifo_segment_main_t * sm, fifo_segment_create_args_t * a) } /* Fish the segment header */ - fs->h = 
fs->ssvm.sh->opaque[0]; + fsh = fs->h = fs->ssvm.sh->opaque[0]; + /* Probably a segment without fifos */ + if (!fsh) + goto done; + + fs->max_byte_index = fsh->max_byte_index; + vec_validate (fs->slices, 0); + fs->slices[0].fifos = + clib_mem_bulk_init (sizeof (svm_fifo_t), CLIB_CACHE_LINE_BYTES, 32); + +done: vec_add1 (a->new_segment_indices, fs - sm->segments); return (0); } @@ -261,6 +370,7 @@ fifo_segment_attach (fifo_segment_main_t * sm, fifo_segment_create_args_t * a) void fifo_segment_delete (fifo_segment_main_t * sm, fifo_segment_t * s) { + fifo_segment_cleanup (s); ssvm_delete (&s->ssvm); clib_memset (s, 0xfe, sizeof (*s)); pool_put (sm->segments, s); @@ -298,7 +408,8 @@ fs_freelist_for_size (u32 size) { if (PREDICT_FALSE (size < FIFO_SEGMENT_MIN_FIFO_SIZE)) return 0; - return max_log2 (size) - FIFO_SEGMENT_MIN_LOG2_FIFO_SIZE; + return clib_min (max_log2 (size) - FIFO_SEGMENT_MIN_LOG2_FIFO_SIZE, + FS_CHUNK_VEC_LEN - 1); } static inline u32 @@ -314,32 +425,8 @@ fs_chunk_size_is_valid (fifo_segment_header_t * fsh, u32 size) * 4K minimum. It's not likely that anything good will happen * with a smaller FIFO. */ - return size >= FIFO_SEGMENT_MIN_FIFO_SIZE - && size <= (1ULL << fsh->max_log2_chunk_size); -} - -static svm_fifo_t * -fs_try_alloc_fifo_freelist (fifo_segment_slice_t * fss, u32 fl_index) -{ - svm_fifo_chunk_t *c; - svm_fifo_t *f; - - f = fss->free_fifos; - c = fss->free_chunks[fl_index]; - - if (!f || !c) - return 0; - - fss->free_fifos = f->next; - fss->free_chunks[fl_index] = c->next; - c->next = 0; - c->start_byte = 0; - memset (f, 0, sizeof (*f)); - f->start_chunk = c; - f->end_chunk = c; - - fss->n_fl_chunk_bytes -= fs_freelist_index_to_size (fl_index); - return f; + return size >= FIFO_SEGMENT_MIN_FIFO_SIZE && + size <= (1ULL << fsh->max_log2_fifo_size); } svm_fifo_chunk_t * @@ -357,11 +444,10 @@ fs_try_alloc_multi_chunk (fifo_segment_header_t * fsh, while (req_bytes) { - c = fss->free_chunks[fl_index]; + c = fss_chunk_free_list_pop (fsh, fss, fl_index); if (c) { - fss->free_chunks[fl_index] = c->next; - c->next = first; + c->next = fs_chunk_sptr (fsh, first); first = c; n_alloc += fl_size; req_bytes -= clib_min (fl_size, req_bytes); @@ -371,28 +457,29 @@ fs_try_alloc_multi_chunk (fifo_segment_header_t * fsh, /* Failed to allocate with smaller chunks */ if (fl_index == 0) { - /* free all chunks if any allocated */ + /* Free all chunks if any allocated */ c = first; while (c) { fl_index = fs_freelist_for_size (c->length); - fl_size = fs_freelist_index_to_size (fl_index); - next = c->next; - c->next = fss->free_chunks[fl_index]; - fss->free_chunks[fl_index] = c; - fss->n_fl_chunk_bytes += fl_size; + next = fs_chunk_ptr (fsh, c->next); + fss_chunk_free_list_push (fsh, fss, fl_index, c); c = next; } n_alloc = 0; first = 0; - fl_index = fs_freelist_for_size (data_bytes); - if (fss->free_chunks[fl_index + 1]) + /* As last attempt, try allocating a chunk larger than + * the requested size, if possible */ + fl_index = fs_freelist_for_size (data_bytes) + 1; + if (!fss_chunk_fl_index_is_valid (fss, fl_index)) + return 0; + first = fss_chunk_free_list_pop (fsh, fss, fl_index); + if (first) { - fl_index += 1; - fl_size = fs_freelist_index_to_size (fl_index); - continue; + first->next = 0; + n_alloc = fs_freelist_index_to_size (fl_index); + goto done; } - return 0; } fl_index -= 1; @@ -400,100 +487,40 @@ fs_try_alloc_multi_chunk (fifo_segment_header_t * fsh, } } - fss->n_fl_chunk_bytes -= n_alloc; +done: + fss_fl_chunk_bytes_sub (fss, n_alloc); fsh_cached_bytes_sub (fsh, n_alloc); 
return first; } -static svm_fifo_t * -fs_try_alloc_fifo_freelist_multi_chunk (fifo_segment_header_t * fsh, - fifo_segment_slice_t * fss, - u32 data_bytes) +static int +fsh_try_alloc_fifo_hdr_batch (fifo_segment_header_t * fsh, + fifo_segment_slice_t * fss, u32 batch_size) { - svm_fifo_chunk_t *c, *first = 0, *last = 0, *next; - u32 fl_index, fl_size, n_alloc = 0; - svm_fifo_t *f; + svm_fifo_shared_t *f; + uword size; + u8 *fmem; + int i; - f = fss->free_fifos; - if (!f) - { - if (PREDICT_FALSE (fsh_n_free_bytes (fsh) < sizeof (svm_fifo_t))) - return 0; + ASSERT (batch_size != 0); - void *oldheap = ssvm_push_heap (fsh->ssvm_sh); - f = clib_mem_alloc_aligned_or_null (sizeof (*f), CLIB_CACHE_LINE_BYTES); - ssvm_pop_heap (oldheap); - if (!f) - return 0; - memset (f, 0, sizeof (*f)); - fsh_free_bytes_sub (fsh, sizeof (*f)); - } - else - { - fss->free_fifos = f->next; - } + size = (uword) sizeof (*f) * batch_size; - fl_index = fs_freelist_for_size (data_bytes); - if (fl_index > 0) - fl_index -= 1; - - fl_size = fs_freelist_index_to_size (fl_index); + fmem = fsh_alloc_aligned (fsh, size, CLIB_CACHE_LINE_BYTES); + if (fmem == 0) + return -1; - while (data_bytes) + /* Carve fifo hdr space */ + for (i = 0; i < batch_size; i++) { - c = fss->free_chunks[fl_index]; - if (c) - { - fss->free_chunks[fl_index] = c->next; - if (!last) - last = c; - c->next = first; - first = c; - n_alloc += fl_size; - data_bytes -= clib_min (fl_size, data_bytes); - } - else - { - /* Failed to allocate with smaller chunks */ - if (fl_index == 0) - { - /* free all chunks if any allocated */ - c = first; - while (c) - { - fl_index = fs_freelist_for_size (c->length); - fl_size = fs_freelist_index_to_size (fl_index); - next = c->next; - c->next = fss->free_chunks[fl_index]; - fss->free_chunks[fl_index] = c; - fss->n_fl_chunk_bytes += fl_size; - n_alloc -= fl_size; - data_bytes += fl_size; - c = next; - } - first = last = 0; - fl_index = fs_freelist_for_size (data_bytes); - if (fss->free_chunks[fl_index + 1]) - { - fl_index += 1; - fl_size = fs_freelist_index_to_size (fl_index); - continue; - } - - f->next = fss->free_fifos; - fss->free_fifos = f; - return 0; - } - fl_index -= 1; - fl_size = fl_size >> 1; - } + f = (svm_fifo_shared_t *) fmem; + memset (f, 0, sizeof (*f)); + f->next = fss->free_fifos; + fss->free_fifos = f; + fmem += sizeof (*f); } - f->start_chunk = first; - f->end_chunk = last; - fss->n_fl_chunk_bytes -= n_alloc; - fsh_cached_bytes_sub (fsh, n_alloc); - return f; + return 0; } static int @@ -501,43 +528,38 @@ fsh_try_alloc_chunk_batch (fifo_segment_header_t * fsh, fifo_segment_slice_t * fss, u32 fl_index, u32 batch_size) { + svm_fifo_chunk_t *c, *head = 0, *tail; + uword size, total_chunk_bytes; u32 rounded_data_size; - svm_fifo_chunk_t *c; - void *oldheap; - uword size; u8 *cmem; int i; + ASSERT (batch_size != 0); + rounded_data_size = fs_freelist_index_to_size (fl_index); + total_chunk_bytes = (uword) batch_size *rounded_data_size; size = (uword) (sizeof (*c) + rounded_data_size) * batch_size; - oldheap = ssvm_push_heap (fsh->ssvm_sh); - cmem = clib_mem_alloc_aligned_at_offset (size, CLIB_CACHE_LINE_BYTES, - 0 /* align_offset */ , - 0 /* os_out_of_memory */ ); - ssvm_pop_heap (oldheap); - - /* Out of space.. 
*/ + cmem = fsh_alloc_aligned (fsh, size, 8 /* chunk hdr is 24B */); if (cmem == 0) return -1; /* Carve fifo + chunk space */ + tail = c = (svm_fifo_chunk_t *) cmem; for (i = 0; i < batch_size; i++) { - c = (svm_fifo_chunk_t *) cmem; c->start_byte = 0; c->length = rounded_data_size; - c->enq_rb_index = RBTREE_TNIL_INDEX; - c->deq_rb_index = RBTREE_TNIL_INDEX; - c->next = fss->free_chunks[fl_index]; - fss->free_chunks[fl_index] = c; + c->next = fs_chunk_sptr (fsh, head); + head = c; cmem += sizeof (*c) + rounded_data_size; + c = (svm_fifo_chunk_t *) cmem; } + fss_chunk_free_list_push_list (fsh, fss, fl_index, head, tail); fss->num_chunks[fl_index] += batch_size; - fss->n_fl_chunk_bytes += batch_size * rounded_data_size; - fsh_cached_bytes_add (fsh, batch_size * rounded_data_size); - fsh_free_bytes_sub (fsh, size); + fss_fl_chunk_bytes_add (fss, total_chunk_bytes); + fsh_cached_bytes_add (fsh, total_chunk_bytes); return 0; } @@ -547,224 +569,142 @@ fs_try_alloc_fifo_batch (fifo_segment_header_t * fsh, fifo_segment_slice_t * fss, u32 fl_index, u32 batch_size) { - u32 hdrs, rounded_data_size; - svm_fifo_chunk_t *c; - svm_fifo_t *f; - void *oldheap; - uword size; - u8 *fmem; - int i; - - rounded_data_size = fs_freelist_index_to_size (fl_index); - hdrs = sizeof (*f) + sizeof (*c); - size = (uword) (hdrs + rounded_data_size) * batch_size; - - oldheap = ssvm_push_heap (fsh->ssvm_sh); - fmem = clib_mem_alloc_aligned_at_offset (size, CLIB_CACHE_LINE_BYTES, - 0 /* align_offset */ , - 0 /* os_out_of_memory */ ); - ssvm_pop_heap (oldheap); - - /* Out of space.. */ - if (fmem == 0) - return -1; - - /* Carve fifo hdr space */ - for (i = 0; i < batch_size; i++) - { - f = (svm_fifo_t *) fmem; - memset (f, 0, sizeof (*f)); - f->next = fss->free_fifos; - fss->free_fifos = f; - fmem += sizeof (*f); - } - - /* Carve chunk space */ - for (i = 0; i < batch_size; i++) - { - c = (svm_fifo_chunk_t *) fmem; - c->start_byte = 0; - c->length = rounded_data_size; - c->enq_rb_index = RBTREE_TNIL_INDEX; - c->deq_rb_index = RBTREE_TNIL_INDEX; - c->next = fss->free_chunks[fl_index]; - fss->free_chunks[fl_index] = c; - fmem += sizeof (svm_fifo_chunk_t) + rounded_data_size; - } - - fss->num_chunks[fl_index] += batch_size; - fss->n_fl_chunk_bytes += batch_size * rounded_data_size; - fsh_cached_bytes_add (fsh, batch_size * rounded_data_size); - fsh_free_bytes_sub (fsh, size); - - return 0; + if (fsh_try_alloc_fifo_hdr_batch (fsh, fss, batch_size)) + return 0; + return fsh_try_alloc_chunk_batch (fsh, fss, fl_index, batch_size); } -/** - * Try to allocate new fifo - * - * Tries the following steps in order: - * - grab fifo and chunk from freelists - * - batch fifo and chunk allocation - * - single fifo allocation - * - grab multiple fifo chunks from freelists - */ -static svm_fifo_t * -fs_try_alloc_fifo (fifo_segment_header_t * fsh, fifo_segment_slice_t * fss, - u32 data_bytes) +static svm_fifo_shared_t * +fsh_try_alloc_fifo_hdr (fifo_segment_header_t *fsh, fifo_segment_slice_t *fss) { - u32 fifo_sz, fl_index; - svm_fifo_t *f = 0; - uword n_free_bytes; - u32 min_size; - - min_size = clib_max ((fsh->pct_first_alloc * data_bytes) / 100, 4096); - fl_index = fs_freelist_for_size (min_size); + svm_fifo_shared_t *f; - if (fl_index >= vec_len (fss->free_chunks)) - return 0; - - clib_spinlock_lock (&fss->chunk_lock); - - if (fss->free_fifos && fss->free_chunks[fl_index]) + if (!fss->free_fifos) { - f = fs_try_alloc_fifo_freelist (fss, fl_index); - if (f) - { - fsh_cached_bytes_sub (fsh, fs_freelist_index_to_size (fl_index)); - goto 
done; - } - } - - fifo_sz = sizeof (svm_fifo_t) + sizeof (svm_fifo_chunk_t); - fifo_sz += 1 << max_log2 (min_size); - n_free_bytes = fsh_n_free_bytes (fsh); - - if (fifo_sz * FIFO_SEGMENT_ALLOC_BATCH_SIZE < n_free_bytes) - { - if (!fs_try_alloc_fifo_batch (fsh, fss, fl_index, - FIFO_SEGMENT_ALLOC_BATCH_SIZE)) - { - f = fs_try_alloc_fifo_freelist (fss, fl_index); - if (f) - { - fsh_cached_bytes_sub (fsh, - fs_freelist_index_to_size (fl_index)); - goto done; - } - } - else - { - fsh_check_mem (fsh); - n_free_bytes = fsh_n_free_bytes (fsh); - } - } - if (fifo_sz <= n_free_bytes) - { - void *oldheap = ssvm_push_heap (fsh->ssvm_sh); - f = svm_fifo_alloc (min_size); - ssvm_pop_heap (oldheap); - if (f) - { - clib_atomic_fetch_add_rel (&fss->num_chunks[fl_index], 1); - fsh_free_bytes_sub (fsh, fifo_sz); - goto done; - } - fsh_check_mem (fsh); + if (fsh_try_alloc_fifo_hdr_batch (fsh, fss, + FIFO_SEGMENT_ALLOC_BATCH_SIZE)) + return 0; } - /* All failed, try to allocate min of data bytes and fifo sz */ - fifo_sz = clib_min (fifo_sz, data_bytes); - if (fifo_sz <= fss->n_fl_chunk_bytes) - f = fs_try_alloc_fifo_freelist_multi_chunk (fsh, fss, fifo_sz); -done: - clib_spinlock_unlock (&fss->chunk_lock); - - if (f) - { - f->size = data_bytes; - f->fs_hdr = fsh; - } + f = fss->free_fifos; + fss->free_fifos = f->next; + memset (f, 0, sizeof (*f)); return f; } -svm_fifo_chunk_t * -fsh_alloc_chunk (fifo_segment_header_t * fsh, u32 slice_index, u32 chunk_size) +static svm_fifo_chunk_t * +fsh_try_alloc_chunk (fifo_segment_header_t * fsh, + fifo_segment_slice_t * fss, u32 data_bytes) { - fifo_segment_slice_t *fss; svm_fifo_chunk_t *c; - int fl_index; - - fl_index = fs_freelist_for_size (chunk_size); - fss = fsh_slice_get (fsh, slice_index); - - clib_spinlock_lock (&fss->chunk_lock); + u32 fl_index; - ASSERT (vec_len (fss->free_chunks) > fl_index); - c = fss->free_chunks[fl_index]; + fl_index = fs_freelist_for_size (data_bytes); +free_list: + c = fss_chunk_free_list_pop (fsh, fss, fl_index); if (c) { - fss->free_chunks[fl_index] = c->next; c->next = 0; - fss->n_fl_chunk_bytes -= fs_freelist_index_to_size (fl_index); + fss_fl_chunk_bytes_sub (fss, fs_freelist_index_to_size (fl_index)); fsh_cached_bytes_sub (fsh, fs_freelist_index_to_size (fl_index)); } else { - void *oldheap; + u32 chunk_size, batch = FIFO_SEGMENT_ALLOC_BATCH_SIZE; uword n_free; - u32 batch; chunk_size = fs_freelist_index_to_size (fl_index); n_free = fsh_n_free_bytes (fsh); if (chunk_size <= n_free) { - oldheap = ssvm_push_heap (fsh->ssvm_sh); - c = svm_fifo_chunk_alloc (chunk_size); - ssvm_pop_heap (oldheap); - - if (c) - { - clib_atomic_fetch_add_rel (&fss->num_chunks[fl_index], 1); - fsh_free_bytes_sub (fsh, chunk_size + sizeof (*c)); - goto done; - } - - fsh_check_mem (fsh); - n_free = fsh_n_free_bytes (fsh); + batch = chunk_size * batch <= n_free ? 
batch : 1; + if (!fsh_try_alloc_chunk_batch (fsh, fss, fl_index, batch)) + goto free_list; } - if (chunk_size <= fss->n_fl_chunk_bytes) + /* Failed to allocate larger chunk, try to allocate multi-chunk + * that is close to what was actually requested */ + if (data_bytes <= fss_fl_chunk_bytes (fss)) { - c = fs_try_alloc_multi_chunk (fsh, fss, chunk_size); + c = fs_try_alloc_multi_chunk (fsh, fss, data_bytes); if (c) goto done; batch = n_free / FIFO_SEGMENT_MIN_FIFO_SIZE; if (!batch || fsh_try_alloc_chunk_batch (fsh, fss, 0, batch)) - { - fsh_check_mem (fsh); - goto done; - } + goto done; } - if (chunk_size <= fss->n_fl_chunk_bytes + n_free) + if (data_bytes <= fss_fl_chunk_bytes (fss) + n_free) { u32 min_size = FIFO_SEGMENT_MIN_FIFO_SIZE; - batch = (chunk_size - fss->n_fl_chunk_bytes) / min_size; + batch = (data_bytes - fss_fl_chunk_bytes (fss)) / min_size; batch = clib_min (batch + 1, n_free / min_size); if (fsh_try_alloc_chunk_batch (fsh, fss, 0, batch)) - { - fsh_check_mem (fsh); - goto done; - } - c = fs_try_alloc_multi_chunk (fsh, fss, chunk_size); + goto done; + c = fs_try_alloc_multi_chunk (fsh, fss, data_bytes); } } done: - clib_spinlock_unlock (&fss->chunk_lock); + return c; +} + +/** + * Try to allocate new fifo + * + * Tries the following steps in order: + * - grab fifo and chunk from freelists + * - batch fifo and chunk allocation + * - single fifo allocation + * - grab multiple fifo chunks from freelists + */ +static svm_fifo_shared_t * +fs_try_alloc_fifo (fifo_segment_header_t *fsh, u32 slice_index, u32 data_bytes) +{ + fifo_segment_slice_t *fss; + u32 fl_index, min_size; + svm_fifo_chunk_t *c; + svm_fifo_shared_t *sf = 0; + + fss = fsh_slice_get (fsh, slice_index); + min_size = clib_max ((fsh->pct_first_alloc * data_bytes) / 100, 4096); + fl_index = fs_freelist_for_size (min_size); + + if (!fss_chunk_fl_index_is_valid (fss, fl_index)) + return 0; + + sf = fsh_try_alloc_fifo_hdr (fsh, fss); + if (!sf) + return 0; + + c = fsh_try_alloc_chunk (fsh, fss, min_size); + if (!c) + { + sf->next = fss->free_fifos; + fss->free_fifos = sf; + return 0; + } + + sf->start_chunk = fs_chunk_sptr (fsh, c); + while (c->next) + c = fs_chunk_ptr (fsh, c->next); + sf->end_chunk = fs_chunk_sptr (fsh, c); + sf->size = data_bytes; + sf->slice_index = slice_index; + + return sf; +} + +svm_fifo_chunk_t * +fsh_alloc_chunk (fifo_segment_header_t * fsh, u32 slice_index, u32 chunk_size) +{ + fifo_segment_slice_t *fss; + svm_fifo_chunk_t *c; + + fss = fsh_slice_get (fsh, slice_index); + c = fsh_try_alloc_chunk (fsh, fss, chunk_size); return c; } @@ -773,29 +713,21 @@ static void fsh_slice_collect_chunks (fifo_segment_header_t * fsh, fifo_segment_slice_t * fss, svm_fifo_chunk_t * c) { + u32 n_collect = 0, fl_index; svm_fifo_chunk_t *next; - int fl_index; - u32 n_collect = 0; - - clib_spinlock_lock (&fss->chunk_lock); while (c) { CLIB_MEM_UNPOISON (c, sizeof (*c)); - next = c->next; + next = fs_chunk_ptr (fsh, c->next); fl_index = fs_freelist_for_size (c->length); - c->next = fss->free_chunks[fl_index]; - c->enq_rb_index = RBTREE_TNIL_INDEX; - c->deq_rb_index = RBTREE_TNIL_INDEX; - fss->free_chunks[fl_index] = c; + fss_chunk_free_list_push (fsh, fss, fl_index, c); n_collect += fs_freelist_index_to_size (fl_index); c = next; } - fss->n_fl_chunk_bytes += n_collect; + fss_fl_chunk_bytes_add (fss, n_collect); fsh_cached_bytes_add (fsh, n_collect); - - clib_spinlock_unlock (&fss->chunk_lock); } void @@ -807,29 +739,40 @@ fsh_collect_chunks (fifo_segment_header_t * fsh, u32 slice_index, fsh_slice_collect_chunks 
(fsh, fss, c); } -static inline void -fss_fifo_add_active_list (fifo_segment_slice_t * fss, svm_fifo_t * f) +svm_fifo_t * +fs_fifo_alloc (fifo_segment_t *fs, u32 slice_index) { - if (fss->fifos) - { - fss->fifos->prev = f; - f->next = fss->fifos; - } - fss->fifos = f; + fifo_slice_private_t *pfss = &fs->slices[slice_index]; + svm_fifo_t *f; + + f = clib_mem_bulk_alloc (pfss->fifos); + clib_memset (f, 0, sizeof (*f)); + return f; } -static inline void -fss_fifo_del_active_list (fifo_segment_slice_t * fss, svm_fifo_t * f) +void +fs_fifo_free (fifo_segment_t *fs, svm_fifo_t *f) { - if (f->flags & SVM_FIFO_F_LL_TRACKED) - { - if (f->prev) - f->prev->next = f->next; - else - fss->fifos = f->next; - if (f->next) - f->next->prev = f->prev; - } + u32 slice_index = f->shr->slice_index; + fifo_slice_private_t *pfss; + + pfss = &fs->slices[slice_index]; + clib_mem_bulk_free (pfss->fifos, f); +} + +void +fifo_segment_cleanup (fifo_segment_t *fs) +{ + int slice_index; + svm_msg_q_t *mq = 0; + + for (slice_index = 0; slice_index < fs->n_slices; slice_index++) + clib_mem_bulk_destroy (fs->slices[slice_index].fifos); + + vec_foreach (fs->mqs, mq) + vec_free (mq->rings); + + vec_free (fs->mqs); } /** @@ -840,30 +783,36 @@ fifo_segment_alloc_fifo_w_slice (fifo_segment_t * fs, u32 slice_index, u32 data_bytes, fifo_segment_ftype_t ftype) { fifo_segment_header_t *fsh = fs->h; + fifo_slice_private_t *pfss; fifo_segment_slice_t *fss; + svm_fifo_shared_t *sf; svm_fifo_t *f = 0; ASSERT (slice_index < fs->n_slices); - if (PREDICT_FALSE (data_bytes > 1 << fsh->max_log2_chunk_size)) + if (PREDICT_FALSE (data_bytes > 1 << fsh->max_log2_fifo_size)) return 0; - fss = fsh_slice_get (fsh, slice_index); - f = fs_try_alloc_fifo (fsh, fss, data_bytes); - if (!f) + sf = fs_try_alloc_fifo (fsh, slice_index, data_bytes); + if (!sf) goto done; - f->slice_index = slice_index; + f = fs_fifo_alloc (fs, slice_index); + f->fs_hdr = fsh; + f->shr = sf; svm_fifo_init (f, data_bytes); + fss = fsh_slice_get (fsh, slice_index); + pfss = fs_slice_private_get (fs, slice_index); + /* If rx fifo type add to active fifos list. When cleaning up segment, * we need a list of active sessions that should be disconnected. Since * both rx and tx fifos keep pointers to the session, it's enough to track * only one. 
*/ if (ftype == FIFO_SEGMENT_RX_FIFO) { - fss_fifo_add_active_list (fss, f); + pfss_fifo_add_active_list (pfss, f); f->flags |= SVM_FIFO_F_LL_TRACKED; } @@ -874,6 +823,19 @@ done: return (f); } +svm_fifo_t * +fifo_segment_alloc_fifo_w_shared (fifo_segment_t *fs, svm_fifo_shared_t *sf) +{ + svm_fifo_t *f = fs_fifo_alloc (fs, 0); + f->fs_hdr = fs->h; + f->shr = sf; + + f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX; + f->segment_index = SVM_FIFO_INVALID_INDEX; + f->refcnt = 1; + return f; +} + /** * Free fifo allocated in fifo segment */ @@ -881,44 +843,59 @@ void fifo_segment_free_fifo (fifo_segment_t * fs, svm_fifo_t * f) { fifo_segment_header_t *fsh = fs->h; + fifo_slice_private_t *pfss; fifo_segment_slice_t *fss; + svm_fifo_shared_t *sf; ASSERT (f->refcnt > 0); if (--f->refcnt > 0) return; - fss = fsh_slice_get (fsh, f->slice_index); + /* + * Cleanup shared state + */ + + sf = f->shr; + fss = fsh_slice_get (fsh, sf->slice_index); + pfss = fs_slice_private_get (fs, sf->slice_index); + + /* Free fifo chunks */ + fsh_slice_collect_chunks (fsh, fss, fs_chunk_ptr (fsh, f->shr->start_chunk)); + + sf->start_chunk = sf->end_chunk = 0; + sf->head_chunk = sf->tail_chunk = 0; + + /* Add to free list */ + sf->next = fss->free_fifos; + fss->free_fifos = sf; + + fss->virtual_mem -= svm_fifo_size (f); + + /* + * Cleanup private state + */ /* Remove from active list. Only rx fifos are tracked */ if (f->flags & SVM_FIFO_F_LL_TRACKED) { - fss_fifo_del_active_list (fss, f); + pfss_fifo_del_active_list (pfss, f); f->flags &= ~SVM_FIFO_F_LL_TRACKED; } - /* Free fifo chunks */ - fsh_slice_collect_chunks (fsh, fss, f->start_chunk); - - f->start_chunk = f->end_chunk = 0; - f->head_chunk = f->tail_chunk = f->ooo_enq = f->ooo_deq = 0; - - /* not allocated on segment heap */ svm_fifo_free_chunk_lookup (f); svm_fifo_free_ooo_data (f); if (CLIB_DEBUG) { - f->master_session_index = ~0; + sf->master_session_index = ~0; f->master_thread_index = ~0; } - fss->virtual_mem -= svm_fifo_size (f); - - /* Add to free list */ - f->next = fss->free_fifos; + f->ooo_enq = f->ooo_deq = 0; f->prev = 0; - fss->free_fifos = f; + + fs_fifo_free (fs, f); fsh_active_fifos_update (fsh, -1); } @@ -926,23 +903,25 @@ fifo_segment_free_fifo (fifo_segment_t * fs, svm_fifo_t * f) void fifo_segment_detach_fifo (fifo_segment_t * fs, svm_fifo_t * f) { + fifo_slice_private_t *pfss; fifo_segment_slice_t *fss; svm_fifo_chunk_t *c; u32 fl_index; ASSERT (f->refcnt == 1); - fss = fsh_slice_get (fs->h, f->slice_index); + fss = fsh_slice_get (fs->h, f->shr->slice_index); + pfss = fs_slice_private_get (fs, f->shr->slice_index); fss->virtual_mem -= svm_fifo_size (f); if (f->flags & SVM_FIFO_F_LL_TRACKED) - fss_fifo_del_active_list (fss, f); + pfss_fifo_del_active_list (pfss, f); - c = f->start_chunk; + c = fs_chunk_ptr (fs->h, f->shr->start_chunk); while (c) { fl_index = fs_freelist_for_size (c->length); clib_atomic_fetch_sub_rel (&fss->num_chunks[fl_index], 1); - c = c->next; + c = fs_chunk_ptr (fs->h, c->next); } } @@ -950,63 +929,99 @@ void fifo_segment_attach_fifo (fifo_segment_t * fs, svm_fifo_t * f, u32 slice_index) { + fifo_slice_private_t *pfss; fifo_segment_slice_t *fss; svm_fifo_chunk_t *c; u32 fl_index; - f->slice_index = slice_index; - fss = fsh_slice_get (fs->h, f->slice_index); + f->shr->slice_index = slice_index; + fss = fsh_slice_get (fs->h, f->shr->slice_index); + pfss = fs_slice_private_get (fs, f->shr->slice_index); fss->virtual_mem += svm_fifo_size (f); if (f->flags & SVM_FIFO_F_LL_TRACKED) - fss_fifo_add_active_list (fss, f); + 
pfss_fifo_add_active_list (pfss, f); - c = f->start_chunk; + c = fs_chunk_ptr (fs->h, f->shr->start_chunk); while (c) { fl_index = fs_freelist_for_size (c->length); clib_atomic_fetch_add_rel (&fss->num_chunks[fl_index], 1); - c = c->next; + c = fs_chunk_ptr (fs->h, c->next); } } -int -fifo_segment_prealloc_fifo_hdrs (fifo_segment_t * fs, u32 slice_index, - u32 batch_size) +svm_msg_q_t * +fifo_segment_msg_q_alloc (fifo_segment_t *fs, u32 mq_index, + svm_msg_q_cfg_t *cfg) { fifo_segment_header_t *fsh = fs->h; - fifo_segment_slice_t *fss; - svm_fifo_t *f; - void *oldheap; - uword size; - u8 *fmem; - int i; + svm_msg_q_shared_t *smq; + svm_msg_q_t *mq; + void *base; + u32 size; - fss = fsh_slice_get (fsh, slice_index); - size = (uword) (sizeof (*f)) * batch_size; + if (!fs->mqs) + { + u32 n_mqs = clib_max (fs->h->n_mqs, 1); + vec_validate (fs->mqs, n_mqs - 1); + } - oldheap = ssvm_push_heap (fsh->ssvm_sh); - fmem = clib_mem_alloc_aligned_at_offset (size, CLIB_CACHE_LINE_BYTES, - 0 /* align_offset */ , - 0 /* os_out_of_memory */ ); - ssvm_pop_heap (oldheap); + size = svm_msg_q_size_to_alloc (cfg); + base = fsh_alloc_aligned (fsh, size, 8); + fsh->n_reserved_bytes += size; - /* Out of space.. */ - if (fmem == 0) - return -1; + smq = svm_msg_q_init (base, cfg); + mq = vec_elt_at_index (fs->mqs, mq_index); + svm_msg_q_attach (mq, smq); - /* Carve fifo + chunk space */ - for (i = 0; i < batch_size; i++) + return mq; +} + +svm_msg_q_t * +fifo_segment_msg_q_attach (fifo_segment_t *fs, uword offset, u32 mq_index) +{ + svm_msg_q_t *mq; + + if (!fs->mqs) { - f = (svm_fifo_t *) fmem; - memset (f, 0, sizeof (*f)); - f->next = fss->free_fifos; - fss->free_fifos = f; - fmem += sizeof (*f); + u32 n_mqs = clib_max (fs->h->n_mqs, 1); + vec_validate (fs->mqs, n_mqs - 1); } - fsh_free_bytes_sub (fsh, size); + mq = vec_elt_at_index (fs->mqs, mq_index); - return 0; + if (!mq->q) + { + svm_msg_q_shared_t *smq; + smq = (svm_msg_q_shared_t *) ((u8 *) fs->h + offset); + svm_msg_q_attach (mq, smq); + } + + ASSERT (fifo_segment_msg_q_offset (fs, mq_index) == offset); + + return mq; +} + +uword +fifo_segment_msg_q_offset (fifo_segment_t *fs, u32 mq_index) +{ + svm_msg_q_t *mq = vec_elt_at_index (fs->mqs, mq_index); + + if (mq->q == 0) + return ~0ULL; + + return (uword) ((u8 *) mq->q - (u8 *) fs->h) - sizeof (svm_msg_q_shared_t); +} + +int +fifo_segment_prealloc_fifo_hdrs (fifo_segment_t * fs, u32 slice_index, + u32 batch_size) +{ + fifo_segment_header_t *fsh = fs->h; + fifo_segment_slice_t *fss; + + fss = fsh_slice_get (fsh, slice_index); + return fsh_try_alloc_fifo_hdr_batch (fsh, fss, batch_size); } int @@ -1014,13 +1029,8 @@ fifo_segment_prealloc_fifo_chunks (fifo_segment_t * fs, u32 slice_index, u32 chunk_size, u32 batch_size) { fifo_segment_header_t *fsh = fs->h; - u32 rounded_data_size, fl_index; fifo_segment_slice_t *fss; - svm_fifo_chunk_t *c; - void *oldheap; - uword size; - u8 *cmem; - int i; + u32 fl_index; if (!fs_chunk_size_is_valid (fsh, chunk_size)) { @@ -1029,38 +1039,9 @@ fifo_segment_prealloc_fifo_chunks (fifo_segment_t * fs, u32 slice_index, } fl_index = fs_freelist_for_size (chunk_size); - rounded_data_size = fs_freelist_index_to_size (fl_index); - size = (uword) (sizeof (*c) + rounded_data_size) * batch_size; - - oldheap = ssvm_push_heap (fsh->ssvm_sh); - cmem = clib_mem_alloc_aligned_at_offset (size, CLIB_CACHE_LINE_BYTES, - 0 /* align_offset */ , - 0 /* os_out_of_memory */ ); - ssvm_pop_heap (oldheap); - - /* Out of space.. 
*/ - if (cmem == 0) - return -1; - fss = fsh_slice_get (fsh, slice_index); - /* Carve fifo + chunk space */ - for (i = 0; i < batch_size; i++) - { - c = (svm_fifo_chunk_t *) cmem; - c->start_byte = 0; - c->length = rounded_data_size; - c->next = fss->free_chunks[fl_index]; - fss->free_chunks[fl_index] = c; - cmem += sizeof (*c) + rounded_data_size; - fsh_cached_bytes_add (fsh, rounded_data_size); - } - - fss->num_chunks[fl_index] += batch_size; - fss->n_fl_chunk_bytes += batch_size * rounded_data_size; - fsh_free_bytes_sub (fsh, size); - - return 0; + return fsh_try_alloc_chunk_batch (fsh, fss, fl_index, batch_size); } /** @@ -1103,7 +1084,7 @@ fifo_segment_preallocate_fifo_pairs (fifo_segment_t * fs, /* Calculate space requirements */ pair_size = 2 * hdrs + rx_rounded_data_size + tx_rounded_data_size; - space_available = fsh_free_space (fsh); + space_available = fsh_n_free_bytes (fsh); pairs_to_alloc = space_available / pair_size; pairs_to_alloc = clib_min (pairs_to_alloc, *n_fifo_pairs); pairs_per_slice = pairs_to_alloc / fs->n_slices; @@ -1114,8 +1095,11 @@ fifo_segment_preallocate_fifo_pairs (fifo_segment_t * fs, for (i = 0; i < fs->n_slices; i++) { - fss = fsh_slice_get (fsh, i); alloc_now = clib_min (pairs_per_slice, *n_fifo_pairs); + if (0 == alloc_now) + break; + + fss = fsh_slice_get (fsh, i); if (fs_try_alloc_fifo_batch (fsh, fss, rx_fl_index, alloc_now)) clib_warning ("rx prealloc failed: pairs %u", alloc_now); if (fs_try_alloc_fifo_batch (fsh, fss, tx_fl_index, alloc_now)) @@ -1138,7 +1122,7 @@ fifo_segment_num_fifos (fifo_segment_t * fs) static u32 fs_slice_num_free_fifos (fifo_segment_slice_t * fss) { - svm_fifo_t *f; + svm_fifo_shared_t *f; u32 count = 0; f = fss->free_fifos; @@ -1170,7 +1154,8 @@ fifo_segment_num_free_fifos (fifo_segment_t * fs) } static u32 -fs_slice_num_free_chunks (fifo_segment_slice_t * fss, u32 size) +fs_slice_num_free_chunks (fifo_segment_header_t *fsh, + fifo_segment_slice_t *fss, u32 size) { u32 count = 0, rounded_size, fl_index; svm_fifo_chunk_t *c; @@ -1179,15 +1164,15 @@ fs_slice_num_free_chunks (fifo_segment_slice_t * fss, u32 size) /* Count all free chunks? 
*/ if (size == ~0) { - for (i = 0; i < vec_len (fss->free_chunks); i++) + for (i = 0; i < FS_CHUNK_VEC_LEN; i++) { - c = fss->free_chunks[i]; + c = fs_chunk_ptr (fsh, fss->free_chunks[i]); if (c == 0) continue; while (c) { - c = c->next; + c = fs_chunk_ptr (fsh, c->next); count++; } } @@ -1197,16 +1182,16 @@ fs_slice_num_free_chunks (fifo_segment_slice_t * fss, u32 size) rounded_size = (1 << (max_log2 (size))); fl_index = fs_freelist_for_size (rounded_size); - if (fl_index >= vec_len (fss->free_chunks)) + if (fl_index >= FS_CHUNK_VEC_LEN) return 0; - c = fss->free_chunks[fl_index]; + c = fs_chunk_ptr (fsh, fss->free_chunks[fl_index]); if (c == 0) return 0; while (c) { - c = c->next; + c = fs_chunk_ptr (fsh, c->next); count++; } return count; @@ -1223,21 +1208,15 @@ fifo_segment_num_free_chunks (fifo_segment_t * fs, u32 size) for (slice_index = 0; slice_index < fs->n_slices; slice_index++) { fss = fsh_slice_get (fsh, slice_index); - count += fs_slice_num_free_chunks (fss, size); + count += fs_slice_num_free_chunks (fsh, fss, size); } return count; } -void -fifo_segment_update_free_bytes (fifo_segment_t * fs) -{ - fsh_update_free_bytes (fs->h); -} - uword fifo_segment_size (fifo_segment_t * fs) { - return fs->ssvm.ssvm_size; + return fs->h->max_byte_index - fs->h->n_reserved_bytes; } u8 @@ -1252,6 +1231,16 @@ fsh_reset_mem_limit (fifo_segment_header_t * fsh) fsh->flags &= ~FIFO_SEGMENT_F_MEM_LIMIT; } +void * +fifo_segment_alloc (fifo_segment_t *fs, uword size) +{ + void *rv = fsh_alloc (fs->h, size); + /* Mark externally allocated bytes as reserved. This helps + * @ref fifo_segment_size report bytes used only for fifos */ + fs->h->n_reserved_bytes += size; + return rv; +} + uword fifo_segment_free_bytes (fifo_segment_t * fs) { @@ -1281,7 +1270,7 @@ fifo_segment_fl_chunk_bytes (fifo_segment_t * fs) for (slice_index = 0; slice_index < fs->n_slices; slice_index++) { fss = fsh_slice_get (fsh, slice_index); - n_bytes += fss->n_fl_chunk_bytes; + n_bytes += fss_fl_chunk_bytes (fss); } return n_bytes; @@ -1296,11 +1285,10 @@ fifo_segment_has_fifos (fifo_segment_t * fs) svm_fifo_t * fifo_segment_get_slice_fifo_list (fifo_segment_t * fs, u32 slice_index) { - fifo_segment_header_t *fsh = fs->h; - fifo_segment_slice_t *fss; + fifo_slice_private_t *pfss; - fss = fsh_slice_get (fsh, slice_index); - return fss->fifos; + pfss = fs_slice_private_get (fs, slice_index); + return pfss->active_fifos; } u8 @@ -1424,15 +1412,15 @@ format_fifo_segment (u8 * s, va_list * args) for (slice_index = 0; slice_index < fs->n_slices; slice_index++) { fss = fsh_slice_get (fsh, slice_index); - for (i = 0; i < vec_len (fss->free_chunks); i++) + for (i = 0; i < FS_CHUNK_VEC_LEN; i++) { - c = fss->free_chunks[i]; + c = fs_chunk_ptr (fsh, fss->free_chunks[i]); if (c == 0 && fss->num_chunks[i] == 0) continue; count = 0; while (c) { - c = c->next; + c = fs_chunk_ptr (fsh, c->next); count++; } @@ -1447,7 +1435,6 @@ format_fifo_segment (u8 * s, va_list * args) fifo_hdr = free_fifos * sizeof (svm_fifo_t); est_chunk_bytes = fifo_segment_fl_chunk_bytes (fs); est_free_seg_bytes = fifo_segment_free_bytes (fs); - fifo_segment_update_free_bytes (fs); free_seg_bytes = fifo_segment_free_bytes (fs); tracked_cached_bytes = fifo_segment_cached_bytes (fs); allocated = fifo_segment_size (fs);
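
The patch above replaces the dlmalloc-backed ssvm heap with a much simpler scheme: a shared byte cursor advanced with compare-and-swap (fsh_alloc_aligned), per-slice chunk free lists guarded by a small CAS spinlock (fss_chunk_freelist_lock/unlock), and chunks linked through segment-relative offsets (fs_chunk_sptr/fs_chunk_ptr) so that processes mapping the segment at different addresses can still share them. The sketch below shows the first two ideas in isolation using standard C11 atomics; all names and the layout are invented for illustration and intentionally simpler than the VPP code (a single free list instead of one per chunk size, raw pointers instead of offsets).

/* Minimal sketch of the patch's two building blocks, written with standard
 * C11 atomics instead of the clib wrappers. Names (seg_hdr_t, chunk_push,
 * ...) are hypothetical; this is not the VPP implementation. */

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define align_up(x, a) (((x) + (a) - 1) & ~((uintptr_t) (a) - 1))

typedef struct chunk_
{
  struct chunk_ *next;	/* VPP links chunks by offset (sptr), not pointer */
  uint32_t length;
} chunk_t;

typedef struct
{
  _Atomic uintptr_t byte_index;	 /* first free offset in the region */
  uintptr_t max_byte_index;	 /* size of the backing region */
  _Atomic uint32_t chunk_lock;	 /* 0 = free, 1 = held */
  chunk_t *free_chunks;		 /* one list here; VPP keeps one per size */
  uint8_t *base;		 /* start of the backing region */
} seg_hdr_t;

/* Bump allocator: advance a shared cursor with compare-and-swap. Memory is
 * never handed back to this allocator, only recycled through free lists. */
static void *
seg_alloc_aligned (seg_hdr_t *h, size_t size, size_t align)
{
  uintptr_t cur = atomic_load_explicit (&h->byte_index, memory_order_relaxed);
  uintptr_t aligned, new_pos;

  do
    {
      aligned = align_up (cur, align);
      new_pos = aligned + align_up (size, align);
      if (new_pos >= h->max_byte_index)
	return 0;		/* segment exhausted */
    }
  while (!atomic_compare_exchange_weak_explicit (
    &h->byte_index, &cur, new_pos, memory_order_acquire,
    memory_order_relaxed));
  return h->base + aligned;
}

/* Tiny test-and-test-and-set spinlock guarding the chunk free list */
static void
chunk_list_lock (seg_hdr_t *h)
{
  uint32_t expected = 0;
  while (!atomic_compare_exchange_weak_explicit (
    &h->chunk_lock, &expected, 1, memory_order_acquire, memory_order_relaxed))
    {
      /* spin on a plain load before retrying the CAS */
      while (atomic_load_explicit (&h->chunk_lock, memory_order_relaxed))
	;
      expected = 0;
    }
}

static void
chunk_list_unlock (seg_hdr_t *h)
{
  atomic_store_explicit (&h->chunk_lock, 0, memory_order_release);
}

static void
chunk_push (seg_hdr_t *h, chunk_t *c)
{
  chunk_list_lock (h);
  c->next = h->free_chunks;
  h->free_chunks = c;
  chunk_list_unlock (h);
}

static chunk_t *
chunk_pop (seg_hdr_t *h)
{
  chunk_list_lock (h);
  chunk_t *c = h->free_chunks;
  if (c)
    h->free_chunks = c->next;
  chunk_list_unlock (h);
  return c;
}

int
main (void)
{
  uint8_t *mem = malloc (1 << 20);
  seg_hdr_t h = { .max_byte_index = 1 << 20, .base = mem };
  chunk_t *c = seg_alloc_aligned (&h, sizeof (*c) + 4096, 8);

  c->length = 4096;
  chunk_push (&h, c);		/* recycle the chunk, then grab it back */
  printf ("reused chunk of %u bytes\n", chunk_pop (&h)->length);
  free (mem);
  return 0;
}

The trade-off this design accepts is that the bump allocator never returns memory to a general-purpose heap: once carved, fifo headers and chunks can only be recycled through the free lists. That is why the patch drops fsh_free_space/fsh_update_free_bytes and the fsh_check_mem limit check, and instead derives free space directly from fsh->byte_index, max_byte_index and n_reserved_bytes.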