* @return number of free bytes
*/
static uword
-fs_free_space (fifo_segment_t * fs)
+fsh_free_space (fifo_segment_header_t * fsh)
{
struct dlmallinfo dlminfo;
- dlminfo = mspace_mallinfo (fs->ssvm.sh->heap);
+ dlminfo = mspace_mallinfo (fsh->ssvm_sh->heap);
return dlminfo.fordblks;
}
}
static inline void
-fsh_update_free_btes (fifo_segment_header_t * fsh)
+fsh_update_free_bytes (fifo_segment_header_t * fsh)
{
- clib_atomic_store_rel_n (&fsh->n_free_bytes,
- fs_free_space (fsh->ssvm_sh->heap));
+ clib_atomic_store_rel_n (&fsh->n_free_bytes, fsh_free_space (fsh));
}
static void
return;
fsh->flags |= FIFO_SEGMENT_F_MEM_LIMIT;
- fsh_update_free_btes (fsh);
+ fsh_update_free_bytes (fsh);
}
static inline fifo_segment_slice_t *
fsh->ssvm_sh = fs->ssvm.sh;
fsh->n_slices = fs->n_slices;
- max_fifo = clib_min ((fs_free_space (fs) - 4096) / 2,
+ max_fifo = clib_min ((fsh_free_space (fsh) - 4096) / 2,
FIFO_SEGMENT_MAX_FIFO_SIZE);
fsh->max_log2_chunk_size = max_log2 (max_fifo);
ssvm_pop_heap (oldheap);
- fsh->n_free_bytes = fs_free_space (fs);
+ fsh->n_free_bytes = fsh_free_space (fsh);
max_chunks = fsh->n_free_bytes / FIFO_SEGMENT_MIN_FIFO_SIZE;
fsh->n_reserved_bytes = (max_chunks / 4) * sizeof (rb_node_t);
sh->ready = 1;
fss->free_fifos = f->next;
}
- fl_index = fs_freelist_for_size (data_bytes) - 1;
+ fl_index = fs_freelist_for_size (data_bytes);
+ if (fl_index > 0)
+ fl_index -= 1;
+
fl_size = fs_freelist_index_to_size (fl_index);
while (data_bytes)
}
else
{
- ASSERT (fl_index > 0);
+ /* Failed to allocate with smaller chunks */
+ if (fl_index == 0)
+ {
+ /* free all chunks if any allocated */
+ c = first;
+ while (c)
+ {
+ fl_index = fs_freelist_for_size (c->length);
+ fl_size = fs_freelist_index_to_size (fl_index);
+ c->next = fss->free_chunks[fl_index];
+ fss->free_chunks[fl_index] = c;
+ fss->n_fl_chunk_bytes += fl_size;
+ data_bytes += fl_size;
+ }
+ first = last = 0;
+ fl_index = fs_freelist_for_size (data_bytes);
+ if (fss->free_chunks[fl_index + 1])
+ {
+ fl_index += 1;
+ fl_size = fs_freelist_index_to_size (fl_index);
+ continue;
+ }
+
+ f->next = fss->free_fifos;
+ fss->free_fifos = f;
+ return 0;
+ }
fl_index -= 1;
fl_size = fl_size >> 1;
}
}
+
f->start_chunk = first;
f->end_chunk = last;
last->next = first;
fifo_segment_slice_t * fss,
u32 fl_index, u32 batch_size)
{
- u32 size, hdrs, rounded_data_size;
+ u32 hdrs, rounded_data_size;
svm_fifo_chunk_t *c;
svm_fifo_t *f;
void *oldheap;
+ uword size;
u8 *fmem;
int i;
rounded_data_size = fs_freelist_index_to_size (fl_index);
hdrs = sizeof (*f) + sizeof (*c);
- size = (hdrs + rounded_data_size) * batch_size;
+ size = (uword) (hdrs + rounded_data_size) * batch_size;
oldheap = ssvm_push_heap (fsh->ssvm_sh);
fmem = clib_mem_alloc_aligned_at_offset (size, CLIB_CACHE_LINE_BYTES,
c = (svm_fifo_chunk_t *) (fmem + sizeof (*f));
c->start_byte = 0;
c->length = rounded_data_size;
+ c->rb_index = RBTREE_TNIL_INDEX;
c->next = fss->free_chunks[fl_index];
fss->free_chunks[fl_index] = c;
fmem += hdrs + rounded_data_size;
/* Initialize chunks and rbtree for multi-chunk fifos */
if (f->start_chunk->next != f->start_chunk)
- {
- void *oldheap = ssvm_push_heap (fsh->ssvm_sh);
- svm_fifo_init_chunks (f);
- ssvm_pop_heap (oldheap);
- }
+ svm_fifo_init_chunks (f);
/* If rx fifo type add to active fifos list. When cleaning up segment,
* we need a list of active sessions that should be disconnected. Since
fifo_segment_header_t *fsh = fs->h;
svm_fifo_chunk_t *cur, *next;
fifo_segment_slice_t *fss;
- void *oldheap;
int fl_index;
ASSERT (f->refcnt > 0);
fl_index = fs_freelist_for_size (cur->length);
ASSERT (fl_index < vec_len (fss->free_chunks));
cur->next = fss->free_chunks[fl_index];
+ cur->rb_index = RBTREE_TNIL_INDEX;
fss->free_chunks[fl_index] = cur;
fss->n_fl_chunk_bytes += fs_freelist_index_to_size (fl_index);
cur = next;
f->start_chunk = f->end_chunk = f->new_chunks = 0;
f->head_chunk = f->tail_chunk = f->ooo_enq = f->ooo_deq = 0;
- oldheap = ssvm_push_heap (fsh->ssvm_sh);
svm_fifo_free_chunk_lookup (f);
- ssvm_pop_heap (oldheap);
/* not allocated on segment heap */
svm_fifo_free_ooo_data (f);
fifo_segment_slice_t *fss;
svm_fifo_t *f;
void *oldheap;
- u32 size;
+ uword size;
u8 *fmem;
int i;
fss = fsh_slice_get (fsh, slice_index);
- size = (sizeof (*f)) * batch_size;
+ size = (uword) (sizeof (*f)) * batch_size;
oldheap = ssvm_push_heap (fsh->ssvm_sh);
fmem = clib_mem_alloc_aligned_at_offset (size, CLIB_CACHE_LINE_BYTES,
fifo_segment_prealloc_fifo_chunks (fifo_segment_t * fs, u32 slice_index,
u32 chunk_size, u32 batch_size)
{
- u32 size, rounded_data_size, fl_index;
fifo_segment_header_t *fsh = fs->h;
+ u32 rounded_data_size, fl_index;
fifo_segment_slice_t *fss;
svm_fifo_chunk_t *c;
void *oldheap;
+ uword size;
u8 *cmem;
int i;
fl_index = fs_freelist_for_size (chunk_size);
rounded_data_size = fs_freelist_index_to_size (fl_index);
- size = (sizeof (*c) + rounded_data_size) * batch_size;
+ size = (uword) (sizeof (*c) + rounded_data_size) * batch_size;
oldheap = ssvm_push_heap (fsh->ssvm_sh);
cmem = clib_mem_alloc_aligned_at_offset (size, CLIB_CACHE_LINE_BYTES,
u32 * n_fifo_pairs)
{
u32 rx_rounded_data_size, tx_rounded_data_size, pair_size, pairs_to_alloc;
+ u32 hdrs, pairs_per_slice, alloc_now;
fifo_segment_header_t *fsh = fs->h;
int rx_fl_index, tx_fl_index, i;
fifo_segment_slice_t *fss;
- u32 hdrs, pairs_per_slice;
uword space_available;
/* Parameter check */
/* Calculate space requirements */
pair_size = 2 * hdrs + rx_rounded_data_size + tx_rounded_data_size;
- space_available = fs_free_space (fs);
+ space_available = fsh_free_space (fsh);
pairs_to_alloc = space_available / pair_size;
pairs_to_alloc = clib_min (pairs_to_alloc, *n_fifo_pairs);
pairs_per_slice = pairs_to_alloc / fs->n_slices;
+ pairs_per_slice += pairs_to_alloc % fs->n_slices ? 1 : 0;
if (!pairs_per_slice)
return;
for (i = 0; i < fs->n_slices; i++)
{
fss = fsh_slice_get (fsh, i);
- if (fs_try_alloc_fifo_batch (fsh, fss, rx_fl_index, pairs_to_alloc))
- clib_warning ("rx prealloc failed: pairs %u", pairs_to_alloc);
- if (fs_try_alloc_fifo_batch (fsh, fss, tx_fl_index, pairs_to_alloc))
- clib_warning ("tx prealloc failed: pairs %u", pairs_to_alloc);
+ alloc_now = clib_min (pairs_per_slice, *n_fifo_pairs);
+ if (fs_try_alloc_fifo_batch (fsh, fss, rx_fl_index, alloc_now))
+ clib_warning ("rx prealloc failed: pairs %u", alloc_now);
+ if (fs_try_alloc_fifo_batch (fsh, fss, tx_fl_index, alloc_now))
+ clib_warning ("tx prealloc failed: pairs %u", alloc_now);
+
+ /* Account for the pairs allocated */
+ *n_fifo_pairs -= alloc_now;
}
-
- /* Account for the pairs allocated */
- *n_fifo_pairs -= pairs_per_slice * fs->n_slices;
}
int
fl_index = fs_freelist_for_size (chunk_size);
fss = fsh_slice_get (fsh, f->slice_index);
- oldheap = ssvm_push_heap (fsh->ssvm_sh);
-
c = fss->free_chunks[fl_index];
if (!c)
{
fsh_check_mem (fsh);
if (fsh_n_free_bytes (fsh) < chunk_size)
- {
- ssvm_pop_heap (oldheap);
- return -1;
- }
+ return -1;
+ oldheap = ssvm_push_heap (fsh->ssvm_sh);
c = svm_fifo_chunk_alloc (chunk_size);
+ ssvm_pop_heap (oldheap);
+
if (!c)
- {
- ssvm_pop_heap (oldheap);
- return -1;
- }
+ return -1;
+
fsh_free_bytes_sub (fsh, chunk_size + sizeof (*c));
}
else
svm_fifo_add_chunk (f, c);
- ssvm_pop_heap (oldheap);
return 0;
}
fifo_segment_header_t *fsh = fs->h;
svm_fifo_chunk_t *cur, *next;
fifo_segment_slice_t *fss;
- void *oldheap;
int fl_index;
- oldheap = ssvm_push_heap (fsh->ssvm_sh);
cur = svm_fifo_collect_chunks (f);
fss = fsh_slice_get (fsh, f->slice_index);
cur = next;
}
- ssvm_pop_heap (oldheap);
-
return 0;
}
/**
 * Public wrapper: refresh the cached free-byte count for a fifo segment.
 *
 * Delegates to the header-level helper so the accounting logic lives in
 * one place.
 *
 * @param fs	fifo segment whose header counter is refreshed
 */
void
fifo_segment_update_free_bytes (fifo_segment_t * fs)
{
  fsh_update_free_bytes (fs->h);
}
uword
int i;
indent = format_get_indent (s) + 2;
-#if USE_DLMALLOC == 0
- s = format (s, "%U segment heap: %U\n", format_white_space, indent,
- format_mheap, fsh->ssvm_sh->heap, verbose);
- s = format (s, "%U segment has %u active fifos\n",
- format_white_space, indent, fifo_segment_num_fifos (fsh));
-#endif
if (fs == 0)
{