static uword
fsh_free_space (fifo_segment_header_t * fsh)
{
- struct dlmallinfo dlminfo;
-
- dlminfo = mspace_mallinfo (fsh->ssvm_sh->heap);
- return dlminfo.fordblks;
+ /* Ask the generic clib heap API for free bytes instead of poking
+  * dlmalloc's mallinfo directly; removes the dependency on dlmalloc
+  * internals (same value as mallinfo's fordblks). */
+ return clib_mem_get_heap_free_space (fsh->ssvm_sh->heap);
}
static inline void
baseva = a->segment_type == SSVM_SEGMENT_PRIVATE ? ~0ULL : sm->next_baseva;
fs->ssvm.ssvm_size = a->segment_size;
- fs->ssvm.i_am_master = 1;
+ fs->ssvm.is_server = 1;
fs->ssvm.my_pid = getpid ();
fs->ssvm.name = format (0, "%s%c", a->segment_name, 0);
fs->ssvm.requested_va = baseva;
- if ((rv = ssvm_master_init (&fs->ssvm, a->segment_type)))
+ if ((rv = ssvm_server_init (&fs->ssvm, a->segment_type)))
{
pool_put (sm->segments, fs);
return (rv);
else
fs->ssvm.attach_timeout = sm->timeout_in_seconds;
- if ((rv = ssvm_slave_init (&fs->ssvm, a->segment_type)))
+ if ((rv = ssvm_client_init (&fs->ssvm, a->segment_type)))
{
_vec_len (fs) = vec_len (fs) - 1;
return (rv);
f = fss->free_fifos;
if (!f)
{
+ if (PREDICT_FALSE (fsh_n_free_bytes (fsh) < sizeof (svm_fifo_t)))
+ return 0;
+
void *oldheap = ssvm_push_heap (fsh->ssvm_sh);
- f = clib_mem_alloc_aligned (sizeof (*f), CLIB_CACHE_LINE_BYTES);
+ f = clib_mem_alloc_aligned_or_null (sizeof (*f), CLIB_CACHE_LINE_BYTES);
ssvm_pop_heap (oldheap);
if (!f)
return 0;
if (fmem == 0)
return -1;
- /* Carve fifo + chunk space */
+ /* Carve fifo hdr space */
for (i = 0; i < batch_size; i++)
{
f = (svm_fifo_t *) fmem;
memset (f, 0, sizeof (*f));
f->next = fss->free_fifos;
fss->free_fifos = f;
- c = (svm_fifo_chunk_t *) (fmem + sizeof (*f));
+ fmem += sizeof (*f);
+ }
+
+ /* Carve chunk space */
+ for (i = 0; i < batch_size; i++)
+ {
+ c = (svm_fifo_chunk_t *) fmem;
c->start_byte = 0;
c->length = rounded_data_size;
c->enq_rb_index = RBTREE_TNIL_INDEX;
c->deq_rb_index = RBTREE_TNIL_INDEX;
c->next = fss->free_chunks[fl_index];
fss->free_chunks[fl_index] = c;
- fmem += hdrs + rounded_data_size;
+ fmem += sizeof (svm_fifo_chunk_t) + rounded_data_size;
}
fss->num_chunks[fl_index] += batch_size;
min_size = clib_max ((fsh->pct_first_alloc * data_bytes) / 100, 4096);
fl_index = fs_freelist_for_size (min_size);
+ if (fl_index >= vec_len (fss->free_chunks))
+ return 0;
+
clib_spinlock_lock (&fss->chunk_lock);
if (fss->free_fifos && fss->free_chunks[fl_index])
ssvm_pop_heap (oldheap);
if (f)
{
- fss->num_chunks[fl_index] += 1;
+ clib_atomic_fetch_add_rel (&fss->num_chunks[fl_index], 1);
fsh_free_bytes_sub (fsh, fifo_sz);
goto done;
}
clib_spinlock_lock (&fss->chunk_lock);
+ ASSERT (vec_len (fss->free_chunks) > fl_index);
c = fss->free_chunks[fl_index];
if (c)
if (c)
{
- fss->num_chunks[fl_index] += 1;
+ clib_atomic_fetch_add_rel (&fss->num_chunks[fl_index], 1);
fsh_free_bytes_sub (fsh, chunk_size + sizeof (*c));
goto done;
}
fsh_slice_collect_chunks (fsh, fss, c);
}
+static inline void
+fss_fifo_add_active_list (fifo_segment_slice_t * fss, svm_fifo_t * f)
+{
+  /* Insert f at the head of the slice's doubly-linked active-fifo list.
+   * Explicitly reset both links: f may arrive via
+   * fifo_segment_attach_fifo () carrying stale next/prev pointers from
+   * its previous segment, and leaving them set would corrupt the list
+   * on a later fss_fifo_del_active_list (). For freshly allocated
+   * (memset) fifos this is a no-op. */
+  f->prev = 0;
+  if (fss->fifos)
+    {
+      fss->fifos->prev = f;
+      f->next = fss->fifos;
+    }
+  else
+    f->next = 0;
+  fss->fifos = f;
+}
+
+static inline void
+fss_fifo_del_active_list (fifo_segment_slice_t * fss, svm_fifo_t * f)
+{
+ /* Unlink f from the slice's doubly-linked active-fifo list.
+  * No-op unless f was actually added (SVM_FIFO_F_LL_TRACKED set).
+  * Note: deliberately does not clear the flag or f's own next/prev
+  * pointers; callers that need the flag cleared do it themselves. */
+ if (f->flags & SVM_FIFO_F_LL_TRACKED)
+ {
+ if (f->prev)
+ f->prev->next = f->next;
+ else
+ fss->fifos = f->next;
+ if (f->next)
+ f->next->prev = f->prev;
+ }
+}
+
/**
* Allocate fifo in fifo segment
*/
ASSERT (slice_index < fs->n_slices);
+ if (PREDICT_FALSE (data_bytes > 1 << fsh->max_log2_chunk_size))
+ return 0;
+
fss = fsh_slice_get (fsh, slice_index);
f = fs_try_alloc_fifo (fsh, fss, data_bytes);
if (!f)
* only one. */
if (ftype == FIFO_SEGMENT_RX_FIFO)
{
- if (fss->fifos)
- {
- fss->fifos->prev = f;
- f->next = fss->fifos;
- }
- fss->fifos = f;
+ fss_fifo_add_active_list (fss, f);
f->flags |= SVM_FIFO_F_LL_TRACKED;
svm_fifo_init_ooo_lookup (f, 0 /* ooo enq */ );
/* Remove from active list. Only rx fifos are tracked */
if (f->flags & SVM_FIFO_F_LL_TRACKED)
{
- if (f->prev)
- f->prev->next = f->next;
- else
- fss->fifos = f->next;
- if (f->next)
- f->next->prev = f->prev;
+ fss_fifo_del_active_list (fss, f);
f->flags &= ~SVM_FIFO_F_LL_TRACKED;
}
fsh_active_fifos_update (fsh, -1);
}
+/**
+ * Detach fifo f from segment-level bookkeeping without freeing it.
+ *
+ * Removes f's contribution to the slice's virtual memory accounting,
+ * drops it from the active-fifo list (if tracked), and decrements the
+ * per-freelist chunk counters for every chunk in its chain. The fifo's
+ * memory itself is left untouched -- presumably so the fifo can be
+ * re-homed via fifo_segment_attach_fifo () (TODO confirm intended
+ * pairing with attach).
+ */
+void
+fifo_segment_detach_fifo (fifo_segment_t * fs, svm_fifo_t * f)
+{
+ fifo_segment_slice_t *fss;
+ svm_fifo_chunk_t *c;
+ u32 fl_index;
+
+ /* Only a single owner may move a fifo between segments */
+ ASSERT (f->refcnt == 1);
+
+ fss = fsh_slice_get (fs->h, f->slice_index);
+ fss->virtual_mem -= svm_fifo_size (f);
+ if (f->flags & SVM_FIFO_F_LL_TRACKED)
+ fss_fifo_del_active_list (fss, f);
+
+ /* Walk the chunk chain and undo per-size-class chunk accounting */
+ c = f->start_chunk;
+ while (c)
+ {
+ fl_index = fs_freelist_for_size (c->length);
+ clib_atomic_fetch_sub_rel (&fss->num_chunks[fl_index], 1);
+ c = c->next;
+ }
+}
+
+/**
+ * Attach fifo f to segment fs on the given slice.
+ *
+ * Mirror of fifo_segment_detach_fifo (): re-adds f's virtual memory to
+ * the slice accounting, re-links it into the active-fifo list if it was
+ * tracked, and increments the per-freelist chunk counters for its chunk
+ * chain.
+ *
+ * NOTE(review): f->next/f->prev may still point into the old segment's
+ * list here; fss_fifo_add_active_list () only rewrites them when the
+ * target list is non-empty -- verify stale links cannot leak through.
+ */
+void
+fifo_segment_attach_fifo (fifo_segment_t * fs, svm_fifo_t * f,
+ u32 slice_index)
+{
+ fifo_segment_slice_t *fss;
+ svm_fifo_chunk_t *c;
+ u32 fl_index;
+
+ f->slice_index = slice_index;
+ fss = fsh_slice_get (fs->h, f->slice_index);
+ fss->virtual_mem += svm_fifo_size (f);
+ if (f->flags & SVM_FIFO_F_LL_TRACKED)
+ fss_fifo_add_active_list (fss, f);
+
+ /* Walk the chunk chain and redo per-size-class chunk accounting */
+ c = f->start_chunk;
+ while (c)
+ {
+ fl_index = fs_freelist_for_size (c->length);
+ clib_atomic_fetch_add_rel (&fss->num_chunks[fl_index], 1);
+ c = c->next;
+ }
+}
+
int
fifo_segment_prealloc_fifo_hdrs (fifo_segment_t * fs, u32 slice_index,
u32 batch_size)
ssvm_segment_type_t st = ssvm_type (&sp->ssvm);
if (st == SSVM_SEGMENT_PRIVATE)
- s = format (s, "%s", "private-heap");
+ s = format (s, "%s", "private");
else if (st == SSVM_SEGMENT_MEMFD)
s = format (s, "%s", "memfd");
else if (st == SSVM_SEGMENT_SHM)
if (fs == 0)
{
- s = format (s, "%-15s%15s%15s%15s%15s%15s", "Name", "Type",
+ s = format (s, "%-20s%10s%15s%15s%15s%15s", "Name", "Type",
"HeapSize (M)", "ActiveFifos", "FreeFifos", "Address");
return s;
}
active_fifos = fifo_segment_num_fifos (fs);
free_fifos = fifo_segment_num_free_fifos (fs);
- s = format (s, "%-15v%15U%15llu%15u%15u%15llx", ssvm_name (&fs->ssvm),
+ s = format (s, "%-20v%10U%15llu%15u%15u%15llx", ssvm_name (&fs->ssvm),
format_fifo_segment_type, fs, size >> 20ULL, active_fifos,
free_fifos, address);