clib_atomic_fetch_add_rel (&fsh->n_active_fifos, inc);
}
+/** Snapshot of the segment-wide active-fifo counter.
+ *
+ * Relaxed atomic load: callers only need an approximate, monotonic-ish
+ * view (the counter is bumped with clib_atomic_fetch_add_rel elsewhere),
+ * not a synchronized read.
+ */
+static inline u32
+fsh_n_active_fifos (fifo_segment_header_t * fsh)
+{
+ return clib_atomic_load_relax_n (&fsh->n_active_fifos);
+}
+
static inline uword
fsh_virtual_mem (fifo_segment_header_t * fsh)
{
f = fss->free_fifos;
if (!f)
{
+ if (PREDICT_FALSE (fsh_n_free_bytes (fsh) < sizeof (svm_fifo_t)))
+ return 0;
+
void *oldheap = ssvm_push_heap (fsh->ssvm_sh);
- f = clib_mem_alloc_aligned (sizeof (*f), CLIB_CACHE_LINE_BYTES);
+ f = clib_mem_alloc_aligned_or_null (sizeof (*f), CLIB_CACHE_LINE_BYTES);
ssvm_pop_heap (oldheap);
if (!f)
return 0;
if (fmem == 0)
return -1;
- /* Carve fifo + chunk space */
+ /* Carve fifo hdr space */
for (i = 0; i < batch_size; i++)
{
f = (svm_fifo_t *) fmem;
memset (f, 0, sizeof (*f));
f->next = fss->free_fifos;
fss->free_fifos = f;
- c = (svm_fifo_chunk_t *) (fmem + sizeof (*f));
+ fmem += sizeof (*f);
+ }
+
+ /* Carve chunk space */
+ for (i = 0; i < batch_size; i++)
+ {
+ c = (svm_fifo_chunk_t *) fmem;
c->start_byte = 0;
c->length = rounded_data_size;
c->enq_rb_index = RBTREE_TNIL_INDEX;
c->deq_rb_index = RBTREE_TNIL_INDEX;
c->next = fss->free_chunks[fl_index];
fss->free_chunks[fl_index] = c;
- fmem += hdrs + rounded_data_size;
+ fmem += sizeof (svm_fifo_chunk_t) + rounded_data_size;
}
fss->num_chunks[fl_index] += batch_size;
min_size = clib_max ((fsh->pct_first_alloc * data_bytes) / 100, 4096);
fl_index = fs_freelist_for_size (min_size);
+ if (fl_index >= vec_len (fss->free_chunks))
+ return 0;
+
clib_spinlock_lock (&fss->chunk_lock);
if (fss->free_fifos && fss->free_chunks[fl_index])
ssvm_pop_heap (oldheap);
if (f)
{
- fss->num_chunks[fl_index] += 1;
+ clib_atomic_fetch_add_rel (&fss->num_chunks[fl_index], 1);
fsh_free_bytes_sub (fsh, fifo_sz);
goto done;
}
clib_spinlock_lock (&fss->chunk_lock);
+ ASSERT (vec_len (fss->free_chunks) > fl_index);
c = fss->free_chunks[fl_index];
if (c)
if (c)
{
- fss->num_chunks[fl_index] += 1;
+ clib_atomic_fetch_add_rel (&fss->num_chunks[fl_index], 1);
fsh_free_bytes_sub (fsh, chunk_size + sizeof (*c));
goto done;
}
fsh_slice_collect_chunks (fsh, fss, c);
}
+/** Push fifo at the head of the slice's active-fifos doubly linked list.
+ *
+ * Explicitly reset f's links first: a fifo moved between slices
+ * (fifo_segment_attach_fifo after a detach) can still carry next/prev
+ * pointers into its previous slice's list; inserting it with stale links
+ * would corrupt traversals of the new list.
+ */
+static inline void
+fss_fifo_add_active_list (fifo_segment_slice_t * fss, svm_fifo_t * f)
+{
+ f->next = 0;
+ f->prev = 0;
+ if (fss->fifos)
+ {
+ fss->fifos->prev = f;
+ f->next = fss->fifos;
+ }
+ fss->fifos = f;
+}
+
+/** Unlink fifo from the slice's active-fifos doubly linked list.
+ *
+ * No-op unless the fifo is tracked (SVM_FIFO_F_LL_TRACKED); only rx
+ * fifos are placed on the active list. The flag itself is NOT cleared
+ * here — callers that retire the fifo clear it, while detach keeps it
+ * set so a later attach re-links the fifo (see fifo_segment_detach_fifo).
+ */
+static inline void
+fss_fifo_del_active_list (fifo_segment_slice_t * fss, svm_fifo_t * f)
+{
+ if (f->flags & SVM_FIFO_F_LL_TRACKED)
+ {
+ /* Splice out: fix predecessor (or list head) and successor links */
+ if (f->prev)
+ f->prev->next = f->next;
+ else
+ fss->fifos = f->next;
+ if (f->next)
+ f->next->prev = f->prev;
+ }
+}
+
/**
* Allocate fifo in fifo segment
*/
ASSERT (slice_index < fs->n_slices);
+ if (PREDICT_FALSE (data_bytes > 1 << fsh->max_log2_chunk_size))
+ return 0;
+
fss = fsh_slice_get (fsh, slice_index);
f = fs_try_alloc_fifo (fsh, fss, data_bytes);
if (!f)
* only one. */
if (ftype == FIFO_SEGMENT_RX_FIFO)
{
- if (fss->fifos)
- {
- fss->fifos->prev = f;
- f->next = fss->fifos;
- }
- fss->fifos = f;
+ fss_fifo_add_active_list (fss, f);
f->flags |= SVM_FIFO_F_LL_TRACKED;
svm_fifo_init_ooo_lookup (f, 0 /* ooo enq */ );
/* Remove from active list. Only rx fifos are tracked */
if (f->flags & SVM_FIFO_F_LL_TRACKED)
{
- if (f->prev)
- f->prev->next = f->next;
- else
- fss->fifos = f->next;
- if (f->next)
- f->next->prev = f->prev;
+ fss_fifo_del_active_list (fss, f);
f->flags &= ~SVM_FIFO_F_LL_TRACKED;
}
fsh_active_fifos_update (fsh, -1);
}
+/** Detach fifo from its fifo segment's per-slice accounting.
+ *
+ * Removes the fifo's contribution to the slice's virtual-memory tally,
+ * unlinks it from the active list, and decrements the per-freelist chunk
+ * counters for every chunk the fifo owns. The fifo's memory itself is
+ * NOT freed — this pairs with fifo_segment_attach_fifo to move a fifo
+ * between segments/slices. Note SVM_FIFO_F_LL_TRACKED is left set so
+ * the subsequent attach re-adds the fifo to the new slice's list.
+ */
+void
+fifo_segment_detach_fifo (fifo_segment_t * fs, svm_fifo_t * f)
+{
+ fifo_segment_slice_t *fss;
+ svm_fifo_chunk_t *c;
+ u32 fl_index;
+
+ /* Moving a fifo that is still shared would corrupt accounting */
+ ASSERT (f->refcnt == 1);
+
+ fss = fsh_slice_get (fs->h, f->slice_index);
+ fss->virtual_mem -= svm_fifo_size (f);
+ if (f->flags & SVM_FIFO_F_LL_TRACKED)
+ fss_fifo_del_active_list (fss, f);
+
+ /* Walk the chunk list and drop each chunk from its size-class count */
+ c = f->start_chunk;
+ while (c)
+ {
+ fl_index = fs_freelist_for_size (c->length);
+ clib_atomic_fetch_sub_rel (&fss->num_chunks[fl_index], 1);
+ c = c->next;
+ }
+}
+
+/** Attach a (previously detached) fifo to a slice of this segment.
+ *
+ * Mirror of fifo_segment_detach_fifo: records the fifo's virtual-memory
+ * footprint on the target slice, re-links it on the active list if it
+ * was tracked before the move, and bumps the per-freelist chunk counters
+ * for every chunk the fifo owns.
+ */
+void
+fifo_segment_attach_fifo (fifo_segment_t * fs, svm_fifo_t * f,
+ u32 slice_index)
+{
+ fifo_segment_slice_t *fss;
+ svm_fifo_chunk_t *c;
+ u32 fl_index;
+
+ f->slice_index = slice_index;
+ fss = fsh_slice_get (fs->h, f->slice_index);
+ fss->virtual_mem += svm_fifo_size (f);
+ if (f->flags & SVM_FIFO_F_LL_TRACKED)
+ fss_fifo_add_active_list (fss, f);
+
+ /* Walk the chunk list and add each chunk to its size-class count */
+ c = f->start_chunk;
+ while (c)
+ {
+ fl_index = fs_freelist_for_size (c->length);
+ clib_atomic_fetch_add_rel (&fss->num_chunks[fl_index], 1);
+ c = c->next;
+ }
+}
+
int
fifo_segment_prealloc_fifo_hdrs (fifo_segment_t * fs, u32 slice_index,
u32 batch_size)
u32
fifo_segment_num_fifos (fifo_segment_t * fs)
{
- return clib_atomic_load_relax_n (&fs->h->n_active_fifos);
+ /* Use the shared header helper instead of open-coding the atomic load */
+ return fsh_n_active_fifos (fs->h);
}
static u32
u8
fifo_segment_has_fifos (fifo_segment_t * fs)
{
- fifo_segment_header_t *fsh = fs->h;
- fifo_segment_slice_t *fss;
- int slice_index;
-
- for (slice_index = 0; slice_index < fs->n_slices; slice_index++)
- {
- fss = fsh_slice_get (fsh, slice_index);
- if (fss->fifos)
- return 1;
- }
- return 0;
+ /* O(1): check the segment-wide counter instead of scanning every
+ * slice's active list. Counter is maintained by fsh_active_fifos_update,
+ * so nonzero implies at least one allocated fifo. */
+ return (fsh_n_active_fifos (fs->h) != 0);
}
svm_fifo_t *