return &fsh->slices[slice_index];
}
+/* Get the process-private (non-shared) state for @slice_index.
+ * Counterpart of fsh_slice_get, which returns the shared slice header;
+ * this one indexes the fifo_segment_t wrapper's private slices (holding
+ * e.g. the active-fifos list — see pfss_fifo_add_active_list). */
+static inline fifo_slice_private_t *
+fs_slice_private_get (fifo_segment_t *fs, u32 slice_index)
+{
+ ASSERT (slice_index < fs->n_slices);
+ return &fs->slices[slice_index];
+}
+
static char *fifo_segment_mem_status_strings[] = {
#define _(sym,str) str,
foreach_segment_mem_status
}
+/* Push chunk @c onto the slice free list for size class @fl_index.
+ * Free-list heads are now stored via fs_chunk_sptr — apparently a
+ * segment-relative pointer so the list stays valid across processes
+ * mapping the segment at different bases (TODO confirm against
+ * fs_chunk_sptr's definition); @fsh is needed for the conversion. */
static void
-fss_chunk_free_list_push (fifo_segment_slice_t * fss, u32 fl_index,
- svm_fifo_chunk_t * c)
+fss_chunk_free_list_push (fifo_segment_header_t *fsh,
+ fifo_segment_slice_t *fss, u32 fl_index,
+ svm_fifo_chunk_t *c)
{
fss_chunk_freelist_lock (fss);
c->next = fss->free_chunks[fl_index];
- fss->free_chunks[fl_index] = c;
+ fss->free_chunks[fl_index] = fs_chunk_sptr (fsh, c);
fss_chunk_freelist_unlock (fss);
}
+/* Push a pre-linked chunk chain [@head .. @tail] onto free list
+ * @fl_index. @tail->next is spliced to the current list head (already a
+ * converted pointer), and the new head is stored via fs_chunk_sptr —
+ * same representation as fss_chunk_free_list_push. */
static void
-fss_chunk_free_list_push_list (fifo_segment_slice_t * fss, u32 fl_index,
- svm_fifo_chunk_t * head,
- svm_fifo_chunk_t * tail)
+fss_chunk_free_list_push_list (fifo_segment_header_t *fsh,
+ fifo_segment_slice_t *fss, u32 fl_index,
+ svm_fifo_chunk_t *head, svm_fifo_chunk_t *tail)
{
fss_chunk_freelist_lock (fss);
tail->next = fss->free_chunks[fl_index];
- fss->free_chunks[fl_index] = head;
+ fss->free_chunks[fl_index] = fs_chunk_sptr (fsh, head);
fss_chunk_freelist_unlock (fss);
}
static svm_fifo_chunk_t *
-fss_chunk_free_list_pop (fifo_segment_slice_t * fss, u32 fl_index)
+fss_chunk_free_list_pop (fifo_segment_header_t *fsh, fifo_segment_slice_t *fss,
+ u32 fl_index)
{
svm_fifo_chunk_t *c;
return 0;
}
- c = fss->free_chunks[fl_index];
+ c = fs_chunk_ptr (fsh, fss->free_chunks[fl_index]);
fss->free_chunks[fl_index] = c->next;
fss_chunk_freelist_unlock (fss);
}
+/* Prepend fifo @f to the slice's doubly-linked list of active fifos.
+ * Renamed fss_* -> pfss_* because the list moved from the shared slice
+ * header into process-private slice state (fifo_slice_private_t). */
static inline void
-fss_fifo_add_active_list (fifo_segment_slice_t * fss, svm_fifo_t * f)
+pfss_fifo_add_active_list (fifo_slice_private_t *pfss, svm_fifo_t *f)
{
- if (fss->fifos)
+ if (pfss->active_fifos)
{
- fss->fifos->prev = f;
- f->next = fss->fifos;
+ pfss->active_fifos->prev = f;
+ f->next = pfss->active_fifos;
}
- fss->fifos = f;
+ pfss->active_fifos = f;
}
static inline void
-fss_fifo_del_active_list (fifo_segment_slice_t * fss, svm_fifo_t * f)
+pfss_fifo_del_active_list (fifo_slice_private_t *pfss, svm_fifo_t *f)
{
if (f->flags & SVM_FIFO_F_LL_TRACKED)
{
if (f->prev)
f->prev->next = f->next;
else
- fss->fifos = f->next;
+ pfss->active_fifos = f->next;
if (f->next)
f->next->prev = f->prev;
}
while (req_bytes)
{
- c = fss_chunk_free_list_pop (fss, fl_index);
+ c = fss_chunk_free_list_pop (fsh, fss, fl_index);
if (c)
{
- c->next = first;
+ c->next = fs_chunk_sptr (fsh, first);
first = c;
n_alloc += fl_size;
req_bytes -= clib_min (fl_size, req_bytes);
while (c)
{
fl_index = fs_freelist_for_size (c->length);
- next = c->next;
- fss_chunk_free_list_push (fss, fl_index, c);
+ next = fs_chunk_ptr (fsh, c->next);
+ fss_chunk_free_list_push (fsh, fss, fl_index, c);
c = next;
}
n_alloc = 0;
fl_index = fs_freelist_for_size (data_bytes) + 1;
if (!fss_chunk_fl_index_is_valid (fss, fl_index))
return 0;
- first = fss_chunk_free_list_pop (fss, fl_index);
+ first = fss_chunk_free_list_pop (fsh, fss, fl_index);
if (first)
{
first->next = 0;
{
c->start_byte = 0;
c->length = rounded_data_size;
- c->next = head;
+ c->next = fs_chunk_sptr (fsh, head);
head = c;
cmem += sizeof (*c) + rounded_data_size;
c = (svm_fifo_chunk_t *) cmem;
}
- fss_chunk_free_list_push_list (fss, fl_index, head, tail);
+ fss_chunk_free_list_push_list (fsh, fss, fl_index, head, tail);
fss->num_chunks[fl_index] += batch_size;
fss_fl_chunk_bytes_add (fss, total_chunk_bytes);
fsh_cached_bytes_add (fsh, total_chunk_bytes);
fl_index = fs_freelist_for_size (data_bytes);
free_list:
- c = fss_chunk_free_list_pop (fss, fl_index);
+ c = fss_chunk_free_list_pop (fsh, fss, fl_index);
if (c)
{
c->next = 0;
return 0;
}
- sf->start_chunk = c;
+ sf->start_chunk = fs_chunk_sptr (fsh, c);
while (c->next)
- c = c->next;
- sf->end_chunk = c;
+ c = fs_chunk_ptr (fsh, c->next);
+ sf->end_chunk = fs_chunk_sptr (fsh, c);
sf->size = data_bytes;
sf->slice_index = slice_index;
while (c)
{
CLIB_MEM_UNPOISON (c, sizeof (*c));
- next = c->next;
+ next = fs_chunk_ptr (fsh, c->next);
fl_index = fs_freelist_for_size (c->length);
- fss_chunk_free_list_push (fss, fl_index, c);
+ fss_chunk_free_list_push (fsh, fss, fl_index, c);
n_collect += fs_freelist_index_to_size (fl_index);
c = next;
}
fifo_segment_cleanup (fifo_segment_t *fs)
{
int slice_index;
+ svm_msg_q_t *mq = 0;
for (slice_index = 0; slice_index < fs->n_slices; slice_index++)
clib_mem_bulk_destroy (fs->slices[slice_index].fifos);
+
+ /* New: release per-process message-queue state. The ring vectors and
+ * the mqs vector itself are private allocations made lazily by
+ * fifo_segment_msg_q_alloc/attach; vec_free on a null vector is a
+ * no-op, so this is safe when no mqs were ever attached. */
+ vec_foreach (fs->mqs, mq)
+ vec_free (mq->rings);
+
+ vec_free (fs->mqs);
}
/**
u32 data_bytes, fifo_segment_ftype_t ftype)
{
fifo_segment_header_t *fsh = fs->h;
+ fifo_slice_private_t *pfss;
fifo_segment_slice_t *fss;
svm_fifo_shared_t *sf;
svm_fifo_t *f = 0;
svm_fifo_init (f, data_bytes);
fss = fsh_slice_get (fsh, slice_index);
+ pfss = fs_slice_private_get (fs, slice_index);
/* If rx fifo type add to active fifos list. When cleaning up segment,
* we need a list of active sessions that should be disconnected. Since
* only one. */
if (ftype == FIFO_SEGMENT_RX_FIFO)
{
- fss_fifo_add_active_list (fss, f);
+ pfss_fifo_add_active_list (pfss, f);
f->flags |= SVM_FIFO_F_LL_TRACKED;
}
fifo_segment_free_fifo (fifo_segment_t * fs, svm_fifo_t * f)
{
fifo_segment_header_t *fsh = fs->h;
+ fifo_slice_private_t *pfss;
fifo_segment_slice_t *fss;
svm_fifo_shared_t *sf;
sf = f->shr;
fss = fsh_slice_get (fsh, sf->slice_index);
+ pfss = fs_slice_private_get (fs, sf->slice_index);
/* Free fifo chunks */
- fsh_slice_collect_chunks (fsh, fss, sf->start_chunk);
+ fsh_slice_collect_chunks (fsh, fss, fs_chunk_ptr (fsh, f->shr->start_chunk));
sf->start_chunk = sf->end_chunk = 0;
sf->head_chunk = sf->tail_chunk = 0;
/* Remove from active list. Only rx fifos are tracked */
if (f->flags & SVM_FIFO_F_LL_TRACKED)
{
- fss_fifo_del_active_list (fss, f);
+ pfss_fifo_del_active_list (pfss, f);
f->flags &= ~SVM_FIFO_F_LL_TRACKED;
}
void
fifo_segment_detach_fifo (fifo_segment_t * fs, svm_fifo_t * f)
{
+ fifo_slice_private_t *pfss;
fifo_segment_slice_t *fss;
svm_fifo_chunk_t *c;
u32 fl_index;
ASSERT (f->refcnt == 1);
fss = fsh_slice_get (fs->h, f->shr->slice_index);
+ pfss = fs_slice_private_get (fs, f->shr->slice_index);
fss->virtual_mem -= svm_fifo_size (f);
if (f->flags & SVM_FIFO_F_LL_TRACKED)
- fss_fifo_del_active_list (fss, f);
+ pfss_fifo_del_active_list (pfss, f);
- c = f->shr->start_chunk;
+ c = fs_chunk_ptr (fs->h, f->shr->start_chunk);
while (c)
{
fl_index = fs_freelist_for_size (c->length);
clib_atomic_fetch_sub_rel (&fss->num_chunks[fl_index], 1);
- c = c->next;
+ c = fs_chunk_ptr (fs->h, c->next);
}
}
fifo_segment_attach_fifo (fifo_segment_t * fs, svm_fifo_t * f,
u32 slice_index)
{
+ fifo_slice_private_t *pfss;
fifo_segment_slice_t *fss;
svm_fifo_chunk_t *c;
u32 fl_index;
f->shr->slice_index = slice_index;
fss = fsh_slice_get (fs->h, f->shr->slice_index);
+ pfss = fs_slice_private_get (fs, f->shr->slice_index);
fss->virtual_mem += svm_fifo_size (f);
if (f->flags & SVM_FIFO_F_LL_TRACKED)
- fss_fifo_add_active_list (fss, f);
+ pfss_fifo_add_active_list (pfss, f);
- c = f->shr->start_chunk;
+ c = fs_chunk_ptr (fs->h, f->shr->start_chunk);
while (c)
{
fl_index = fs_freelist_for_size (c->length);
clib_atomic_fetch_add_rel (&fss->num_chunks[fl_index], 1);
- c = c->next;
+ c = fs_chunk_ptr (fs->h, c->next);
+ }
+}
+
+/**
+ * Allocate and attach a message queue on the fifo segment.
+ *
+ * Carves the shared queue memory out of the segment heap (accounted in
+ * fsh->n_reserved_bytes), initializes it per @cfg and attaches the
+ * process-local wrapper at @mq_index. The private mqs vector is sized
+ * lazily on first use from fsh->n_mqs (minimum 1).
+ *
+ * NOTE(review): the result of fsh_alloc_aligned is not checked; on an
+ * exhausted segment svm_msg_q_init would be handed a null base —
+ * confirm callers guarantee sufficient reserved space.
+ */
+svm_msg_q_t *
+fifo_segment_msg_q_alloc (fifo_segment_t *fs, u32 mq_index,
+ svm_msg_q_cfg_t *cfg)
+{
+ fifo_segment_header_t *fsh = fs->h;
+ svm_msg_q_shared_t *smq;
+ svm_msg_q_t *mq;
+ void *base;
+ u32 size;
+
+ /* First use: allocate the private vector of mq wrappers */
+ if (!fs->mqs)
+ {
+ u32 n_mqs = clib_max (fs->h->n_mqs, 1);
+ vec_validate (fs->mqs, n_mqs - 1);
+ }
+
+ size = svm_msg_q_size_to_alloc (cfg);
+ base = fsh_alloc_aligned (fsh, size, 8);
+ fsh->n_reserved_bytes += size;
+
+ smq = svm_msg_q_init (base, cfg);
+ mq = vec_elt_at_index (fs->mqs, mq_index);
+ svm_msg_q_attach (mq, smq);
+
+ return mq;
+}
+
+/**
+ * Attach the process-local wrapper at @mq_index to a message queue that
+ * was already initialized in the segment, located at byte @offset from
+ * the segment header. Idempotent: if the wrapper is already attached
+ * (mq->q set) the existing attachment is kept. The trailing ASSERT
+ * cross-checks that the recomputed offset round-trips to @offset.
+ */
+svm_msg_q_t *
+fifo_segment_msg_q_attach (fifo_segment_t *fs, uword offset, u32 mq_index)
+{
+ svm_msg_q_t *mq;
+
+ /* First use: allocate the private vector of mq wrappers */
+ if (!fs->mqs)
+ {
+ u32 n_mqs = clib_max (fs->h->n_mqs, 1);
+ vec_validate (fs->mqs, n_mqs - 1);
}
+
+ mq = vec_elt_at_index (fs->mqs, mq_index);
+
+ if (!mq->q)
+ {
+ svm_msg_q_shared_t *smq;
+ smq = (svm_msg_q_shared_t *) ((u8 *) fs->h + offset);
+ svm_msg_q_attach (mq, smq);
+ }
+
+ ASSERT (fifo_segment_msg_q_offset (fs, mq_index) == offset);
+
+ return mq;
+}
+
+/**
+ * Return the segment-relative byte offset of the shared message queue
+ * at @mq_index, or ~0ULL when the wrapper is not attached (mq->q == 0).
+ * Subtracting sizeof (svm_msg_q_shared_t) implies mq->q points just
+ * past the shared header — TODO confirm against svm_msg_q_attach.
+ */
+uword
+fifo_segment_msg_q_offset (fifo_segment_t *fs, u32 mq_index)
+{
+ svm_msg_q_t *mq = vec_elt_at_index (fs->mqs, mq_index);
+
+ if (mq->q == 0)
+ return ~0ULL;
+
+ return (uword) ((u8 *) mq->q - (u8 *) fs->h) - sizeof (svm_msg_q_shared_t);
}
int
}
static u32
-fs_slice_num_free_chunks (fifo_segment_slice_t * fss, u32 size)
+fs_slice_num_free_chunks (fifo_segment_header_t *fsh,
+ fifo_segment_slice_t *fss, u32 size)
{
u32 count = 0, rounded_size, fl_index;
svm_fifo_chunk_t *c;
{
for (i = 0; i < FS_CHUNK_VEC_LEN; i++)
{
- c = fss->free_chunks[i];
+ c = fs_chunk_ptr (fsh, fss->free_chunks[i]);
if (c == 0)
continue;
while (c)
{
- c = c->next;
+ c = fs_chunk_ptr (fsh, c->next);
count++;
}
}
if (fl_index >= FS_CHUNK_VEC_LEN)
return 0;
- c = fss->free_chunks[fl_index];
+ c = fs_chunk_ptr (fsh, fss->free_chunks[fl_index]);
if (c == 0)
return 0;
while (c)
{
- c = c->next;
+ c = fs_chunk_ptr (fsh, c->next);
count++;
}
return count;
for (slice_index = 0; slice_index < fs->n_slices; slice_index++)
{
fss = fsh_slice_get (fsh, slice_index);
- count += fs_slice_num_free_chunks (fss, size);
+ count += fs_slice_num_free_chunks (fsh, fss, size);
}
return count;
}
+/* Return the head of the active (rx-tracked) fifo list for
+ * @slice_index. The list now lives in process-private slice state, so
+ * this reads pfss->active_fifos instead of the shared slice header. */
svm_fifo_t *
fifo_segment_get_slice_fifo_list (fifo_segment_t * fs, u32 slice_index)
{
- fifo_segment_header_t *fsh = fs->h;
- fifo_segment_slice_t *fss;
+ fifo_slice_private_t *pfss;
- fss = fsh_slice_get (fsh, slice_index);
- return fss->fifos;
+ pfss = fs_slice_private_get (fs, slice_index);
+ return pfss->active_fifos;
}
u8
fss = fsh_slice_get (fsh, slice_index);
for (i = 0; i < FS_CHUNK_VEC_LEN; i++)
{
- c = fss->free_chunks[i];
+ c = fs_chunk_ptr (fsh, fss->free_chunks[i]);
if (c == 0 && fss->num_chunks[i] == 0)
continue;
count = 0;
while (c)
{
- c = c->next;
+ c = fs_chunk_ptr (fsh, c->next);
count++;
}