static void
fsh_slice_collect_chunks (fifo_segment_header_t * fsh,
- fifo_segment_slice_t * fss, svm_fifo_chunk_t * cur)
+ fifo_segment_slice_t * fss, svm_fifo_chunk_t * c)
{
svm_fifo_chunk_t *next;
int fl_index;
clib_spinlock_lock (&fss->chunk_lock);
- while (cur)
+ while (c)
{
- next = cur->next;
- fl_index = fs_freelist_for_size (cur->length);
- cur->next = fss->free_chunks[fl_index];
- cur->enq_rb_index = RBTREE_TNIL_INDEX;
- cur->deq_rb_index = RBTREE_TNIL_INDEX;
- fss->free_chunks[fl_index] = cur;
+ next = c->next;
+ fl_index = fs_freelist_for_size (c->length);
+ c->next = fss->free_chunks[fl_index];
+ c->enq_rb_index = RBTREE_TNIL_INDEX;
+ c->deq_rb_index = RBTREE_TNIL_INDEX;
+ fss->free_chunks[fl_index] = c;
n_collect += fs_freelist_index_to_size (fl_index);
- cur = next;
+ c = next;
}
fss->n_fl_chunk_bytes += n_collect;
void
fsh_collect_chunks (fifo_segment_header_t * fsh, u32 slice_index,
- svm_fifo_chunk_t * cur)
+ svm_fifo_chunk_t * c)
{
fifo_segment_slice_t *fss;
fss = fsh_slice_get (fsh, slice_index);
- fsh_slice_collect_chunks (fsh, fss, cur);
+ fsh_slice_collect_chunks (fsh, fss, c);
}
/**
u32 tx_fifo_size,
u32 * n_fifo_pairs);
+/**
+ * Allocate chunks in fifo segment
+ *
+ * @param fsh fifo segment header
+ * @param slice_index slice where chunks should be allocated
+ * @param chunk_size chunk size needed
+ * @return chunk (or chunks) that cover at least chunk_size bytes
+ * on success, 0 on failure.
+ */
svm_fifo_chunk_t *fsh_alloc_chunk (fifo_segment_header_t * fsh,
u32 slice_index, u32 chunk_size);
+/**
+ * Return chunks to fifo segment
+ *
+ * @param fsh fifo segment header
+ * @param slice_index slice where chunks should be returned
+ * @param c pointer to first chunk in 0-terminated linked list
+ */
void fsh_collect_chunks (fifo_segment_header_t * fsh, u32 slice_index,
- svm_fifo_chunk_t * cur);
+ svm_fifo_chunk_t * c);
/**
* Fifo segment has reached mem limit
return c;
}
-void
-svm_fifo_add_chunk (svm_fifo_t * f, svm_fifo_chunk_t * c)
-{
- svm_fifo_chunk_t *cur, *prev;
-
- cur = c;
- prev = f->end_chunk;
-
- while (cur)
- {
- cur->start_byte = prev->start_byte + prev->length;
- cur->enq_rb_index = RBTREE_TNIL_INDEX;
- cur->deq_rb_index = RBTREE_TNIL_INDEX;
-
- prev = cur;
- cur = cur->next;
- }
-
- prev->next = 0;
- f->end_chunk->next = c;
- f->end_chunk = prev;
-
- if (!f->tail_chunk)
- f->tail_chunk = c;
-
- return;
-}
-
void
svm_fifo_free_chunk_lookup (svm_fifo_t * f)
{
}
static int
-f_try_grow (svm_fifo_t * f, u32 head, u32 tail, u32 len)
+f_try_chunk_alloc (svm_fifo_t * f, u32 head, u32 tail, u32 len)
{
- svm_fifo_chunk_t *c;
+ svm_fifo_chunk_t *c, *cur, *prev;
u32 alloc_size, free_alloced;
free_alloced = f_chunk_end (f->end_chunk) - tail;
if (PREDICT_FALSE (!c))
return -1;
- svm_fifo_add_chunk (f, c);
+ cur = c;
+ prev = f->end_chunk;
+
+ while (cur)
+ {
+ cur->start_byte = prev->start_byte + prev->length;
+ cur->enq_rb_index = RBTREE_TNIL_INDEX;
+ cur->deq_rb_index = RBTREE_TNIL_INDEX;
+
+ prev = cur;
+ cur = cur->next;
+ }
+
+ prev->next = 0;
+ f->end_chunk->next = c;
+ f->end_chunk = prev;
+
+ if (!f->tail_chunk)
+ f->tail_chunk = c;
+
return 0;
}
if (f_pos_gt (tail + len, f_chunk_end (f->end_chunk)))
{
- if (PREDICT_FALSE (f_try_grow (f, head, tail, len)))
+ if (PREDICT_FALSE (f_try_chunk_alloc (f, head, tail, len)))
{
len = f_chunk_end (f->end_chunk) - tail;
if (!len)
if (f_pos_gt (enq_pos + len, f_chunk_end (f->end_chunk)))
{
- if (PREDICT_FALSE (f_try_grow (f, head, tail, offset + len)))
+ if (PREDICT_FALSE (f_try_chunk_alloc (f, head, tail, offset + len)))
return SVM_FIFO_EGROW;
}
/**
* Drop all data from fifo
*
- * Should be called only from vpp side because of lookup cleanup
*/
void
svm_fifo_dequeue_drop_all (svm_fifo_t * f)
if (f_chunk_end (f->end_chunk) - head >= f->size)
return 0;
- if (f_try_grow (f, head, tail, f->size - (tail - head)))
+ if (f_try_chunk_alloc (f, head, tail, f->size - (tail - head)))
return SVM_FIFO_EGROW;
return 0;
*/
svm_fifo_chunk_t *svm_fifo_chunk_alloc (u32 size);
/**
- * Grow fifo size by adding chunk to chunk list
+ * Ensure the whole fifo size is writeable
*
- * If fifos are allocated on a segment, this should be called with
- * the segment's heap pushed.
+ * Allocates enough chunks to cover the whole fifo size.
*
- * @param f fifo to be extended
- * @param c chunk or linked list of chunks to be added
+ * @param f fifo
*/
-void svm_fifo_add_chunk (svm_fifo_t * f, svm_fifo_chunk_t * c);
int svm_fifo_fill_chunk_list (svm_fifo_t * f);
+/**
+ * Initialize rbtrees used for ooo lookups
+ *
+ * @param f fifo
+ * @param ooo_type type of ooo operation (0 enqueue, 1 dequeue)
+ */
void svm_fifo_init_ooo_lookup (svm_fifo_t * f, u8 ooo_type);
/**
* Free fifo and associated state
* @return 1 if sane, 0 otherwise
*/
u8 svm_fifo_is_sane (svm_fifo_t * f);
+/**
+ * Number of chunks linked into the fifo
+ *
+ * @param f fifo
+ * @return number of chunks in fifo linked list
+ */
u32 svm_fifo_n_chunks (svm_fifo_t * f);
format_function_t format_svm_fifo;
*/
u32 svm_fifo_max_write_chunk (svm_fifo_t * f);
+/**
+ * Fifo head chunk getter
+ *
+ * @param f fifo
+ * @return head chunk pointer
+ */
static inline svm_fifo_chunk_t *
svm_fifo_head_chunk (svm_fifo_t * f)
{
return f->head_chunk;
}
+/**
+ * Fifo head pointer getter
+ *
+ * @param f fifo
+ * @return head pointer
+ */
static inline u8 *
svm_fifo_head (svm_fifo_t * f)
{
return (f->head_chunk->data + (f->head - f->head_chunk->start_byte));
}
+/**
+ * Fifo tail chunk getter
+ *
+ * @param f fifo
+ * @return tail chunk pointer
+ */
static inline svm_fifo_chunk_t *
svm_fifo_tail_chunk (svm_fifo_t * f)
{
return f->tail_chunk;
}
+/**
+ * Fifo tail pointer getter
+ *
+ * @param f fifo
+ * @return tail pointer
+ */
static inline u8 *
svm_fifo_tail (svm_fifo_t * f)
{
return (f->tail_chunk->data + (f->tail - f->tail_chunk->start_byte));
}
+/**
+ * Fifo number of subscribers getter
+ *
+ * @param f fifo
+ * @return number of subscribers
+ */
static inline u8
svm_fifo_n_subscribers (svm_fifo_t * f)
{