STATIC_ASSERT (VLIB_BUFFER_PRE_DATA_SIZE == RTE_PKTMBUF_HEADROOM,
"VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM");
-#define BUFFERS_PER_COPY (sizeof (vlib_copy_unit_t) / sizeof (u32))
-
-/* Make sure we have at least given number of unaligned buffers. */
-static void
-fill_unaligned (vlib_main_t * vm,
- vlib_buffer_free_list_t * free_list,
- uword n_unaligned_buffers)
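+/* Free a vlib buffer chain back to the DPDK mempool, applying any
+   deferred reference count updates before each segment is released. */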
+static_always_inline void
+dpdk_rte_pktmbuf_free (vlib_main_t * vm, vlib_buffer_t * b)
{
- word la = vec_len (free_list->aligned_buffers);
- word lu = vec_len (free_list->unaligned_buffers);
-
- /* Aligned come in aligned copy-sized chunks. */
- ASSERT (la % BUFFERS_PER_COPY == 0);
+ struct rte_mbuf *mb;
+ u32 next, flags;
- ASSERT (la >= n_unaligned_buffers);
+next:
+ flags = b->flags;
+ next = b->next_buffer;
+ mb = rte_mbuf_from_vlib_buffer (b);
- while (lu < n_unaligned_buffers)
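+ /* Fold deferred vlib references into the mbuf refcount before freeing. */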
+ if (PREDICT_FALSE (b->n_add_refs))
{
- /* Copy 4 buffers from end of aligned vector to unaligned vector. */
- vec_add (free_list->unaligned_buffers,
- free_list->aligned_buffers + la - BUFFERS_PER_COPY,
- BUFFERS_PER_COPY);
- la -= BUFFERS_PER_COPY;
- lu += BUFFERS_PER_COPY;
+ rte_mbuf_refcnt_update (mb, b->n_add_refs);
+ b->n_add_refs = 0;
}
- _vec_len (free_list->aligned_buffers) = la;
-}
-
-/* After free aligned buffers may not contain even sized chunks. */
-static void
-trim_aligned (vlib_buffer_free_list_t * f)
-{
- uword l, n_trim;
-
- /* Add unaligned to aligned before trim. */
- l = vec_len (f->unaligned_buffers);
- if (l > 0)
- {
- vec_add_aligned (f->aligned_buffers, f->unaligned_buffers, l,
- /* align */ sizeof (vlib_copy_unit_t));
- _vec_len (f->unaligned_buffers) = 0;
- }
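+ /* Drop this segment's reference; DPDK returns the mbuf to its mempool
+    once the refcount reaches zero. */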
+ rte_pktmbuf_free_seg (mb);
- /* Remove unaligned buffers from end of aligned vector and save for next trim. */
- l = vec_len (f->aligned_buffers);
- n_trim = l % BUFFERS_PER_COPY;
- if (n_trim)
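+ /* Continue with the next segment of the chain, if any. */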
+ if (flags & VLIB_BUFFER_NEXT_PRESENT)
{
- /* Trim aligned -> unaligned. */
- vec_add (f->unaligned_buffers, f->aligned_buffers + l - n_trim, n_trim);
-
- /* Remove from aligned. */
- _vec_len (f->aligned_buffers) = l - n_trim;
+ b = vlib_get_buffer (vm, next);
+ goto next;
}
}
-static void
-merge_free_lists (vlib_buffer_free_list_t * dst,
- vlib_buffer_free_list_t * src)
-{
- uword l;
- u32 *d;
-
- trim_aligned (src);
- trim_aligned (dst);
-
- l = vec_len (src->aligned_buffers);
- if (l > 0)
- {
- vec_add2_aligned (dst->aligned_buffers, d, l,
- /* align */ sizeof (vlib_copy_unit_t));
- clib_memcpy (d, src->aligned_buffers, l * sizeof (d[0]));
- vec_free (src->aligned_buffers);
- }
-
- l = vec_len (src->unaligned_buffers);
- if (l > 0)
- {
- vec_add (dst->unaligned_buffers, src->unaligned_buffers, l);
- vec_free (src->unaligned_buffers);
- }
-}
-
-always_inline u32
-dpdk_buffer_get_free_list_with_size (vlib_main_t * vm, u32 size)
-{
- vlib_buffer_main_t *bm = vm->buffer_main;
-
- size = vlib_buffer_round_size (size);
- uword *p = hash_get (bm->free_list_by_size, size);
- return p ? p[0] : ~0;
-}
-
static void
del_free_list (vlib_main_t * vm, vlib_buffer_free_list_t * f)
{
u32 i;
- struct rte_mbuf *mb;
vlib_buffer_t *b;
- for (i = 0; i < vec_len (f->unaligned_buffers); i++)
+ for (i = 0; i < vec_len (f->buffers); i++)
{
- b = vlib_get_buffer (vm, f->unaligned_buffers[i]);
- mb = rte_mbuf_from_vlib_buffer (b);
- ASSERT (rte_mbuf_refcnt_read (mb) == 1);
- rte_pktmbuf_free (mb);
- }
- for (i = 0; i < vec_len (f->aligned_buffers); i++)
- {
- b = vlib_get_buffer (vm, f->aligned_buffers[i]);
- mb = rte_mbuf_from_vlib_buffer (b);
- ASSERT (rte_mbuf_refcnt_read (mb) == 1);
- rte_pktmbuf_free (mb);
+ b = vlib_get_buffer (vm, f->buffers[i]);
+ dpdk_rte_pktmbuf_free (vm, b);
}
+
vec_free (f->name);
- vec_free (f->unaligned_buffers);
- vec_free (f->aligned_buffers);
+ vec_free (f->buffers);
}
/* Add buffer free list. */
f = vlib_buffer_get_free_list (vm, free_list_index);
- merge_index = dpdk_buffer_get_free_list_with_size (vm, f->n_data_bytes);
+ merge_index = vlib_buffer_get_free_list_with_size (vm, f->n_data_bytes);
if (merge_index != ~0 && merge_index != free_list_index)
{
- merge_free_lists (pool_elt_at_index (bm->buffer_free_list_pool,
- merge_index), f);
+ vlib_buffer_merge_free_lists (pool_elt_at_index
+ (bm->buffer_free_list_pool, merge_index),
+ f);
}
del_free_list (vm, f);
vlib_buffer_free_list_t * fl, uword min_free_buffers)
{
dpdk_main_t *dm = &dpdk_main;
- vlib_buffer_t *b;
+ vlib_buffer_t *b0, *b1, *b2, *b3;
int n, i;
- u32 bi;
- u32 n_remaining = 0, n_alloc = 0;
+ u32 bi0, bi1, bi2, bi3;
unsigned socket_id = rte_socket_id ();
struct rte_mempool *rmp = dm->pktmbuf_pools[socket_id];
- struct rte_mbuf *mb;
+ struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
/* Too early? */
if (PREDICT_FALSE (rmp == 0))
return 0;
- trim_aligned (fl);
-
/* Already have enough free buffers on free list? */
- n = min_free_buffers - vec_len (fl->aligned_buffers);
+ n = min_free_buffers - vec_len (fl->buffers);
if (n <= 0)
return min_free_buffers;
/* Always allocate round number of buffers. */
- n = round_pow2 (n, BUFFERS_PER_COPY);
+ n = round_pow2 (n, CLIB_CACHE_LINE_BYTES / sizeof (u32));
/* Always allocate new buffers in reasonably large sized chunks. */
n = clib_max (n, fl->min_n_buffers_each_physmem_alloc);
_vec_len (vm->mbuf_alloc_list) = n;
- for (i = 0; i < n; i++)
+ i = 0;
+
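+ /* Convert mbufs to vlib buffers four at a time, prefetching buffer
+    headers a few entries ahead. */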
+ while (i < (n - 7))
{
- mb = vm->mbuf_alloc_list[i];
+ vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf
+ (vm->mbuf_alloc_list[i + 4]), STORE);
+ vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf
+ (vm->mbuf_alloc_list[i + 5]), STORE);
+ vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf
+ (vm->mbuf_alloc_list[i + 6]), STORE);
+ vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf
+ (vm->mbuf_alloc_list[i + 7]), STORE);
+
+ mb0 = vm->mbuf_alloc_list[i];
+ mb1 = vm->mbuf_alloc_list[i + 1];
+ mb2 = vm->mbuf_alloc_list[i + 2];
+ mb3 = vm->mbuf_alloc_list[i + 3];
+
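+ /* Mbufs from the mempool are expected to have a zero refcount; take the
+    initial reference before handing them to the free list. */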
+ ASSERT (rte_mbuf_refcnt_read (mb0) == 0);
+ ASSERT (rte_mbuf_refcnt_read (mb1) == 0);
+ ASSERT (rte_mbuf_refcnt_read (mb2) == 0);
+ ASSERT (rte_mbuf_refcnt_read (mb3) == 0);
+
+ rte_mbuf_refcnt_set (mb0, 1);
+ rte_mbuf_refcnt_set (mb1, 1);
+ rte_mbuf_refcnt_set (mb2, 1);
+ rte_mbuf_refcnt_set (mb3, 1);
+
+ b0 = vlib_buffer_from_rte_mbuf (mb0);
+ b1 = vlib_buffer_from_rte_mbuf (mb1);
+ b2 = vlib_buffer_from_rte_mbuf (mb2);
+ b3 = vlib_buffer_from_rte_mbuf (mb3);
+
+ bi0 = vlib_get_buffer_index (vm, b0);
+ bi1 = vlib_get_buffer_index (vm, b1);
+ bi2 = vlib_get_buffer_index (vm, b2);
+ bi3 = vlib_get_buffer_index (vm, b3);
+
+ vec_add1_aligned (fl->buffers, bi0, CLIB_CACHE_LINE_BYTES);
+ vec_add1_aligned (fl->buffers, bi1, CLIB_CACHE_LINE_BYTES);
+ vec_add1_aligned (fl->buffers, bi2, CLIB_CACHE_LINE_BYTES);
+ vec_add1_aligned (fl->buffers, bi3, CLIB_CACHE_LINE_BYTES);
+
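+ /* Initialize the vlib buffer headers and run the per-free-list init
+    callback, if one is registered. */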
+ vlib_buffer_init_for_free_list (b0, fl);
+ vlib_buffer_init_for_free_list (b1, fl);
+ vlib_buffer_init_for_free_list (b2, fl);
+ vlib_buffer_init_for_free_list (b3, fl);
+
+ if (fl->buffer_init_function)
+ {
+ fl->buffer_init_function (vm, fl, &bi0, 1);
+ fl->buffer_init_function (vm, fl, &bi1, 1);
+ fl->buffer_init_function (vm, fl, &bi2, 1);
+ fl->buffer_init_function (vm, fl, &bi3, 1);
+ }
+ i += 4;
+ }
- ASSERT (rte_mbuf_refcnt_read (mb) == 0);
- rte_mbuf_refcnt_set (mb, 1);
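+ /* Handle any leftover mbufs one at a time. */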
+ while (i < n)
+ {
+ mb0 = vm->mbuf_alloc_list[i];
- b = vlib_buffer_from_rte_mbuf (mb);
- bi = vlib_get_buffer_index (vm, b);
+ ASSERT (rte_mbuf_refcnt_read (mb0) == 0);
+ rte_mbuf_refcnt_set (mb0, 1);
- vec_add1_aligned (fl->aligned_buffers, bi, sizeof (vlib_copy_unit_t));
- n_alloc++;
- n_remaining--;
+ b0 = vlib_buffer_from_rte_mbuf (mb0);
+ bi0 = vlib_get_buffer_index (vm, b0);
- vlib_buffer_init_for_free_list (b, fl);
+ vec_add1_aligned (fl->buffers, bi0, CLIB_CACHE_LINE_BYTES);
+
+ vlib_buffer_init_for_free_list (b0, fl);
if (fl->buffer_init_function)
- fl->buffer_init_function (vm, fl, &bi, 1);
+ fl->buffer_init_function (vm, fl, &bi0, 1);
+ i++;
}
fl->n_alloc += n;
return n;
}
-always_inline uword
-copy_alignment (u32 * x)
-{
- return (pointer_to_uword (x) / sizeof (x[0])) % BUFFERS_PER_COPY;
-}
-
static u32
alloc_from_free_list (vlib_main_t * vm,
vlib_buffer_free_list_t * free_list,
u32 * alloc_buffers, u32 n_alloc_buffers)
{
- u32 *dst, *u_src;
- uword u_len, n_left;
- uword n_unaligned_start, n_unaligned_end, n_filled;
+ u32 *dst, *src;
+ uword len, n_filled;
- n_left = n_alloc_buffers;
dst = alloc_buffers;
- n_unaligned_start = ((BUFFERS_PER_COPY - copy_alignment (dst))
- & (BUFFERS_PER_COPY - 1));
n_filled = fill_free_list (vm, free_list, n_alloc_buffers);
if (n_filled == 0)
return 0;
- n_left = n_filled < n_left ? n_filled : n_left;
- n_alloc_buffers = n_left;
-
- if (n_unaligned_start >= n_left)
- {
- n_unaligned_start = n_left;
- n_unaligned_end = 0;
- }
- else
- n_unaligned_end = copy_alignment (dst + n_alloc_buffers);
-
- fill_unaligned (vm, free_list, n_unaligned_start + n_unaligned_end);
-
- u_len = vec_len (free_list->unaligned_buffers);
- u_src = free_list->unaligned_buffers + u_len - 1;
-
- if (n_unaligned_start)
- {
- uword n_copy = n_unaligned_start;
- if (n_copy > n_left)
- n_copy = n_left;
- n_left -= n_copy;
-
- while (n_copy > 0)
- {
- *dst++ = *u_src--;
- n_copy--;
- u_len--;
- }
-
- /* Now dst should be aligned. */
- if (n_left > 0)
- ASSERT (pointer_to_uword (dst) % sizeof (vlib_copy_unit_t) == 0);
- }
-
- /* Aligned copy. */
- {
- vlib_copy_unit_t *d, *s;
- uword n_copy;
-
- if (vec_len (free_list->aligned_buffers) <
- ((n_left / BUFFERS_PER_COPY) * BUFFERS_PER_COPY))
- abort ();
-
- n_copy = n_left / BUFFERS_PER_COPY;
- n_left = n_left % BUFFERS_PER_COPY;
-
- /* Remove buffers from aligned free list. */
- _vec_len (free_list->aligned_buffers) -= n_copy * BUFFERS_PER_COPY;
-
- s = (vlib_copy_unit_t *) vec_end (free_list->aligned_buffers);
- d = (vlib_copy_unit_t *) dst;
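+ /* Buffer indices now live in a single vector; hand out the last
+    n_alloc_buffers entries and shrink the vector. */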
+ len = vec_len (free_list->buffers);
+ ASSERT (len >= n_alloc_buffers);
- /* Fast path loop. */
- while (n_copy >= 4)
- {
- d[0] = s[0];
- d[1] = s[1];
- d[2] = s[2];
- d[3] = s[3];
- n_copy -= 4;
- s += 4;
- d += 4;
- }
-
- while (n_copy >= 1)
- {
- d[0] = s[0];
- n_copy -= 1;
- s += 1;
- d += 1;
- }
+ src = free_list->buffers + len - n_alloc_buffers;
+ clib_memcpy (dst, src, n_alloc_buffers * sizeof (u32));
- dst = (void *) d;
- }
-
- /* Unaligned copy. */
- ASSERT (n_unaligned_end == n_left);
- while (n_left > 0)
- {
- *dst++ = *u_src--;
- n_left--;
- u_len--;
- }
-
- if (!free_list->unaligned_buffers)
- ASSERT (u_len == 0);
- else
- _vec_len (free_list->unaligned_buffers) = u_len;
+ _vec_len (free_list->buffers) -= n_alloc_buffers;
return n_alloc_buffers;
}
return alloc_from_free_list (vm, f, buffers, n_buffers);
}
-always_inline void
-add_buffer_to_free_list (vlib_main_t * vm,
- vlib_buffer_free_list_t * f,
- u32 buffer_index, u8 do_init)
-{
- vlib_buffer_t *b;
- b = vlib_get_buffer (vm, buffer_index);
- if (PREDICT_TRUE (do_init))
- vlib_buffer_init_for_free_list (b, f);
- vec_add1_aligned (f->aligned_buffers, buffer_index,
- sizeof (vlib_copy_unit_t));
-}
-
-always_inline vlib_buffer_free_list_t *
-buffer_get_free_list (vlib_main_t * vm, vlib_buffer_t * b, u32 * index)
-{
- vlib_buffer_main_t *bm = vm->buffer_main;
- u32 i;
-
- *index = i = b->free_list_index;
- return pool_elt_at_index (bm->buffer_free_list_pool, i);
-}
-
static_always_inline void
vlib_buffer_free_inline (vlib_main_t * vm,
u32 * buffers, u32 n_buffers, u32 follow_buffer_next)
for (i = 0; i < n_buffers; i++)
{
vlib_buffer_t *b;
- struct rte_mbuf *mb;
b = vlib_get_buffer (vm, buffers[i]);
- fl = buffer_get_free_list (vm, b, &fi);
+ fl = vlib_buffer_get_buffer_free_list (vm, b, &fi);
/* The only current use of this callback: multicast recycle */
if (PREDICT_FALSE (fl->buffers_added_to_freelist_function != 0))
{
int j;
- add_buffer_to_free_list
+ vlib_buffer_add_to_free_list
(vm, fl, buffers[i], (b->flags & VLIB_BUFFER_RECYCLE) == 0);
for (j = 0; j < vec_len (bm->announce_list); j++)
else
{
if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_RECYCLE) == 0))
- {
- mb = rte_mbuf_from_vlib_buffer (b);
- ASSERT (rte_mbuf_refcnt_read (mb) == 1);
- rte_pktmbuf_free (mb);
- }
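+ /* Not flagged for recycling: return the whole chain to the DPDK mempool. */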
+ dpdk_rte_pktmbuf_free (vm, b);
}
}
if (vec_len (bm->announce_list))