STATIC_ASSERT (VLIB_BUFFER_PRE_DATA_SIZE == RTE_PKTMBUF_HEADROOM,
"VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM");
+static struct rte_mbuf ***mbuf_pending_free_list = 0;
+
static_always_inline void
dpdk_rte_pktmbuf_free (vlib_main_t * vm, vlib_buffer_t * b)
{
struct rte_mbuf *mb;
u32 next, flags;
mb = rte_mbuf_from_vlib_buffer (b);
+ static struct rte_mempool *last_pool = 0;
+ static u8 last_buffer_pool_index;
next:
flags = b->flags;
b->n_add_refs = 0;
}
- rte_pktmbuf_free_seg (mb);
+ mb = rte_pktmbuf_prefree_seg (mb);
+ if (mb)
+ {
+ if (mb->pool != last_pool)
+ {
+ last_pool = mb->pool;
+ dpdk_mempool_private_t *privp = rte_mempool_get_priv (last_pool);
+ last_buffer_pool_index = privp->buffer_pool_index;
+ vec_validate_aligned (mbuf_pending_free_list,
+ last_buffer_pool_index,
+ CLIB_CACHE_LINE_BYTES);
+ }
+ vec_add1 (mbuf_pending_free_list[last_buffer_pool_index], mb);
+ }
if (flags & VLIB_BUFFER_NEXT_PRESENT)
{
}
}
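
The hunk above replaces the immediate rte_pktmbuf_free_seg() call with rte_pktmbuf_prefree_seg(), which only decrements the mbuf reference count and returns the mbuf when it actually became free; freed mbufs are then bucketed per buffer pool and handed back to their mempool in bulk (see the flush loop in vlib_buffer_free_inline below). A minimal standalone sketch of the same deferral pattern, assuming a single mempool; the names defer_free, flush_pending and pending are illustrative, not part of this patch:

    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    static struct rte_mbuf *pending[256];	/* caller must flush before overflow */
    static unsigned n_pending;

    static void
    defer_free (struct rte_mbuf *mb)
    {
      /* decrement refcnt; a non-NULL result means the segment is really free */
      mb = rte_pktmbuf_prefree_seg (mb);
      if (mb)
        pending[n_pending++] = mb;
    }

    static void
    flush_pending (void)
    {
      /* one bulk put amortizes the mempool ring operation across packets */
      if (n_pending)
        rte_mempool_put_bulk (pending[0]->pool, (void **) pending, n_pending);
      n_pending = 0;
    }

The patch keeps one pending vector per buffer_pool_index precisely so that each vector holds mbufs from a single pool, which is what makes a single put_bulk per vector valid.
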
+#ifndef CLIB_MULTIARCH_VARIANT
static void
del_free_list (vlib_main_t * vm, vlib_buffer_free_list_t * f)
{
pool_put (bm->buffer_free_list_pool, f);
}
}
+#endif
/* Make sure the free list has at least the given number of free buffers. */
static uword
u32 bi0, bi1, bi2, bi3;
unsigned socket_id = rte_socket_id ();
struct rte_mempool *rmp = dm->pktmbuf_pools[socket_id];
+ dpdk_mempool_private_t *privp = rte_mempool_get_priv (rmp);
struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
+ vlib_buffer_t bt;
/* Too early? */
if (PREDICT_FALSE (rmp == 0))
/* Always allocate new buffers in reasonably large chunks. */
n = clib_max (n, fl->min_n_buffers_each_physmem_alloc);
- vec_validate (vm->mbuf_alloc_list, n - 1);
+ vec_validate_aligned (vm->mbuf_alloc_list, n - 1, CLIB_CACHE_LINE_BYTES);
if (rte_mempool_get_bulk (rmp, vm->mbuf_alloc_list, n) < 0)
return 0;
+ memset (&bt, 0, sizeof (vlib_buffer_t));
+ vlib_buffer_init_for_free_list (&bt, fl);
+ bt.buffer_pool_index = privp->buffer_pool_index;
+
_vec_len (vm->mbuf_alloc_list) = n;
i = 0;
+ int f = vec_len (fl->buffers);
+ vec_resize_aligned (fl->buffers, n, CLIB_CACHE_LINE_BYTES);
while (i < (n - 7))
{
bi2 = vlib_get_buffer_index (vm, b2);
bi3 = vlib_get_buffer_index (vm, b3);
- vec_add1_aligned (fl->buffers, bi0, CLIB_CACHE_LINE_BYTES);
- vec_add1_aligned (fl->buffers, bi1, CLIB_CACHE_LINE_BYTES);
- vec_add1_aligned (fl->buffers, bi2, CLIB_CACHE_LINE_BYTES);
- vec_add1_aligned (fl->buffers, bi3, CLIB_CACHE_LINE_BYTES);
+ fl->buffers[f++] = bi0;
+ fl->buffers[f++] = bi1;
+ fl->buffers[f++] = bi2;
+ fl->buffers[f++] = bi3;
- vlib_buffer_init_for_free_list (b0, fl);
- vlib_buffer_init_for_free_list (b1, fl);
- vlib_buffer_init_for_free_list (b2, fl);
- vlib_buffer_init_for_free_list (b3, fl);
+ clib_memcpy64_x4 (b0, b1, b2, b3, &bt);
if (fl->buffer_init_function)
{
b0 = vlib_buffer_from_rte_mbuf (mb0);
bi0 = vlib_get_buffer_index (vm, b0);
- vec_add1_aligned (fl->buffers, bi0, CLIB_CACHE_LINE_BYTES);
-
- vlib_buffer_init_for_free_list (b0, fl);
+ fl->buffers[f++] = bi0;
+ clib_memcpy (b0, &bt, sizeof (vlib_buffer_t));
if (fl->buffer_init_function)
fl->buffer_init_function (vm, fl, &bi0, 1);
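
The allocation path now builds one template header (bt) via vlib_buffer_init_for_free_list() and stamps it onto new buffers instead of initializing each buffer from scratch; clib_memcpy64_x4 copies one 64-byte cache line from the template into four destinations at once. A sketch of what the four-wide copy amounts to (assuming, as the helper does, that the initialized metadata fits in the first 64-byte line):

    /* scalar equivalent of clib_memcpy64_x4 (b0, b1, b2, b3, &bt) */
    clib_memcpy (b0, &bt, 64);
    clib_memcpy (b1, &bt, 64);
    clib_memcpy (b2, &bt, 64);
    clib_memcpy (b3, &bt, 64);

On machines with AVX2/AVX-512 the helper performs this with a handful of wide loads and stores, which is why stamping a template beats four calls to the per-buffer init function.
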
Returns the number actually allocated, which will be either zero or the
number requested. */
u32
-dpdk_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
+CLIB_MULTIARCH_FN (dpdk_buffer_alloc) (vlib_main_t * vm, u32 * buffers,
+ u32 n_buffers)
{
vlib_buffer_main_t *bm = vm->buffer_main;
u32
-dpdk_buffer_alloc_from_free_list (vlib_main_t * vm,
- u32 * buffers,
- u32 n_buffers, u32 free_list_index)
+CLIB_MULTIARCH_FN (dpdk_buffer_alloc_from_free_list) (vlib_main_t * vm,
+ u32 * buffers,
+ u32 n_buffers,
+ u32 free_list_index)
{
vlib_buffer_main_t *bm = vm->buffer_main;
vlib_buffer_free_list_t *f;
return alloc_from_free_list (vm, f, buffers, n_buffers);
}
+static_always_inline void
+dpdk_prefetch_buffer_by_index (vlib_main_t * vm, u32 bi)
+{
+ vlib_buffer_t *b;
+ struct rte_mbuf *mb;
+ b = vlib_get_buffer (vm, bi);
+ mb = rte_mbuf_from_vlib_buffer (b);
+ CLIB_PREFETCH (mb, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
+}
+
+static_always_inline void
+recycle_or_free (vlib_main_t * vm, vlib_buffer_main_t * bm, u32 bi,
+ vlib_buffer_t * b)
+{
+ vlib_buffer_free_list_t *fl;
+ u32 fi;
+ fl = vlib_buffer_get_buffer_free_list (vm, b, &fi);
+
+ /* The only current use of this callback: multicast recycle */
+ if (PREDICT_FALSE (fl->buffers_added_to_freelist_function != 0))
+ {
+ int j;
+
+ vlib_buffer_add_to_free_list (vm, fl, bi,
+ (b->flags & VLIB_BUFFER_RECYCLE) == 0);
+
+ for (j = 0; j < vec_len (bm->announce_list); j++)
+ {
+ if (fl == bm->announce_list[j])
+ goto already_announced;
+ }
+ vec_add1 (bm->announce_list, fl);
+ already_announced:
+ ;
+ }
+ else
+ {
+ if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_RECYCLE) == 0))
+ dpdk_rte_pktmbuf_free (vm, b);
+ }
+}
+
static_always_inline void
vlib_buffer_free_inline (vlib_main_t * vm,
u32 * buffers, u32 n_buffers, u32 follow_buffer_next)
{
vlib_buffer_main_t *bm = vm->buffer_main;
- vlib_buffer_free_list_t *fl;
- u32 fi;
- int i;
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ int i = 0;
u32 (*cb) (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
u32 follow_buffer_next);
if (!n_buffers)
return;
- for (i = 0; i < n_buffers; i++)
+ while (i + 7 < n_buffers)
{
- vlib_buffer_t *b;
+ dpdk_prefetch_buffer_by_index (vm, buffers[i + 4]);
+ dpdk_prefetch_buffer_by_index (vm, buffers[i + 5]);
+ dpdk_prefetch_buffer_by_index (vm, buffers[i + 6]);
+ dpdk_prefetch_buffer_by_index (vm, buffers[i + 7]);
+
+ b0 = vlib_get_buffer (vm, buffers[i]);
+ b1 = vlib_get_buffer (vm, buffers[i + 1]);
+ b2 = vlib_get_buffer (vm, buffers[i + 2]);
+ b3 = vlib_get_buffer (vm, buffers[i + 3]);
+
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b2);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b3);
+
+ recycle_or_free (vm, bm, buffers[i], b0);
+ recycle_or_free (vm, bm, buffers[i + 1], b1);
+ recycle_or_free (vm, bm, buffers[i + 2], b2);
+ recycle_or_free (vm, bm, buffers[i + 3], b3);
- b = vlib_get_buffer (vm, buffers[i]);
+ i += 4;
+ }
+ while (i < n_buffers)
+ {
+ b0 = vlib_get_buffer (vm, buffers[i]);
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
- fl = vlib_buffer_get_buffer_free_list (vm, b, &fi);
-
- /* The only current use of this callback: multicast recycle */
- if (PREDICT_FALSE (fl->buffers_added_to_freelist_function != 0))
- {
- int j;
-
- vlib_buffer_add_to_free_list
- (vm, fl, buffers[i], (b->flags & VLIB_BUFFER_RECYCLE) == 0);
-
- for (j = 0; j < vec_len (bm->announce_list); j++)
- {
- if (fl == bm->announce_list[j])
- goto already_announced;
- }
- vec_add1 (bm->announce_list, fl);
- already_announced:
- ;
- }
- else
- {
- if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_RECYCLE) == 0))
- dpdk_rte_pktmbuf_free (vm, b);
- }
+ recycle_or_free (vm, bm, buffers[i], b0);
+ i++;
}
if (vec_len (bm->announce_list))
{
}
_vec_len (bm->announce_list) = 0;
}
+
+ vec_foreach_index (i, mbuf_pending_free_list)
+ {
+ int len = vec_len (mbuf_pending_free_list[i]);
+ if (len)
+ {
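+ /* all mbufs in this vector come from the same mempool, because the
+    vector is selected by buffer_pool_index; any element's pool
+    pointer (here the last) therefore identifies the pool */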
+ rte_mempool_put_bulk (mbuf_pending_free_list[i][len - 1]->pool,
+ (void *) mbuf_pending_free_list[i], len);
+ vec_reset_length (mbuf_pending_free_list[i]);
+ }
+ }
}
-static void
-dpdk_buffer_free (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
+void
+CLIB_MULTIARCH_FN (dpdk_buffer_free) (vlib_main_t * vm, u32 * buffers,
+ u32 n_buffers)
{
vlib_buffer_free_inline (vm, buffers, n_buffers, /* follow_buffer_next */
1);
}
-static void
-dpdk_buffer_free_no_next (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
+void
+CLIB_MULTIARCH_FN (dpdk_buffer_free_no_next) (vlib_main_t * vm, u32 * buffers,
+ u32 n_buffers)
{
vlib_buffer_free_inline (vm, buffers, n_buffers, /* follow_buffer_next */
0);
}
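
CLIB_MULTIARCH_FN lets this file be compiled several times with different -march flags: in the default build the macro is a no-op, while in a variant build (CLIB_MULTIARCH_VARIANT defined, e.g. avx2) it pastes the variant name onto the symbol, producing dpdk_buffer_free_avx2 and friends. Roughly what vppinfra/cpu.h does (simplified; the exact definition may differ between VPP versions):

    #ifdef CLIB_MULTIARCH_VARIANT
    #define __CLIB_MULTIARCH_FN(a,b) a##_##b
    #define _CLIB_MULTIARCH_FN(a,b) __CLIB_MULTIARCH_FN(a,b)
    #define CLIB_MULTIARCH_FN(fn) _CLIB_MULTIARCH_FN(fn, CLIB_MULTIARCH_VARIANT)
    #else
    #define CLIB_MULTIARCH_FN(fn) fn
    #endif

This is also why the shared, non-variant definitions below are wrapped in #ifndef CLIB_MULTIARCH_VARIANT: they must be emitted only once, in the default build.
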
+#ifndef CLIB_MULTIARCH_VARIANT
static void
dpdk_packet_template_init (vlib_main_t * vm,
void *vt,
{
dpdk_main_t *dm = &dpdk_main;
struct rte_mempool *rmp;
- int i;
+ dpdk_mempool_private_t priv;
+ vlib_physmem_region_t *pr;
+ vlib_physmem_region_index_t pri;
+ u8 *pool_name;
+ unsigned elt_size;
+ u32 size, obj_size;
+ i32 i, ret;
vec_validate_aligned (dm->pktmbuf_pools, socket_id, CLIB_CACHE_LINE_BYTES);
if (dm->pktmbuf_pools[socket_id])
return 0;
- u8 *pool_name = format (0, "mbuf_pool_socket%u%c", socket_id, 0);
+ pool_name = format (0, "dpdk_mbuf_pool_socket%u%c", socket_id, 0);
+
+ elt_size = sizeof (struct rte_mbuf) +
+ VLIB_BUFFER_HDR_SIZE /* priv size */ +
+ VLIB_BUFFER_PRE_DATA_SIZE + VLIB_BUFFER_DATA_SIZE; /* data room size */
- rmp = rte_pktmbuf_pool_create ((char *) pool_name, /* pool name */
- num_mbufs, /* number of mbufs */
- 512, /* cache size */
- VLIB_BUFFER_HDR_SIZE, /* priv size */
- VLIB_BUFFER_PRE_DATA_SIZE + VLIB_BUFFER_DATA_SIZE, /* dataroom size */
- socket_id); /* cpu socket */
+ obj_size = rte_mempool_calc_obj_size (elt_size, 0, 0);
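+ /* size the region assuming 2 MB huge pages (pg_shift = 21); the page
+    size actually used at populate time comes from the physmem region */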
+ size = rte_mempool_xmem_size (num_mbufs, obj_size, 21);
+ clib_error_t *error = vlib_physmem_region_alloc (vm, (char *) pool_name,
+ size, socket_id, 0, &pri);
+ if (error)
+ clib_error_report (error);
+
+ pr = vlib_physmem_get_region (vm, pri);
+
+ priv.mbp_priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
+ VLIB_BUFFER_DATA_SIZE;
+ priv.mbp_priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
+
+ rmp = rte_mempool_create_empty ((char *) pool_name, /* pool name */
+ num_mbufs, /* number of mbufs */
+ elt_size, /* element size */
+ 512, /* per-lcore cache size */
+ sizeof (dpdk_mempool_private_t), /* private data size */
+ socket_id, /* cpu socket */
+ 0); /* flags */
if (rmp)
{
- {
- struct rte_mempool_memhdr *memhdr;
+ rte_mempool_set_ops_byname (rmp, RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
- STAILQ_FOREACH (memhdr, &rmp->mem_list, next)
- vlib_buffer_add_mem_range (vm, (uword) memhdr->addr, memhdr->len);
- }
- if (rmp)
+ /* call the mempool priv initializer */
+ rte_pktmbuf_pool_init (rmp, &priv);
+
+ ret = rte_mempool_populate_phys_tab (rmp, pr->mem, pr->page_table,
+ pr->n_pages, pr->log2_page_size,
+ NULL, NULL);
+ if (ret == (i32) rmp->size)
{
+ /* call the object initializers */
+ rte_mempool_obj_iter (rmp, rte_pktmbuf_init, 0);
+
+ dpdk_mempool_private_t *privp = rte_mempool_get_priv (rmp);
+ privp->buffer_pool_index = vlib_buffer_add_physmem_region (vm, pri);
+
dm->pktmbuf_pools[socket_id] = rmp;
- vec_free (pool_name);
+
return 0;
}
+
+ rte_mempool_free (rmp);
}
vec_free (pool_name);
{
if (dm->pktmbuf_pools[i])
{
- clib_warning
- ("WARNING: Failed to allocate mempool for CPU socket %u. "
- "Threads running on socket %u will use socket %u mempool.",
- socket_id, socket_id, i);
+ clib_warning ("WARNING: Failed to allocate mempool for CPU socket "
+ "%u. Threads running on socket %u will use socket %u "
+ "mempool.", socket_id, socket_id, i);
dm->pktmbuf_pools[socket_id] = dm->pktmbuf_pools[i];
return 0;
}
};
/* *INDENT-ON* */
+#if __x86_64__
+vlib_buffer_alloc_cb_t __clib_weak dpdk_buffer_alloc_avx512;
+vlib_buffer_alloc_cb_t __clib_weak dpdk_buffer_alloc_avx2;
+vlib_buffer_alloc_from_free_list_cb_t __clib_weak
+ dpdk_buffer_alloc_from_free_list_avx512;
+vlib_buffer_alloc_from_free_list_cb_t __clib_weak
+ dpdk_buffer_alloc_from_free_list_avx2;
+vlib_buffer_free_cb_t __clib_weak dpdk_buffer_free_cb_avx512;
+vlib_buffer_free_cb_t __clib_weak dpdk_buffer_free_cb_avx2;
+vlib_buffer_free_no_next_cb_t __clib_weak dpdk_buffer_free_no_next_cb_avx512;
+vlib_buffer_free_no_next_cb_t __clib_weak dpdk_buffer_free_no_next_cb_avx2;
+
+static void __clib_constructor
+dpdk_buffer_multiarch_select (void)
+{
+ vlib_buffer_callbacks_t *cb = &__dpdk_buffer_callbacks;
+ if (dpdk_buffer_alloc_avx512 && clib_cpu_supports_avx512f ())
+ {
+ cb->vlib_buffer_alloc_cb = dpdk_buffer_alloc_avx512;
+ cb->vlib_buffer_alloc_from_free_list_cb =
+ dpdk_buffer_alloc_from_free_list_avx512;
+ cb->vlib_buffer_free_cb = dpdk_buffer_free_cb_avx512;
+ cb->vlib_buffer_free_no_next_cb = dpdk_buffer_free_no_next_cb_avx512;
+ }
+ else if (dpdk_buffer_alloc_avx2 && clib_cpu_supports_avx2 ())
+ {
+ cb->vlib_buffer_alloc_cb = dpdk_buffer_alloc_avx2;
+ cb->vlib_buffer_alloc_from_free_list_cb =
+ dpdk_buffer_alloc_from_free_list_avx2;
+ cb->vlib_buffer_free_cb = dpdk_buffer_free_cb_avx2;
+ cb->vlib_buffer_free_no_next_cb = dpdk_buffer_free_no_next_cb_avx2;
+ }
+}
+#endif
+#endif
+
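
The selector above combines two plain C mechanisms: __clib_weak declarations resolve to 0 when the corresponding variant object file is not linked in, and __clib_constructor runs the selector before main(), so the callback table is patched before any buffer is allocated. A minimal standalone sketch of the same pattern, using illustrative names and plain GCC attributes instead of the clib wrappers:

    #include <stdio.h>

    /* resolves to 0 at link time unless some object file defines it */
    void fast_path_avx2 (void) __attribute__ ((weak));

    static void generic (void) { puts ("generic"); }
    static void (*fast_path) (void) = generic;

    static void __attribute__ ((constructor))
    select_fast_path (void)
    {
      /* a real selector would also test CPU support, as above */
      if (fast_path_avx2)
        fast_path = fast_path_avx2;
    }

    int
    main (void)
    {
      fast_path ();
      return 0;
    }
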
/** @endcond */
/*
* fd.io coding-style-patch-verification: ON