#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_vfio.h>
+#include <rte_version.h>
#include <vlib/vlib.h>
#include <dpdk/buffer.h>
/* vlib buffers and DPDK mbufs share the same backing memory; the vlib
   pre-data area must line up exactly with the mbuf headroom for the
   rte_mbuf_from_vlib_buffer() pointer arithmetic below to be valid. */
STATIC_ASSERT (VLIB_BUFFER_PRE_DATA_SIZE == RTE_PKTMBUF_HEADROOM,
"VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM");
+extern struct rte_mbuf *dpdk_mbuf_template_by_pool_index;
#ifndef CLIB_MARCH_VARIANT
struct rte_mempool **dpdk_mempool_by_buffer_pool_index = 0;
struct rte_mempool **dpdk_no_cache_mempool_by_buffer_pool_index = 0;
+struct rte_mbuf *dpdk_mbuf_template_by_pool_index = 0;
clib_error_t *
dpdk_buffer_pool_init (vlib_main_t * vm, vlib_buffer_pool_t * bp)
struct rte_mempool *mp, *nmp;
struct rte_pktmbuf_pool_private priv;
enum rte_iova_mode iova_mode;
- u32 *bi;
+ u32 i;
u8 *name = 0;
u32 elt_size =
/* normal mempool */
name = format (name, "vpp pool %u%c", bp->index, 0);
- mp = rte_mempool_create_empty ((char *) name, vec_len (bp->buffers),
+ mp = rte_mempool_create_empty ((char *) name, bp->n_buffers,
elt_size, 512, sizeof (priv),
bp->numa_node, 0);
+ if (!mp)
+ {
+ vec_free (name);
+ return clib_error_return (0,
+ "failed to create normal mempool for numa node %u",
+ bp->index);
+ }
vec_reset_length (name);
/* non-cached mempool */
name = format (name, "vpp pool %u (no cache)%c", bp->index, 0);
- nmp = rte_mempool_create_empty ((char *) name, vec_len (bp->buffers),
+ nmp = rte_mempool_create_empty ((char *) name, bp->n_buffers,
elt_size, 0, sizeof (priv),
bp->numa_node, 0);
+ if (!nmp)
+ {
+ rte_mempool_free (mp);
+ vec_free (name);
+ return clib_error_return (0,
+ "failed to create non-cache mempool for numa nude %u",
+ bp->index);
+ }
vec_free (name);
dpdk_mempool_by_buffer_pool_index[bp->index] = mp;
rte_mempool_set_ops_byname (nmp, "vpp-no-cache", NULL);
/* Call the mempool priv initializer */
+ memset (&priv, 0, sizeof (priv));
priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
- vlib_bufer_get_default_size (vm);
+ vlib_buffer_get_default_data_size (vm);
priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
rte_pktmbuf_pool_init (mp, &priv);
rte_pktmbuf_pool_init (nmp, &priv);
iova_mode = rte_eal_iova_mode ();
/* populate mempool object buffer header */
- /* *INDENT-OFF* */
- vec_foreach (bi, bp->buffers)
+ for (i = 0; i < bp->n_buffers; i++)
{
struct rte_mempool_objhdr *hdr;
- vlib_buffer_t *b = vlib_get_buffer (vm, *bi);
+ vlib_buffer_t *b = vlib_get_buffer (vm, bp->buffers[i]);
struct rte_mbuf *mb = rte_mbuf_from_vlib_buffer (b);
hdr = (struct rte_mempool_objhdr *) RTE_PTR_SUB (mb, sizeof (*hdr));
hdr->mp = mp;
mp->populated_size++;
nmp->populated_size++;
}
- /* *INDENT-ON* */
/* call the object initializers */
rte_mempool_obj_iter (mp, rte_pktmbuf_init, 0);
- /* *INDENT-OFF* */
- vec_foreach (bi, bp->buffers)
+ /* create mbuf header template from the first buffer in the pool */
+ vec_validate_aligned (dpdk_mbuf_template_by_pool_index, bp->index,
+ CLIB_CACHE_LINE_BYTES);
+ clib_memcpy (vec_elt_at_index (dpdk_mbuf_template_by_pool_index, bp->index),
+ rte_mbuf_from_vlib_buffer (vlib_buffer_ptr_from_index
+ (buffer_mem_start, *bp->buffers,
+ 0)), sizeof (struct rte_mbuf));
+
+ for (i = 0; i < bp->n_buffers; i++)
{
vlib_buffer_t *b;
- b = vlib_buffer_ptr_from_index (buffer_mem_start, *bi, 0);
+ b = vlib_buffer_ptr_from_index (buffer_mem_start, bp->buffers[i], 0);
vlib_buffer_copy_template (b, &bp->buffer_template);
}
- /* *INDENT-ON* */
/* map DMA pages if at least one physical device exists */
if (rte_eth_dev_count_avail ())
pointer_to_uword (va) : pm->page_table[i];
if (do_vfio_map &&
+#if RTE_VERSION < RTE_VERSION_NUM(19, 11, 0, 0)
rte_vfio_dma_map (pointer_to_uword (va), pa, page_sz))
+#else
+ rte_vfio_container_dma_map (RTE_VFIO_DEFAULT_CONTAINER_FD,
+ pointer_to_uword (va), pa, page_sz))
+#endif
do_vfio_map = 0;
struct rte_mempool_memhdr *memhdr;
if (clib_atomic_sub_fetch (&b->ref_count, 1) == 0)
{
u32 bi = vlib_get_buffer_index (vm, b);
- mb->pool = new;
vlib_buffer_copy_template (b, bt);
vlib_buffer_pool_put (vm, bt->buffer_pool_index, &bi, 1);
return;
CLIB_MARCH_FN_REGISTRATION (dpdk_ops_vpp_enqueue_no_cache);
+static_always_inline void
+dpdk_mbuf_init_from_template (struct rte_mbuf **mba, struct rte_mbuf *mt,
+ int count)
+{
+ /* Assumptions about rte_mbuf layout */
+ STATIC_ASSERT_OFFSET_OF (struct rte_mbuf, buf_addr, 0);
+ STATIC_ASSERT_OFFSET_OF (struct rte_mbuf, buf_iova, 8);
+ STATIC_ASSERT_SIZEOF_ELT (struct rte_mbuf, buf_iova, 8);
+ STATIC_ASSERT_SIZEOF_ELT (struct rte_mbuf, buf_iova, 8);
+ STATIC_ASSERT_SIZEOF (struct rte_mbuf, 128);
+
+ while (count--)
+ {
+ struct rte_mbuf *mb = mba[0];
+ int i;
+ /* bytes 0 .. 15 hold buf_addr and buf_iova which we need to preserve */
+ /* copy bytes 16 .. 31 */
+ *((u8x16 *) mb + 1) = *((u8x16 *) mt + 1);
+
+ /* copy bytes 32 .. 127 */
+#ifdef CLIB_HAVE_VEC256
+ for (i = 1; i < 4; i++)
+ *((u8x32 *) mb + i) = *((u8x32 *) mt + i);
+#else
+ for (i = 2; i < 8; i++)
+ *((u8x16 *) mb + i) = *((u8x16 *) mt + i);
+#endif
+ mba++;
+ }
+}
+
int
CLIB_MULTIARCH_FN (dpdk_ops_vpp_dequeue) (struct rte_mempool * mp,
void **obj_table, unsigned n)
u32 bufs[batch_size], total = 0, n_alloc = 0;
u8 buffer_pool_index = mp->pool_id;
void **obj = obj_table;
+ struct rte_mbuf t = dpdk_mbuf_template_by_pool_index[buffer_pool_index];
while (n >= batch_size)
{
vlib_get_buffers_with_offset (vm, bufs, obj, batch_size,
-(i32) sizeof (struct rte_mbuf));
+ dpdk_mbuf_init_from_template ((struct rte_mbuf **) obj, &t, batch_size);
total += batch_size;
obj += batch_size;
n -= batch_size;
vlib_get_buffers_with_offset (vm, bufs, obj, n,
-(i32) sizeof (struct rte_mbuf));
+ dpdk_mbuf_init_from_template ((struct rte_mbuf **) obj, &t, n);
}
return 0;
static unsigned
dpdk_ops_vpp_get_count (const struct rte_mempool *mp)
{
- clib_warning ("");
+ vlib_main_t *vm = vlib_get_main ();
+ if (mp)
+ {
+ vlib_buffer_pool_t *pool = vlib_get_buffer_pool (vm, mp->pool_id);
+ if (pool)
+ {
+ return pool->n_avail;
+ }
+ }
return 0;
}