STATIC_ASSERT (VLIB_BUFFER_PRE_DATA_SIZE == RTE_PKTMBUF_HEADROOM,
"VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM");
-
-typedef struct
-{
- /* must be first */
- struct rte_pktmbuf_pool_private mbp_priv;
- u8 buffer_pool_index;
-} dpdk_mempool_private_t;
-
#ifndef CLIB_MARCH_VARIANT
struct rte_mempool **dpdk_mempool_by_buffer_pool_index = 0;
struct rte_mempool **dpdk_no_cache_mempool_by_buffer_pool_index = 0;
clib_error_t *
dpdk_buffer_pool_init (vlib_main_t * vm, vlib_buffer_pool_t * bp)
{
+ uword buffer_mem_start = vm->buffer_main->buffer_mem_start;
struct rte_mempool *mp, *nmp;
- dpdk_mempool_private_t priv;
+ struct rte_pktmbuf_pool_private priv;
enum rte_iova_mode iova_mode;
u32 *bi;
u8 *name = 0;
mp = rte_mempool_create_empty ((char *) name, vec_len (bp->buffers),
elt_size, 512, sizeof (priv),
bp->numa_node, 0);
+ if (!mp)
+ {
+ vec_free (name);
+ return clib_error_return (0,
+ "failed to create normal mempool for numa node %u",
+ bp->index);
+ }
vec_reset_length (name);
/* non-cached mempool */
nmp = rte_mempool_create_empty ((char *) name, vec_len (bp->buffers),
elt_size, 0, sizeof (priv),
bp->numa_node, 0);
+ if (!nmp)
+ {
+ rte_mempool_free (mp);
+ vec_free (name);
+ return clib_error_return (0,
+                               "failed to create non-cache mempool for numa node %u",
+ bp->index);
+ }
vec_free (name);
dpdk_mempool_by_buffer_pool_index[bp->index] = mp;
dpdk_no_cache_mempool_by_buffer_pool_index[bp->index] = nmp;
+ mp->pool_id = nmp->pool_id = bp->index;
+
rte_mempool_set_ops_byname (mp, "vpp", NULL);
rte_mempool_set_ops_byname (nmp, "vpp-no-cache", NULL);
/* Call the mempool priv initializer */
- priv.mbp_priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
- VLIB_BUFFER_DATA_SIZE;
- priv.mbp_priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
- priv.buffer_pool_index = bp->index;
+ priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
+ vlib_buffer_get_default_data_size (vm);
+ priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
rte_pktmbuf_pool_init (mp, &priv);
rte_pktmbuf_pool_init (nmp, &priv);
iova_mode = rte_eal_iova_mode ();
/* populate mempool object buffer header */
+ /* *INDENT-OFF* */
vec_foreach (bi, bp->buffers)
- {
- struct rte_mempool_objhdr *hdr;
- vlib_buffer_t *b = vlib_get_buffer (vm, *bi);
- struct rte_mbuf *mb = rte_mbuf_from_vlib_buffer (b);
- hdr = (struct rte_mempool_objhdr *) RTE_PTR_SUB (mb, sizeof (*hdr));
- hdr->mp = mp;
- hdr->iova = (iova_mode == RTE_IOVA_VA) ?
- pointer_to_uword (mb) : vlib_physmem_get_pa (vm, mb);
- STAILQ_INSERT_TAIL (&mp->elt_list, hdr, next);
- STAILQ_INSERT_TAIL (&nmp->elt_list, hdr, next);
- mp->populated_size++;
- nmp->populated_size++;
- }
+ {
+ struct rte_mempool_objhdr *hdr;
+ vlib_buffer_t *b = vlib_get_buffer (vm, *bi);
+ struct rte_mbuf *mb = rte_mbuf_from_vlib_buffer (b);
+ hdr = (struct rte_mempool_objhdr *) RTE_PTR_SUB (mb, sizeof (*hdr));
+ hdr->mp = mp;
+ hdr->iova = (iova_mode == RTE_IOVA_VA) ?
+ pointer_to_uword (mb) : vlib_physmem_get_pa (vm, mb);
+ STAILQ_INSERT_TAIL (&mp->elt_list, hdr, next);
+ STAILQ_INSERT_TAIL (&nmp->elt_list, hdr, next);
+ mp->populated_size++;
+ nmp->populated_size++;
+ }
+ /* *INDENT-ON* */
/* call the object initializers */
rte_mempool_obj_iter (mp, rte_pktmbuf_init, 0);
+ /* *INDENT-OFF* */
+ vec_foreach (bi, bp->buffers)
+ {
+ vlib_buffer_t *b;
+ b = vlib_buffer_ptr_from_index (buffer_mem_start, *bi, 0);
+ vlib_buffer_copy_template (b, &bp->buffer_template);
+ }
+ /* *INDENT-ON* */
+
/* map DMA pages if at least one physical device exists */
if (rte_eth_dev_count_avail ())
{
uword i;
size_t page_sz;
vlib_physmem_map_t *pm;
+ int do_vfio_map = 1;
pm = vlib_physmem_get_map (vm, bp->physmem_map_index);
page_sz = 1ULL << pm->log2_page_size;
uword pa = (iova_mode == RTE_IOVA_VA) ?
pointer_to_uword (va) : pm->page_table[i];
- if (rte_vfio_dma_map (pointer_to_uword (va), pa, page_sz))
- break;
+ if (do_vfio_map &&
+ rte_vfio_dma_map (pointer_to_uword (va), pa, page_sz))
+ do_vfio_map = 0;
+
+ struct rte_mempool_memhdr *memhdr;
+ memhdr = clib_mem_alloc (sizeof (*memhdr));
+ memhdr->mp = mp;
+ memhdr->addr = va;
+ memhdr->iova = pa;
+ memhdr->len = page_sz;
+ memhdr->free_cb = 0;
+ memhdr->opaque = 0;
+
+ STAILQ_INSERT_TAIL (&mp->mem_list, memhdr, next);
+ mp->nb_mem_chunks++;
}
}
const int batch_size = 32;
vlib_main_t *vm = vlib_get_main ();
vlib_buffer_t bt;
- dpdk_mempool_private_t *privp = rte_mempool_get_priv (mp);
- u8 buffer_pool_index = privp->buffer_pool_index;
+ u8 buffer_pool_index = mp->pool_id;
vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
u32 bufs[batch_size];
u32 n_left = n;
{
vlib_get_buffer_indices_with_offset (vm, (void **) obj_table, bufs,
n, sizeof (struct rte_mbuf));
- vlib_buffer_pool_put (vm, buffer_pool_index, bufs, batch_size);
+ vlib_buffer_pool_put (vm, buffer_pool_index, bufs, n);
}
return 0;
{
vlib_main_t *vm = vlib_get_main ();
vlib_buffer_t bt;
- dpdk_mempool_private_t *privp = rte_mempool_get_priv (cmp);
struct rte_mempool *mp;
- mp = dpdk_mempool_by_buffer_pool_index[privp->buffer_pool_index];
- u8 buffer_pool_index = privp->buffer_pool_index;
+ mp = dpdk_mempool_by_buffer_pool_index[cmp->pool_id];
+ u8 buffer_pool_index = cmp->pool_id;
vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
vlib_buffer_copy_template (&bt, &bp->buffer_template);
const int batch_size = 32;
vlib_main_t *vm = vlib_get_main ();
u32 bufs[batch_size], total = 0, n_alloc = 0;
- dpdk_mempool_private_t *privp = rte_mempool_get_priv (mp);
- u8 buffer_pool_index = privp->buffer_pool_index;
+ u8 buffer_pool_index = mp->pool_id;
void **obj = obj_table;
while (n >= batch_size)
static unsigned
dpdk_ops_vpp_get_count_no_cache (const struct rte_mempool *mp)
{
- dpdk_mempool_private_t *privp;
struct rte_mempool *cmp;
- privp = rte_mempool_get_priv ((struct rte_mempool *) mp);
- cmp = dpdk_no_cache_mempool_by_buffer_pool_index[privp->buffer_pool_index];
+ cmp = dpdk_no_cache_mempool_by_buffer_pool_index[mp->pool_id];
return dpdk_ops_vpp_get_count (cmp);
}