mp = rte_mempool_create_empty ((char *) name, vec_len (bp->buffers),
elt_size, 512, sizeof (priv),
bp->numa_node, 0);
+ if (!mp)
+ {
+ vec_free (name);
+ return clib_error_return (0,
+ "failed to create normal mempool for numa node %u",
+ bp->index);
+ }
vec_reset_length (name);
/* non-cached mempool */
nmp = rte_mempool_create_empty ((char *) name, vec_len (bp->buffers),
elt_size, 0, sizeof (priv),
bp->numa_node, 0);
+ if (!nmp)
+ {
+ rte_mempool_free (mp);
+ vec_free (name);
+ return clib_error_return (0,
+ "failed to create non-cache mempool for numa node %u",
+ bp->index);
+ }
vec_free (name);
dpdk_mempool_by_buffer_pool_index[bp->index] = mp;
/* Call the mempool priv initializer */
priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
- VLIB_BUFFER_DATA_SIZE;
+ vlib_buffer_get_default_data_size (vm);
priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
rte_pktmbuf_pool_init (mp, &priv);
rte_pktmbuf_pool_init (nmp, &priv);
uword i;
size_t page_sz;
vlib_physmem_map_t *pm;
+ int do_vfio_map = 1;
pm = vlib_physmem_get_map (vm, bp->physmem_map_index);
page_sz = 1ULL << pm->log2_page_size;
uword pa = (iova_mode == RTE_IOVA_VA) ?
pointer_to_uword (va) : pm->page_table[i];
- if (rte_vfio_dma_map (pointer_to_uword (va), pa, page_sz))
- break;
+ if (do_vfio_map &&
+ rte_vfio_dma_map (pointer_to_uword (va), pa, page_sz))
+ do_vfio_map = 0;
+
+ struct rte_mempool_memhdr *memhdr;
+ memhdr = clib_mem_alloc (sizeof (*memhdr));
+ memhdr->mp = mp;
+ memhdr->addr = va;
+ memhdr->iova = pa;
+ memhdr->len = page_sz;
+ memhdr->free_cb = 0;
+ memhdr->opaque = 0;
+
+ STAILQ_INSERT_TAIL (&mp->mem_list, memhdr, next);
+ mp->nb_mem_chunks++;
}
}
{
vlib_get_buffer_indices_with_offset (vm, (void **) obj_table, bufs,
n, sizeof (struct rte_mbuf));
- vlib_buffer_pool_put (vm, buffer_pool_index, bufs, batch_size);
+ vlib_buffer_pool_put (vm, buffer_pool_index, bufs, n);
}
return 0;