+/**
+ * dpdk_pool_create - create a DPDK mempool backed by a VPP physmem map.
+ *
+ * Creates an empty mempool, sizes and allocates a shared physmem map to
+ * back it, populates the pool page by page, and (when ethernet devices
+ * are present) DMA-maps each page into the vfio container so devices
+ * can reach the buffers.
+ *
+ * @param pool_name      name used for both the mempool and the physmem map
+ * @param elt_size       size in bytes of one mempool element
+ * @param num_elts       number of elements in the pool
+ * @param pool_priv_size size of the mempool private data area
+ * @param cache_size     per-lcore cache size -- NOTE(review): this argument
+ *                       is currently unused; 512 is passed to
+ *                       rte_mempool_create_empty below instead. Confirm
+ *                       which is intended.
+ * @param numa           numa node for both the mempool and the physmem map
+ * @param _mp            out: the created mempool (written on success only)
+ * @param map_index      out: index of the physmem map backing the pool
+ * @return 0 on success, a clib error otherwise
+ */
+clib_error_t *
+dpdk_pool_create (vlib_main_t * vm, u8 * pool_name, u32 elt_size,
+ u32 num_elts, u32 pool_priv_size, u16 cache_size, u8 numa,
+ struct rte_mempool **_mp, u32 * map_index)
+{
+ struct rte_mempool *mp;
+ enum rte_iova_mode iova_mode;
+ dpdk_mempool_private_t priv;
+ vlib_physmem_map_t *pm;
+ clib_error_t *error = 0;
+ size_t min_chunk_size, align;
+ int map_dma = 1;
+ u32 size;
+ i32 ret;
+ uword i;
+
+ /* Create the pool empty: memory is attached explicitly further down so
+    it comes from VPP's physmem rather than DPDK's own allocator. */
+ mp = rte_mempool_create_empty ((char *) pool_name, num_elts, elt_size,
+ 512, pool_priv_size, numa, 0);
+ if (!mp)
+ return clib_error_return (0, "failed to create %s", pool_name);
+
+ rte_mempool_set_ops_byname (mp, RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
+
+ /* Ask DPDK how much backing memory the pool needs; 21 is the page shift
+    passed to the size calculation (i.e. 2MB pages).
+    NOTE(review): this shift is hard-coded while the actual page size of
+    the map comes from pm->log2_page_size below -- confirm they agree.
+    The default calc op can also return a negative error code, which a
+    u32 'size' would not preserve. */
+ size = rte_mempool_op_calc_mem_size_default (mp, num_elts, 21,
+ &min_chunk_size, &align);
+
+ /* Back the pool with a shared physmem map on the requested numa node. */
+ if ((error = vlib_physmem_shared_map_create (vm, (char *) pool_name, size,
+ 0, numa, map_index)))
+ {
+ rte_mempool_free (mp);
+ return error;
+ }
+ pm = vlib_physmem_get_map (vm, *map_index);
+
+ /* Call the mempool priv initializer */
+ priv.mbp_priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
+ VLIB_BUFFER_DATA_SIZE;
+ priv.mbp_priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
+ rte_pktmbuf_pool_init (mp, &priv);
+
+ /* With no ethernet devices available nothing will DMA into these
+    buffers, so skip the vfio mapping work entirely. */
+ if (rte_eth_dev_count_avail () == 0)
+ map_dma = 0;
+
+ /* Populate the pool one physmem page at a time. In IOVA-as-VA mode the
+    device-visible address is simply the virtual address; otherwise the
+    physical address is taken from the map's page table. */
+ iova_mode = rte_eal_iova_mode ();
+ for (i = 0; i < pm->n_pages; i++)
+ {
+ size_t page_sz = 1ULL << pm->log2_page_size;
+ char *va = ((char *) pm->base) + i * page_sz;
+ uword pa = iova_mode == RTE_IOVA_VA ?
+ pointer_to_uword (va) : pm->page_table[i];
+ ret = rte_mempool_populate_iova (mp, va, pa, page_sz, 0, 0);
+ if (ret < 0)
+ {
+ /* NOTE(review): the physmem map created above is not released on
+    this path -- confirm whether it is reclaimed elsewhere. */
+ rte_mempool_free (mp);
+ return clib_error_return (0, "failed to populate %s", pool_name);
+ }
+ /* -1 likely means there is no PCI devices assigned to vfio
+ container or noiommu mode is used so we stop trying */
+ if (map_dma && rte_vfio_dma_map (pointer_to_uword (va), pa, page_sz))
+ map_dma = 0;
+ }
+
+ _mp[0] = mp;
+
+ return 0;
+}
+