+/* Create a DPDK mempool named POOL_NAME with NUM_ELTS elements of
+   ELT_SIZE bytes on NUMA node NUMA, backed by a freshly allocated VPP
+   physmem region (huge-page, shared).  On success the mempool is
+   returned via _mp[0] and the physmem region index via pri[0]; returns
+   0, or a clib_error_t * on failure.  */
+dpdk_pool_create (vlib_main_t * vm, u8 * pool_name, u32 elt_size,
+ u32 num_elts, u32 pool_priv_size, u16 cache_size, u8 numa,
+ struct rte_mempool ** _mp,
+ vlib_physmem_region_index_t * pri)
+{
+ dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
+ struct rte_mempool *mp;
+ vlib_physmem_region_t *pr;
+ dpdk_mempool_private_t priv;
+ clib_error_t *error = 0;
+ u32 size, obj_size;
+ i32 ret;
+ uword i;
+
+ obj_size = rte_mempool_calc_obj_size (elt_size, 0, 0);
+
+ /* Total memory required for the pool; the sizing helper was renamed in
+    DPDK 18.05.  The page shift of 21 presumably assumes 2MB huge pages
+    -- TODO confirm against the region allocated below.  */
+#if RTE_VERSION < RTE_VERSION_NUM(18, 5, 0, 0)
+ size = rte_mempool_xmem_size (num_elts, obj_size, 21, 0);
+#else
+ size = rte_mempool_calc_mem_size_helper (num_elts, obj_size, 21);
+#endif
+
+ error = vlib_physmem_region_alloc (vm, (char *) pool_name, size, numa,
+ VLIB_PHYSMEM_F_HUGETLB |
+ VLIB_PHYSMEM_F_SHARED, pri);
+ if (error)
+ return error;
+
+ pr = vlib_physmem_get_region (vm, pri[0]);
+
+ /* Create the mempool with no memory attached yet; physmem pages are
+    attached below via rte_mempool_populate_iova().
+    NOTE(review): the hard-coded 512 is the per-lcore cache size and
+    ignores this function's cache_size parameter -- verify intent.  */
+ mp = rte_mempool_create_empty ((char *) pool_name, num_elts, elt_size,
+ 512, pool_priv_size, numa, 0);
+ if (!mp)
+ return clib_error_return (0, "failed to create %s", pool_name);
+
+ rte_mempool_set_ops_byname (mp, RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
+
+ /* Call the mempool priv initializer */
+ priv.mbp_priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
+ VLIB_BUFFER_DATA_SIZE;
+ priv.mbp_priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
+ rte_pktmbuf_pool_init (mp, &priv);
+
+ /* Attach each physmem page to the mempool; on any failure free the
+    mempool and return an error (the physmem region is not released
+    here).  */
+ for (i = 0; i < pr->n_pages; i++)
+ {
+ size_t page_size = 1 << pr->log2_page_size;
+ ret = rte_mempool_populate_iova (mp, ((char *) pr->mem) + i * page_size,
+ pr->page_table[i], page_size, 0, 0);
+ if (ret < 0)
+ {
+ rte_mempool_free (mp);
+ return clib_error_return (0, "failed to populate %s", pool_name);
+ }
+ }
+
+ _mp[0] = mp;
+
+ /* DPDK currently doesn't provide API to map DMA memory for empty mempool
+ so we are using this hack, will be nice to have at least API to get
+ VFIO container FD */
+ if (dbm->vfio_container_fd == -1)
+ foreach_directory_file ("/proc/self/fd", scan_vfio_fd, 0, 0);
+
+ if (dbm->vfio_container_fd != -1)
+ {
+ struct vfio_iommu_type1_dma_map dm = { 0 };
+ int i, rv = 0;
+ dm.argsz = sizeof (struct vfio_iommu_type1_dma_map);
+ dm.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
+
+ /* DMA-map every page of the region through the VFIO container.
+    NOTE(review): this inner `i` is a signed int shadowing the outer
+    uword `i`, and `i << pr->log2_page_size` is evaluated in int width
+    -- could overflow for very large regions; confirm region size
+    bounds.  */
+ /* *INDENT-OFF* */
+ vec_foreach_index (i, pr->page_table)
+ {
+ dm.vaddr = pointer_to_uword (pr->mem) + (i << pr->log2_page_size);
+ dm.size = 1 << pr->log2_page_size;
+ dm.iova = pr->page_table[i];
+ if ((rv = ioctl (dbm->vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dm)))
+ break;
+ }
+ /* *INDENT-ON* */
+ /* EINVAL is tolerated -- presumably the range was already mapped for
+    DMA by an earlier pool; TODO confirm.  */
+ if (rv != 0 && errno != EINVAL)
+ clib_unix_warning ("ioctl(VFIO_IOMMU_MAP_DMA) pool '%s'", pool_name);
+ }
+
+ return 0;
+}
+
+clib_error_t *
+dpdk_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs,