*/
#include <unistd.h>
-#include <linux/vfio.h>
-#include <sys/ioctl.h>
#include <rte_config.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
+#include <rte_vfio.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
-#include <vlib/pci/pci.h>
-#include <vlib/linux/vfio.h>
#include <vnet/vnet.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
vec_free (f->name);
vec_free (f->buffers);
/* Poison it. */
- memset (f, 0xab, sizeof (f[0]));
+ clib_memset (f, 0xab, sizeof (f[0]));
}
/* Add buffer free list. */
if (rte_mempool_get_bulk (rmp, (void *) d->mbuf_alloc_list, n_alloc) < 0)
return 0;
- memset (&bt, 0, sizeof (vlib_buffer_t));
+ clib_memset (&bt, 0, sizeof (vlib_buffer_t));
vlib_buffer_init_for_free_list (&bt, fl);
bt.buffer_pool_index = privp->buffer_pool_index;
recycle_or_free (vlib_main_t * vm, vlib_buffer_main_t * bm, u32 bi,
vlib_buffer_t * b)
{
- vlib_buffer_free_list_t *fl;
u32 thread_index = vlib_get_thread_index ();
- vlib_buffer_free_list_index_t fi;
- fl = vlib_buffer_get_buffer_free_list (vm, b, &fi);
- /* The only current use of this callback: multicast recycle */
- if (PREDICT_FALSE (fl->buffers_added_to_freelist_function != 0))
- {
- int j;
-
- vlib_buffer_add_to_free_list (vm, fl, bi,
- (b->flags & VLIB_BUFFER_RECYCLE) == 0);
-
- for (j = 0; j < vec_len (vm->buffer_announce_list); j++)
- {
- if (fl == vm->buffer_announce_list[j])
- goto already_announced;
- }
- vec_add1 (vm->buffer_announce_list, fl);
- already_announced:
- ;
- }
- else if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_RECYCLE) == 0))
- dpdk_rte_pktmbuf_free (vm, thread_index, b, 1);
+ dpdk_rte_pktmbuf_free (vm, thread_index, b, 1);
}
static_always_inline void
vlib_buffer_t *bufp[n_buffers], **b = bufp;
u32 thread_index = vlib_get_thread_index ();
int i = 0;
- u32 simple_mask = (VLIB_BUFFER_NON_DEFAULT_FREELIST | VLIB_BUFFER_RECYCLE |
+ u32 simple_mask = (VLIB_BUFFER_NON_DEFAULT_FREELIST |
VLIB_BUFFER_NEXT_PRESENT);
u32 n_left, *bi;
u32 (*cb) (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
b += 1;
n_left -= 1;
}
- if (vec_len (vm->buffer_announce_list))
- {
- vlib_buffer_free_list_t *fl;
- for (i = 0; i < vec_len (vm->buffer_announce_list); i++)
- {
- fl = vm->buffer_announce_list[i];
- fl->buffers_added_to_freelist_function (vm, fl);
- }
- _vec_len (vm->buffer_announce_list) = 0;
- }
}
void
vlib_packet_template_t *t = (vlib_packet_template_t *) vt;
vlib_worker_thread_barrier_sync (vm);
- memset (t, 0, sizeof (t[0]));
+ clib_memset (t, 0, sizeof (t[0]));
vec_add (t->packet_data, packet_data, n_packet_data_bytes);
vlib_worker_thread_barrier_release (vm);
}
-static clib_error_t *
-scan_vfio_fd (void *arg, u8 * path_name, u8 * file_name)
-{
- dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
- linux_vfio_main_t *lvm = &vfio_main;
- const char fn[] = "/dev/vfio/vfio";
- char buff[sizeof (fn)] = { 0 };
- int fd;
- u8 *path = format (0, "%v%c", path_name, 0);
-
- if (readlink ((char *) path, buff, sizeof (fn)) + 1 != sizeof (fn))
- goto done;
-
- if (strncmp (fn, buff, sizeof (fn)))
- goto done;
-
- fd = atoi ((char *) file_name);
- if (fd != lvm->container_fd)
- dbm->vfio_container_fd = fd;
-
-done:
- vec_free (path);
- return 0;
-}
-
clib_error_t *
dpdk_pool_create (vlib_main_t * vm, u8 * pool_name, u32 elt_size,
u32 num_elts, u32 pool_priv_size, u16 cache_size, u8 numa,
- struct rte_mempool ** _mp,
- vlib_physmem_region_index_t * pri)
+ struct rte_mempool **_mp, vlib_physmem_region_index_t * pri)
{
- dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
struct rte_mempool *mp;
+ enum rte_iova_mode iova_mode;
vlib_physmem_region_t *pr;
dpdk_mempool_private_t priv;
clib_error_t *error = 0;
- u32 size, obj_size;
+ size_t min_chunk_size, align;
+ int map_dma = 1;
+ u32 size;
i32 ret;
uword i;
- obj_size = rte_mempool_calc_obj_size (elt_size, 0, 0);
-#if RTE_VERSION < RTE_VERSION_NUM(18, 5, 0, 0)
- size = rte_mempool_xmem_size (num_elts, obj_size, 21, 0);
-#else
- size = rte_mempool_calc_mem_size_helper (num_elts, obj_size, 21);
-#endif
+ mp = rte_mempool_create_empty ((char *) pool_name, num_elts, elt_size,
+ 512, pool_priv_size, numa, 0);
+ if (!mp)
+ return clib_error_return (0, "failed to create %s", pool_name);
+
+ rte_mempool_set_ops_byname (mp, RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
+
+ size = rte_mempool_op_calc_mem_size_default (mp, num_elts, 21,
+ &min_chunk_size, &align);
error = vlib_physmem_region_alloc (vm, (char *) pool_name, size, numa,
VLIB_PHYSMEM_F_HUGETLB |
VLIB_PHYSMEM_F_SHARED, pri);
if (error)
- return error;
+ {
+ rte_mempool_free (mp);
+ return error;
+ }
pr = vlib_physmem_get_region (vm, pri[0]);
- mp = rte_mempool_create_empty ((char *) pool_name, num_elts, elt_size,
- 512, pool_priv_size, numa, 0);
- if (!mp)
- return clib_error_return (0, "failed to create %s", pool_name);
-
- rte_mempool_set_ops_byname (mp, RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
-
/* Call the mempool priv initializer */
priv.mbp_priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
VLIB_BUFFER_DATA_SIZE;
priv.mbp_priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
rte_pktmbuf_pool_init (mp, &priv);
+ if (rte_eth_dev_count_avail () == 0)
+ map_dma = 0;
+
+ iova_mode = rte_eal_iova_mode ();
for (i = 0; i < pr->n_pages; i++)
{
- size_t page_size = 1 << pr->log2_page_size;
- ret = rte_mempool_populate_iova (mp, ((char *) pr->mem) + i * page_size,
- pr->page_table[i], page_size, 0, 0);
+ size_t page_sz = 1ull << pr->log2_page_size;
+ char *va = ((char *) pr->mem) + i * page_sz;
+ uword pa = iova_mode == RTE_IOVA_VA ?
+ pointer_to_uword (va) : pr->page_table[i];
+ ret = rte_mempool_populate_iova (mp, va, pa, page_sz, 0, 0);
if (ret < 0)
{
rte_mempool_free (mp);
return clib_error_return (0, "failed to populate %s", pool_name);
}
+      /* -1 likely means there are no PCI devices assigned to the vfio
+         container, or noiommu mode is in use, so we stop trying */
+ if (map_dma && rte_vfio_dma_map (pointer_to_uword (va), pa, page_sz))
+ map_dma = 0;
}
_mp[0] = mp;
- /* DPDK currently doesn't provide API to map DMA memory for empty mempool
- so we are using this hack, will be nice to have at least API to get
- VFIO container FD */
- if (dbm->vfio_container_fd == -1)
- foreach_directory_file ("/proc/self/fd", scan_vfio_fd, 0, 0);
-
- if (dbm->vfio_container_fd != -1)
- {
- struct vfio_iommu_type1_dma_map dm = { 0 };
- int i, rv = 0;
- dm.argsz = sizeof (struct vfio_iommu_type1_dma_map);
- dm.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
-
- /* *INDENT-OFF* */
- vec_foreach_index (i, pr->page_table)
- {
- dm.vaddr = pointer_to_uword (pr->mem) + (i << pr->log2_page_size);
- dm.size = 1 << pr->log2_page_size;
- dm.iova = pr->page_table[i];
- if ((rv = ioctl (dbm->vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dm)))
- break;
- }
- /* *INDENT-ON* */
- if (rv != 0 && errno != EINVAL)
- clib_unix_warning ("ioctl(VFIO_IOMMU_MAP_DMA) pool '%s'", pool_name);
- }
-
return 0;
}