#include <sys/mman.h>
#include <sys/fcntl.h>
#include <sys/stat.h>
+#include <unistd.h>
#include <vppinfra/linux/syscall.h>
#include <vppinfra/linux/sysfs.h>
#include <vlib/vlib.h>
#include <vlib/physmem.h>
#include <vlib/unix/unix.h>
+#include <vlib/pci/pci.h>
+#include <vlib/linux/vfio.h>
static void *
unix_physmem_alloc_aligned (vlib_main_t * vm, vlib_physmem_region_index_t idx,
			    uword n_bytes, uword alignment)
{
  vlib_physmem_region_t *pr = vlib_physmem_get_region (vm, idx);
  uword lo_offset, hi_offset;
  uword *to_free = 0;
while (1)
{
+#if USE_DLMALLOC == 0
+
mheap_get_aligned (pr->heap, n_bytes,
/* align */ alignment,
/* align offset */ 0,
&lo_offset);
+#else
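+	  /* note: despite the variable name, the mspace path yields an
+	     absolute address, and 0 on failure; normalize failure to ~0
+	     so the common check below covers both allocators */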
+ lo_offset = (uword) mspace_get_aligned (pr->heap, n_bytes,
+ alignment, ~0ULL /* offset */ );
+ if (lo_offset == 0)
+ lo_offset = ~0ULL;
+#endif
/* Allocation failed? */
if (lo_offset == ~0)
break;
- if (pr->flags & VLIB_PHYSMEM_F_FAKE)
- break;
-
/* Make sure allocation does not span DMA physical chunk boundary. */
hi_offset = lo_offset + n_bytes - 1;
- if ((lo_offset >> pr->log2_page_size) ==
- (hi_offset >> pr->log2_page_size))
+ if (((pointer_to_uword (pr->heap) + lo_offset) >> pr->log2_page_size) ==
+ ((pointer_to_uword (pr->heap) + hi_offset) >> pr->log2_page_size))
break;
      /* Allocation would span chunk boundary, queue it to be freed as soon as
         we find a suitable chunk. */
      vec_add1 (to_free, lo_offset);
    }

  if (to_free != 0)
{
uword i;
for (i = 0; i < vec_len (to_free); i++)
- mheap_put (pr->heap, to_free[i]);
+ {
+#if USE_DLMALLOC == 0
+ mheap_put (pr->heap, to_free[i]);
+#else
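+	  /* dlmalloc path: to_free entries are absolute addresses,
+	     not heap-relative offsets */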
+ mspace_put_no_offset (pr->heap, (void *) to_free[i]);
+#endif
+ }
vec_free (to_free);
}
- return lo_offset != ~0 ? pr->heap + lo_offset : 0;
+#if USE_DLMALLOC == 0
+ return lo_offset != ~0 ? (void *) (pr->heap + lo_offset) : 0;
+#else
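+  /* the mspace path already returned an absolute pointer */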
+ return lo_offset != ~0 ? (void *) lo_offset : 0;
+#endif
}
static void
unix_physmem_free (vlib_main_t * vm, vlib_physmem_region_index_t idx, void *x)
{
vlib_physmem_region_t *pr = vlib_physmem_get_region (vm, idx);
/* Return object to region's heap. */
+#if USE_DLMALLOC == 0
mheap_put (pr->heap, x - pr->heap);
+#else
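+  /* mspace frees by pointer, mheap by heap-relative offset */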
+ mspace_put_no_offset (pr->heap, x);
+#endif
}
static clib_error_t *
unix_physmem_region_alloc (vlib_main_t * vm, char *name, u32 size,
			   u8 numa_node, u32 flags,
			   vlib_physmem_region_index_t * idx)
{
- vlib_physmem_main_t *vpm = &vm->physmem_main;
+ vlib_physmem_main_t *vpm = &physmem_main;
vlib_physmem_region_t *pr;
clib_error_t *error = 0;
clib_mem_vm_alloc_t alloc = { 0 };
-
-
- if (geteuid () != 0 && (flags & VLIB_PHYSMEM_F_FAKE) == 0)
- return clib_error_return (0, "not allowed");
+ int i;
pool_get (vpm->regions, pr);
alloc.name = name;
alloc.size = size;
alloc.numa_node = numa_node;
- alloc.flags = CLIB_MEM_VM_F_SHARED;
- if ((flags & VLIB_PHYSMEM_F_FAKE) == 0)
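+  /* shared mappings allow other processes to map the same region;
+     otherwise lock the pages so they cannot be swapped out */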
+ alloc.flags = (flags & VLIB_PHYSMEM_F_SHARED) ?
+ CLIB_MEM_VM_F_SHARED : CLIB_MEM_VM_F_LOCKED;
+
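+  /* reserve huge pages and pre-allocate them up front when requested */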
+ if ((flags & VLIB_PHYSMEM_F_HUGETLB))
{
alloc.flags |= CLIB_MEM_VM_F_HUGETLB;
alloc.flags |= CLIB_MEM_VM_F_HUGETLB_PREALLOC;
pr->size = (u64) pr->n_pages << (u64) pr->log2_page_size;
pr->page_mask = (1 << pr->log2_page_size) - 1;
pr->numa_node = numa_node;
- pr->name = format (0, "%s", name);
+ pr->name = format (0, "%s%c", name, 0);
- if ((flags & VLIB_PHYSMEM_F_FAKE) == 0)
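+  /* verify that each page was actually placed on the requested numa node */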
+ for (i = 0; i < pr->n_pages; i++)
{
- int i;
- for (i = 0; i < pr->n_pages; i++)
+ void *ptr = pr->mem + ((u64) i << pr->log2_page_size);
+ int node;
+ if ((move_pages (0, 1, &ptr, 0, &node, 0) == 0) && (numa_node != node))
{
- void *ptr = pr->mem + (i << pr->log2_page_size);
- int node;
- move_pages (0, 1, &ptr, 0, &node, 0);
- if (numa_node != node)
- {
- clib_warning ("physmem page for region \'%s\' allocated on the"
- " wrong numa node (requested %u actual %u)",
- pr->name, pr->numa_node, node, i);
- break;
- }
+ clib_warning ("physmem page for region \'%s\' allocated on the"
+ " wrong numa node (requested %u actual %u)",
+	    pr->name, pr->numa_node, node);
+ break;
}
- pr->page_table = clib_mem_vm_get_paddr (pr->mem, pr->log2_page_size,
- pr->n_pages);
}
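+  /* resolve the physical address of every page in the region */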
+ pr->page_table = clib_mem_vm_get_paddr (pr->mem, pr->log2_page_size,
+ pr->n_pages);
+
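+  /* make the region visible to VFIO so devices can DMA to it */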
+ linux_vfio_dma_map_regions (vm);
+
if (flags & VLIB_PHYSMEM_F_INIT_MHEAP)
{
+#if USE_DLMALLOC == 0
pr->heap = mheap_alloc_with_flags (pr->mem, pr->size,
/* Don't want mheap mmap/munmap with IO memory. */
MHEAP_FLAG_DISABLE_VM |
MHEAP_FLAG_THREAD_SAFE);
- }
-
- if (flags & VLIB_PHYSMEM_F_HAVE_BUFFERS)
- {
- vlib_buffer_add_mem_range (vm, pointer_to_uword (pr->mem), pr->size);
+#else
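+      /* dlmalloc path: create an mspace on top of the region and prevent
+	 it from expanding beyond the physical allocation */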
+ pr->heap = create_mspace_with_base (pr->mem, pr->size, 1 /* locked */ );
+ mspace_disable_expand (pr->heap);
+#endif
}
*idx = pr->index;
static void
unix_physmem_region_free (vlib_main_t * vm, vlib_physmem_region_index_t idx)
{
- vlib_physmem_main_t *vpm = &vm->physmem_main;
+ vlib_physmem_main_t *vpm = &physmem_main;
vlib_physmem_region_t *pr = vlib_physmem_get_region (vm, idx);
if (pr->fd > 0)
clib_error_t *
unix_physmem_init (vlib_main_t * vm)
{
+ vlib_physmem_main_t *vpm = &physmem_main;
clib_error_t *error = 0;
+ u64 *pt = 0;
/* Avoid multiple calls. */
if (vm->os_physmem_alloc_aligned)
return error;
+ /* check if pagemap is accessible */
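+  /* (unprivileged readers see zero PFNs, so a non-zero PFN for our own
+     mapping means physical addresses are available) */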
+ pt = clib_mem_vm_get_paddr (&pt, min_log2 (sysconf (_SC_PAGESIZE)), 1);
+ if (pt[0])
+ vpm->flags |= VLIB_PHYSMEM_MAIN_F_HAVE_PAGEMAP;
+ vec_free (pt);
+
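+  /* set up VFIO so physmem regions can be mapped for device DMA */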
+ if ((error = linux_vfio_init (vm)))
+ return error;
+
vm->os_physmem_alloc_aligned = unix_physmem_alloc_aligned;
vm->os_physmem_free = unix_physmem_free;
vm->os_physmem_region_alloc = unix_physmem_region_alloc;
static clib_error_t *
show_physmem (vlib_main_t * vm,
unformat_input_t * input, vlib_cli_command_t * cmd)
{
- vlib_physmem_main_t *vpm = &vm->physmem_main;
+ vlib_physmem_main_t *vpm = &physmem_main;
vlib_physmem_region_t *pr;
/* *INDENT-OFF* */