Without pagemap access, the only way to do DMA to physical memory is by
using an IOMMU. In that case VFIO will take care of preventing
paging of such memory, so we don't need to lock here.
Change-Id: Ica9c20659fba3ea3c96202eb5f7d29c43b313fa9
Signed-off-by: Damjan Marion <damarion@cisco.com>
clib_pmalloc_init (clib_pmalloc_main_t * pm, uword size)
{
uword off, pagesize;
clib_pmalloc_init (clib_pmalloc_main_t * pm, uword size)
{
uword off, pagesize;
pm->sys_log2_page_sz = min_log2 (sysconf (_SC_PAGESIZE));
pm->lookup_log2_page_sz = pm->def_log2_page_sz;
pm->sys_log2_page_sz = min_log2 (sysconf (_SC_PAGESIZE));
pm->lookup_log2_page_sz = pm->def_log2_page_sz;
+ /* check if pagemap is accessible */
+ pt = clib_mem_vm_get_paddr (&pt, pm->sys_log2_page_sz, 1);
+ if (pt == 0 || pt[0] == 0)
+ pm->flags |= CLIB_PMALLOC_F_NO_PAGEMAP;
+
size = size ? size : ((u64) DEFAULT_RESERVED_MB) << 20;
size = round_pow2 (size, pagesize);
size = size ? size : ((u64) DEFAULT_RESERVED_MB) << 20;
size = round_pow2 (size, pagesize);
vec_validate_aligned (pm->lookup_table, vec_len (pm->pages) *
elts_per_page - 1, CLIB_CACHE_LINE_BYTES);
vec_validate_aligned (pm->lookup_table, vec_len (pm->pages) *
elts_per_page - 1, CLIB_CACHE_LINE_BYTES);
- fd = open ((char *) "/proc/self/pagemap", O_RDONLY);
-
p = first * elts_per_page;
p = first * elts_per_page;
+ if (pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP)
+ {
+ while (p < elts_per_page * count)
+ {
+ pm->lookup_table[p] = pointer_to_uword (pm->base) +
+ (p << pm->lookup_log2_page_sz);
+ p++;
+ }
+ return;
+ }
+
+ fd = open ((char *) "/proc/self/pagemap", O_RDONLY);
while (p < elts_per_page * count)
{
va = pointer_to_uword (pm->base) + (p << pm->lookup_log2_page_sz);
while (p < elts_per_page * count)
{
va = pointer_to_uword (pm->base) + (p << pm->lookup_log2_page_sz);
- mmap_flags = MAP_FIXED | MAP_ANONYMOUS | MAP_LOCKED;
+ mmap_flags = MAP_FIXED | MAP_ANONYMOUS;
+
+ if ((pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP) == 0)
+ mmap_flags |= MAP_LOCKED;
if (a->log2_subpage_sz != pm->sys_log2_page_sz)
if (a->log2_subpage_sz != pm->sys_log2_page_sz)
- mmap_flags |= MAP_HUGETLB;
+ mmap_flags |= MAP_HUGETLB | MAP_LOCKED;
if (a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM)
{
if (a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM)
{
clib_pmalloc_arena_t *a;
s = format (s, "used-pages %u reserved-pages %u default-page-size %U "
clib_pmalloc_arena_t *a;
s = format (s, "used-pages %u reserved-pages %u default-page-size %U "
- "lookup-page-size %U", vec_len (pm->pages), pm->max_pages,
+ "lookup-page-size %U%s", vec_len (pm->pages), pm->max_pages,
format_log2_page_size, pm->def_log2_page_sz,
format_log2_page_size, pm->def_log2_page_sz,
- format_log2_page_size, pm->lookup_log2_page_sz);
+ format_log2_page_size, pm->lookup_log2_page_sz,
+ pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP ? " no-pagemap" : "");
+ /* flags */
+ u32 flags;
+#define CLIB_PMALLOC_F_NO_PAGEMAP (1 << 0)
+
/* base VA address */
u8 *base;
/* base VA address */
u8 *base;