is_stale_cb, arg, 0, 0);
}
-#endif /* __included_bihash_template_inlines_h__ */
\ No newline at end of file
+#endif /* __included_bihash_template_inlines_h__ */
}
-uword
-clib_mem_vm_reserve (uword start, uword size, clib_mem_page_sz_t log2_page_sz)
-{
- clib_mem_main_t *mm = &clib_mem_main;
- uword pagesize = 1ULL << log2_page_sz;
- uword sys_page_sz = 1ULL << mm->log2_page_sz;
- uword n_bytes;
- void *base = 0, *p;
-
- size = round_pow2 (size, pagesize);
-
- /* in adition of requested reservation, we also rserve one system page
- * (typically 4K) adjacent to the start off reservation */
-
- if (start)
- {
- /* start address is provided, so we just need to make sure we are not
- * replacing existing map */
- if (start & pow2_mask (log2_page_sz))
- return ~0;
-
- base = (void *) start - sys_page_sz;
- base = mmap (base, size + sys_page_sz, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
- return (base == MAP_FAILED) ? ~0 : start;
- }
-
- /* to make sure that we get reservation aligned to page_size we need to
- * request one additional page as mmap will return us address which is
- * aligned only to system page size */
- base = mmap (0, size + pagesize, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-
- if (base == MAP_FAILED)
- return ~0;
-
- /* return additional space at the end of allocation */
- p = base + size + pagesize;
- n_bytes = (uword) p & pow2_mask (log2_page_sz);
- if (n_bytes)
- {
- p -= n_bytes;
- munmap (p, n_bytes);
- }
-
- /* return additional space at the start of allocation */
- n_bytes = pagesize - sys_page_sz - n_bytes;
- if (n_bytes)
- {
- munmap (base, n_bytes);
- base += n_bytes;
- }
-
- return (uword) base + sys_page_sz;
-}
-
__clib_export clib_mem_vm_map_hdr_t *
clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t * hdr)
{
void *
clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
- uword size, int fd, uword offset, char *name)
+ uword size, int fd, u8 log2_align, uword offset,
+ char *name)
{
clib_mem_main_t *mm = &clib_mem_main;
clib_mem_vm_map_hdr_t *hdr;
size = round_pow2 (size, 1ULL << log2_page_sz);
- base = (void *) clib_mem_vm_reserve ((uword) base, size, log2_page_sz);
+ base = (void *) clib_mem_vm_reserve ((uword) base, size,
+ clib_max (log2_page_sz, log2_align));
if (base == (void *) ~0)
return CLIB_MEM_VM_MAP_FAILED;
#include <vppinfra/time.h>
#include <vppinfra/format.h>
#include <vppinfra/clib_error.h>
+#include <sys/mman.h>
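+/* MAP_FIXED_NOREPLACE may be missing on older kernels/headers; fall back to
+ * plain MAP_FIXED there (which will replace an existing mapping) */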
+#ifndef MAP_FIXED_NOREPLACE
+#define MAP_FIXED_NOREPLACE MAP_FIXED
+#endif
__clib_export clib_mem_main_t clib_mem_main;
+__clib_export uword
+clib_mem_vm_reserve (uword start, uword size, u8 log2_align)
+{
+ clib_mem_main_t *mm = &clib_mem_main;
+ uword sys_page_sz = 1ULL << mm->log2_page_sz;
+ int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+ uword off, align;
+ void *r;
+
+ align = 1ULL << clib_max (log2_align, mm->log2_page_sz);
+ size = round_pow2 (size, align);
+
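+  /* in addition to the requested reservation, one system page (typically 4K)
+   * adjacent to the start of the reservation is also reserved */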
+ if (start)
+ {
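+      /* start address provided by the caller: it must be aligned, and we
+       * must not replace an existing mapping */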
+ if (start & (align - 1))
+ return ~0;
+
+ flags |= MAP_FIXED_NOREPLACE;
+ r = (void *) (start - sys_page_sz);
+ r = mmap (r, size + sys_page_sz, PROT_NONE, flags, -1, 0);
+ if (r == MAP_FAILED)
+ return ~0;
+
+ return start;
+ }
+
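+  /* mmap only guarantees system page alignment, so over-allocate by one
+   * alignment unit (plus the extra system page) and trim the excess */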
+ r = mmap (0, size + align + sys_page_sz, PROT_NONE, flags, -1, 0);
+
+ if (r == MAP_FAILED)
+ return ~0;
+
+ start = round_pow2 (pointer_to_uword (r) + sys_page_sz, align);
+ off = start - sys_page_sz - pointer_to_uword (r);
+
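+  /* unmap the unused space below and above the aligned region, keeping the
+   * extra system page just below 'start' mapped */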
+ if (off)
+ munmap (r, off);
+
+ if (align - off)
+ munmap ((void *) (start + size), align - off);
+
+ return start;
+}
+
__clib_export void *
clib_mem_vm_map (void *base, uword size, clib_mem_page_sz_t log2_page_sz,
char *fmt, ...)
va_start (va, fmt);
s = va_format (0, fmt, &va);
vec_add1 (s, 0);
- rv = clib_mem_vm_map_internal (base, log2_page_sz, size, -1, 0, (char *) s);
+ rv =
+ clib_mem_vm_map_internal (base, log2_page_sz, size, -1, 0, 0, (char *) s);
va_end (va);
vec_free (s);
return rv;
va_start (va, fmt);
s = va_format (0, fmt, &va);
vec_add1 (s, 0);
- rv = clib_mem_vm_map_internal (0, log2_page_sz, size, -1, 0, (char *) s);
+ rv = clib_mem_vm_map_internal (0, log2_page_sz, size, -1, 0, 0, (char *) s);
va_end (va);
vec_free (s);
return rv;
va_start (va, fmt);
s = va_format (0, fmt, &va);
vec_add1 (s, 0);
- rv = clib_mem_vm_map_internal (base, 0, size, fd, offset, (char *) s);
+ rv = clib_mem_vm_map_internal (base, 0, size, fd, 0, offset, (char *) s);
va_end (va);
vec_free (s);
return rv;
}
void *clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
- uword size, int fd, uword offset, char *name);
+ uword size, int fd, u8 log2_align,
+ uword offset, char *name);
void *clib_mem_vm_map (void *start, uword size,
clib_mem_page_sz_t log2_page_size, char *fmt, ...);
int clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...);
uword clib_mem_get_fd_page_size (int fd);
clib_mem_page_sz_t clib_mem_get_fd_log2_page_size (int fd);
-uword clib_mem_vm_reserve (uword start, uword size,
- clib_mem_page_sz_t log2_page_sz);
+uword clib_mem_vm_reserve (uword start, uword size, u8 log2_align);
u64 *clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
int n_pages);
void clib_mem_destroy (void);
{
log2_page_sz = clib_mem_log2_page_size_validate (log2_page_sz);
size = round_pow2 (size, clib_mem_page_bytes (log2_page_sz));
- base = clib_mem_vm_map_internal (0, log2_page_sz, size, -1, 0,
+ base = clib_mem_vm_map_internal (0, log2_page_sz, size, -1, 0, 0,
"main heap");
if (base == CLIB_MEM_VM_MAP_FAILED)