X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvppinfra%2Fmem.h;h=04c26d218aaf0ee41a4e3f2e13fbc6e1a3057bf9;hb=b7b929931a07fbb27b43d5cd105f366c3e29807e;hp=1260eab28c03da348d4037d659e80cb05deb0225;hpb=7cd468a3d7dee7d6c92f69a0bb7061ae208ec727;p=vpp.git

diff --git a/src/vppinfra/mem.h b/src/vppinfra/mem.h
index 1260eab28c0..04c26d218aa 100644
--- a/src/vppinfra/mem.h
+++ b/src/vppinfra/mem.h
@@ -39,11 +39,20 @@
 #define _included_clib_mem_h

 #include <stdarg.h>
+#include <unistd.h>
+#include <sys/mman.h>
 #include <vppinfra/clib.h>	/* uword, etc */
+#include <vppinfra/clib_error.h>
+
+#if USE_DLMALLOC == 0
 #include <vppinfra/mheap_bootstrap.h>
+#else
+#include <vppinfra/dlmalloc.h>
+#endif
+
 #include <vppinfra/os.h>
-#include <vppinfra/string.h>	/* memcpy, memset */
+#include <vppinfra/string.h>	/* memcpy, clib_memset */
 #include <vppinfra/valgrind.h>

 #define CLIB_MAX_MHEAPS 256

@@ -54,14 +63,14 @@ extern void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
 always_inline void *
 clib_mem_get_per_cpu_heap (void)
 {
-  int cpu = os_get_cpu_number ();
+  int cpu = os_get_thread_index ();
   return clib_per_cpu_mheaps[cpu];
 }

 always_inline void *
 clib_mem_set_per_cpu_heap (u8 * new_heap)
 {
-  int cpu = os_get_cpu_number ();
+  int cpu = os_get_thread_index ();
   void *old = clib_per_cpu_mheaps[cpu];
   clib_per_cpu_mheaps[cpu] = new_heap;
   return old;
@@ -73,7 +82,7 @@ clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
 				  int os_out_of_memory_on_failure)
 {
   void *heap, *p;
-  uword offset, cpu;
+  uword cpu;

   if (align_offset > align)
     {
@@ -83,8 +92,11 @@ clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
       align_offset = align;
     }

-  cpu = os_get_cpu_number ();
+  cpu = os_get_thread_index ();
   heap = clib_per_cpu_mheaps[cpu];
+
+#if USE_DLMALLOC == 0
+  uword offset;
   heap = mheap_get_aligned (heap, size, align, align_offset, &offset);
   clib_per_cpu_mheaps[cpu] = heap;

@@ -102,6 +114,17 @@ clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
       os_out_of_memory ();
       return 0;
     }
+#else
+  p = mspace_get_aligned (heap, size, align, align_offset);
+  if (PREDICT_FALSE (p == 0))
+    {
+      if (os_out_of_memory_on_failure)
+	os_out_of_memory ();
+      return 0;
+    }
+
+  return p;
+#endif /* USE_DLMALLOC */
 }

 /* Memory allocator which calls os_out_of_memory() when it fails */
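The hunks above keep a single allocation entry point while switching the backing allocator at compile time: with USE_DLMALLOC == 0 the request is served by the legacy mheap allocator, otherwise by a dlmalloc mspace, and callers see no difference. A minimal usage sketch of the public wrappers built on clib_mem_alloc_aligned_at_offset (the wrappers themselves are unchanged by this diff; the sizes below are illustrative):

/* Hedged sketch, not part of the diff: a basic allocation round trip.
   clib_mem_alloc() panics via os_out_of_memory() on failure, while the
   *_or_null variants return 0 instead. */
#include <vppinfra/mem.h>

static void
alloc_demo (void)
{
  u8 *buf = clib_mem_alloc (128);			/* never 0 */
  u8 *line = clib_mem_alloc_aligned_or_null (4096, 64);	/* 0 on failure */

  if (line)
    clib_mem_free (line);
  clib_mem_free (buf);
}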
@@ -158,6 +181,7 @@ clib_mem_alloc_aligned_or_null (uword size, uword align)
 always_inline uword
 clib_mem_is_heap_object (void *p)
 {
+#if USE_DLMALLOC == 0
   void *heap = clib_mem_get_per_cpu_heap ();
   uword offset = (uword) p - (uword) heap;
   mheap_elt_t *e, *n;
@@ -170,6 +194,11 @@ clib_mem_is_heap_object (void *p)

   /* Check that heap forward and reverse pointers agree. */
   return e->n_user_data == n->prev_n_user_data;
+#else
+  void *heap = clib_mem_get_per_cpu_heap ();
+
+  return mspace_is_heap_object (heap, p);
+#endif /* USE_DLMALLOC */
 }

 always_inline void
@@ -180,7 +209,11 @@ clib_mem_free (void *p)
 {
   u8 *heap = clib_mem_get_per_cpu_heap ();

   /* Make sure object is in the correct heap. */
   ASSERT (clib_mem_is_heap_object (p));

+#if USE_DLMALLOC == 0
   mheap_put (heap, (u8 *) p - heap);
+#else
+  mspace_put (heap, p);
+#endif

 #if CLIB_DEBUG > 0
   VALGRIND_FREELIKE_BLOCK (p, 0);
@@ -208,9 +241,14 @@ clib_mem_realloc (void *p, uword new_size, uword old_size)
 always_inline uword
 clib_mem_size (void *p)
 {
-  ASSERT (clib_mem_is_heap_object (p));
+#if USE_DLMALLOC == 0
   mheap_elt_t *e = mheap_user_pointer_to_elt (p);
+  ASSERT (clib_mem_is_heap_object (p));
   return mheap_elt_data_bytes (e);
+#else
+  ASSERT (clib_mem_is_heap_object (p));
+  return mspace_usable_size_with_delta (p);
+#endif
 }

 always_inline void *
@@ -226,6 +264,7 @@ clib_mem_set_heap (void *heap)
 }

 void *clib_mem_init (void *heap, uword size);
+void *clib_mem_init_thread_safe (void *memory, uword memory_size);

 void clib_mem_exit (void);

@@ -264,19 +303,107 @@ void clib_mem_usage (clib_mem_usage_t * usage);
 u8 *format_clib_mem_usage (u8 * s, va_list * args);

-/* Include appropriate VM functions depending on whether
-   we are compiling for linux kernel, for Unix or standalone. */
-#ifdef CLIB_LINUX_KERNEL
-#include <vppinfra/vm_linux_kernel.h>
-#endif
+/* Allocate virtual address space. */
+always_inline void *
+clib_mem_vm_alloc (uword size)
+{
+  void *mmap_addr;
+  uword flags = MAP_PRIVATE;

-#ifdef CLIB_UNIX
-#include <vppinfra/vm_unix.h>
+#ifdef MAP_ANONYMOUS
+  flags |= MAP_ANONYMOUS;
 #endif

-#ifdef CLIB_STANDALONE
-#include <vppinfra/vm_standalone.h>
-#endif
+  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
+  if (mmap_addr == (void *) -1)
+    mmap_addr = 0;
+
+  return mmap_addr;
+}
+
+always_inline void
+clib_mem_vm_free (void *addr, uword size)
+{
+  munmap (addr, size);
+}
+
+always_inline void *
+clib_mem_vm_unmap (void *addr, uword size)
+{
+  void *mmap_addr;
+  uword flags = MAP_PRIVATE | MAP_FIXED;
+
+  /* To unmap we "map" with no protection.  If we actually called
+     munmap then other callers could steal the address space.  By
+     changing to PROT_NONE the kernel can free up the pages which is
+     really what we want "unmap" to mean. */
+  mmap_addr = mmap (addr, size, PROT_NONE, flags, -1, 0);
+  if (mmap_addr == (void *) -1)
+    mmap_addr = 0;
+
+  return mmap_addr;
+}
+
+always_inline void *
+clib_mem_vm_map (void *addr, uword size)
+{
+  void *mmap_addr;
+  uword flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;
+
+  mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
+  if (mmap_addr == (void *) -1)
+    mmap_addr = 0;
+
+  return mmap_addr;
+}
+
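The four inline helpers above wrap mmap(2) so that reserving address space, releasing pages while keeping the range reserved (the PROT_NONE trick in clib_mem_vm_unmap), and re-populating a fixed range are separate operations. A sketch of how they compose (the region size and offsets are illustrative):

/* Hedged sketch, not part of the diff: reserve, partially release,
   re-populate, then free a 1 GB anonymous region. */
#include <vppinfra/mem.h>

static void
va_demo (void)
{
  uword size = 1ULL << 30;
  void *base = clib_mem_vm_alloc (size);	/* 0 on failure */

  if (!base)
    return;

  /* Return the second half's pages to the kernel, keep the VA range. */
  clib_mem_vm_unmap ((u8 *) base + size / 2, size / 2);

  /* Later, map fresh zero pages back at the same fixed address. */
  clib_mem_vm_map ((u8 *) base + size / 2, size / 2);

  clib_mem_vm_free (base, size);
}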
+typedef struct
+{
+#define CLIB_MEM_VM_F_SHARED (1 << 0)
+#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
+#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
+#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
+#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
+#define CLIB_MEM_VM_F_LOCKED (1 << 5)
+  u32 flags; /**< vm allocation flags:
+                CLIB_MEM_VM_F_SHARED: request shared memory, file
+                descriptor will be provided on successful allocation.
+                CLIB_MEM_VM_F_HUGETLB: request hugepages.
+                CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
+                numa node preference.
+                CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
+                CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
+                number of available pages is not sufficient.
+                CLIB_MEM_VM_F_LOCKED: request locked memory.
+             */
+  char *name; /**< Name for memory allocation, set by caller. */
+  uword size; /**< Allocation size, set by caller. */
+  int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
+  void *addr; /**< Pointer to allocated memory, set on successful allocation. */
+  int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
+  int log2_page_size; /**< Page size in log2 format, set on successful allocation. */
+  int n_pages; /**< Number of pages. */
+  uword requested_va; /**< Request fixed position mapping */
+} clib_mem_vm_alloc_t;
+
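A hedged sketch of how a caller might fill in clib_mem_vm_alloc_t and hand it to clib_mem_vm_ext_alloc, which is declared just below; the flag combination, sizes, and names are illustrative, not mandated by this header:

/* Hedged sketch, not part of the diff: request two shared 2 MB
   hugepages with a numa-node preference. */
static clib_error_t *
shared_huge_demo (void **out)
{
  clib_mem_vm_alloc_t alloc = { 0 };
  clib_error_t *err;

  alloc.name = "demo-region";
  alloc.size = 2 * (1ULL << 21);
  alloc.numa_node = 0;
  alloc.flags = CLIB_MEM_VM_F_SHARED | CLIB_MEM_VM_F_HUGETLB
    | CLIB_MEM_VM_F_NUMA_PREFER;

  if ((err = clib_mem_vm_ext_alloc (&alloc)))
    return err;

  /* On success the allocator fills in addr, fd (because of
     CLIB_MEM_VM_F_SHARED), log2_page_size and n_pages. */
  *out = alloc.addr;
  return 0;
}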
+clib_error_t *clib_mem_create_hugetlb_fd (char *name, int *fdp);
+clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
+void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
+u64 clib_mem_vm_get_page_size (int fd);
+int clib_mem_vm_get_log2_page_size (int fd);
+u64 *clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages);
+
+typedef struct
+{
+  uword size; /**< Map size */
+  int fd; /**< File descriptor to be mapped */
+  uword requested_va; /**< Request fixed position mapping */
+  void *addr; /**< Pointer to mapped memory, if successful */
+} clib_mem_vm_map_t;
+
+clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
+void clib_mem_vm_randomize_va (uword * requested_va, u32 log2_page_size);
+void mheap_trace (void *v, int enable);

 #include <vppinfra/error.h>	/* clib_panic */
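Finally, the file descriptor produced by a CLIB_MEM_VM_F_SHARED allocation, or by clib_mem_create_hugetlb_fd, can be mapped again through clib_mem_vm_ext_map, for example from another thread of the same process. A hedged sketch under the same assumptions as above:

/* Hedged sketch, not part of the diff: re-map a shared fd.
   Assumption: requested_va == 0 asks for a kernel-chosen address. */
static clib_error_t *
map_shared_fd_demo (int fd, uword size, void **out)
{
  clib_mem_vm_map_t map = { 0 };
  clib_error_t *err;

  map.fd = fd;
  map.size = size;
  map.requested_va = 0;

  if ((err = clib_mem_vm_ext_map (&map)))
    return err;

  *out = map.addr;
  return 0;
}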