X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvppinfra%2Fmem.h;h=dfe8de9362629347511eac0b773dee8f424e0360;hb=e494ad12578b5b2a85c707e2ce67bafeb78d60c5;hp=99097263dfa2eb8054d111f118ba1bbc7a68756c;hpb=57d1ec00a953620ff59242db07c369843bb16820;p=vpp.git

diff --git a/src/vppinfra/mem.h b/src/vppinfra/mem.h
index 99097263dfa..dfe8de93626 100644
--- a/src/vppinfra/mem.h
+++ b/src/vppinfra/mem.h
@@ -45,14 +45,14 @@
 #include 	/* uword, etc */
 #include 
 
-#include 
-
 #include 
 #include 	/* memcpy, clib_memset */
 #include 
 
 #define CLIB_MAX_MHEAPS 256
-#define CLIB_MAX_NUMAS 8
+#define CLIB_MAX_NUMAS 16
+#define CLIB_MEM_VM_MAP_FAILED ((void *) ~0)
+#define CLIB_MEM_ERROR (-1)
 
 typedef enum
 {
@@ -71,13 +71,88 @@ typedef enum
   CLIB_MEM_PAGE_SZ_16G = 34,
 } clib_mem_page_sz_t;
 
+typedef struct _clib_mem_vm_map_hdr
+{
+  /* base address */
+  uword base_addr;
+
+  /* number of pages */
+  uword num_pages;
+
+  /* page size (log2) */
+  clib_mem_page_sz_t log2_page_sz;
+
+  /* file descriptor, -1 if memory is not shared */
+  int fd;
+
+  /* allocation name */
+#define CLIB_VM_MAP_HDR_NAME_MAX_LEN 64
+  char name[CLIB_VM_MAP_HDR_NAME_MAX_LEN];
+
+  /* linked list */
+  struct _clib_mem_vm_map_hdr *prev, *next;
+} clib_mem_vm_map_hdr_t;
+
+#define foreach_clib_mem_heap_flag \
+  _(0, LOCKED, "locked") \
+  _(1, UNMAP_ON_DESTROY, "unmap-on-destroy")
+
+typedef enum
+{
+#define _(i, v, s) CLIB_MEM_HEAP_F_##v = (1 << i),
+  foreach_clib_mem_heap_flag
+#undef _
+} clib_mem_heap_flag_t;
+
 typedef struct
 {
+  /* base address */
+  void *base;
+
+  /* dlmalloc mspace */
+  void *mspace;
+
+  /* heap size */
+  uword size;
+
+  /* page size (log2) */
+  clib_mem_page_sz_t log2_page_sz:8;
+
+  /* flags */
+  clib_mem_heap_flag_t flags:8;
+
+  /* name - _MUST_ be last */
+  char name[0];
+} clib_mem_heap_t;
+
+typedef struct
+{
+  /* log2 system page size */
+  clib_mem_page_sz_t log2_page_sz;
+
+  /* log2 default hugepage size */
+  clib_mem_page_sz_t log2_default_hugepage_sz;
+
+  /* log2 system default hugepage size */
+  clib_mem_page_sz_t log2_sys_default_hugepage_sz;
+
+  /* bitmap of available numa nodes */
+  u32 numa_node_bitmap;
+
   /* per CPU heaps */
   void *per_cpu_mheaps[CLIB_MAX_MHEAPS];
 
   /* per NUMA heaps */
   void *per_numa_mheaps[CLIB_MAX_NUMAS];
+
+  /* memory maps */
+  clib_mem_vm_map_hdr_t *first_map, *last_map;
+
+  /* map lock */
+  u8 map_lock;
+
+  /* last error */
+  clib_error_t *error;
 } clib_mem_main_t;
 
 extern clib_mem_main_t clib_mem_main;
@@ -85,7 +160,7 @@ extern clib_mem_main_t clib_mem_main;
 /* Unspecified NUMA socket */
 #define VEC_NUMA_UNSPECIFIED (0xFF)
 
-always_inline void *
+always_inline clib_mem_heap_t *
 clib_mem_get_per_cpu_heap (void)
 {
   int cpu = os_get_thread_index ();
@@ -93,7 +168,7 @@ clib_mem_get_per_cpu_heap (void)
 }
 
 always_inline void *
-clib_mem_set_per_cpu_heap (u8 * new_heap)
+clib_mem_set_per_cpu_heap (void *new_heap)
 {
   int cpu = os_get_thread_index ();
   void *old = clib_mem_main.per_cpu_mheaps[cpu];
@@ -109,7 +184,7 @@ clib_mem_get_per_numa_heap (u32 numa_id)
 }
 
 always_inline void *
-clib_mem_set_per_numa_heap (u8 * new_heap)
+clib_mem_set_per_numa_heap (void *new_heap)
 {
   int numa = os_get_numa_index ();
   void *old = clib_mem_main.per_numa_mheaps[numa];
@@ -141,6 +216,7 @@ clib_mem_set_thread_index (void)
 always_inline uword
 clib_mem_size_nocheck (void *p)
 {
+  size_t mspace_usable_size_with_delta (const void *p);
   return mspace_usable_size_with_delta (p);
 }
 
@@ -149,8 +225,10 @@ always_inline void *
 clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
                                   int os_out_of_memory_on_failure)
 {
-  void *heap, *p;
-  uword cpu;
+  void *mspace_get_aligned (void *msp, unsigned long n_user_data_bytes,
+                            unsigned long align, unsigned long align_offset);
+  clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();
+  void *p;
 
   if (align_offset > align)
     {
@@ -160,10 +238,7 @@ clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
       align_offset = align;
     }
 
-  cpu = os_get_thread_index ();
-  heap = clib_mem_main.per_cpu_mheaps[cpu];
-
-  p = mspace_get_aligned (heap, size, align, align_offset);
+  p = mspace_get_aligned (h->mspace, size, align, align_offset);
 
   if (PREDICT_FALSE (0 == p))
     {
@@ -230,22 +305,23 @@ clib_mem_alloc_aligned_or_null (uword size, uword align)
 always_inline uword
 clib_mem_is_heap_object (void *p)
 {
-  void *heap = clib_mem_get_per_cpu_heap ();
-
-  return mspace_is_heap_object (heap, p);
+  int mspace_is_heap_object (void *msp, void *p);
+  clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();
+  return mspace_is_heap_object (h->mspace, p);
 }
 
 always_inline void
 clib_mem_free (void *p)
 {
-  u8 *heap = clib_mem_get_per_cpu_heap ();
+  void mspace_put (void *msp, void *p_arg);
+  clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();
 
   /* Make sure object is in the correct heap. */
   ASSERT (clib_mem_is_heap_object (p));
 
   CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));
 
-  mspace_put (heap, p);
+  mspace_put (h->mspace, p);
 }
 
 always_inline void *
@@ -282,29 +358,30 @@ clib_mem_free_s (void *p)
   clib_mem_free (p);
 }
 
-always_inline void *
+always_inline clib_mem_heap_t *
 clib_mem_get_heap (void)
 {
   return clib_mem_get_per_cpu_heap ();
 }
 
-always_inline void *
-clib_mem_set_heap (void *heap)
+always_inline clib_mem_heap_t *
+clib_mem_set_heap (clib_mem_heap_t * heap)
 {
   return clib_mem_set_per_cpu_heap (heap);
 }
 
-void *clib_mem_init (void *heap, uword size);
+void clib_mem_destroy_heap (clib_mem_heap_t * heap);
+clib_mem_heap_t *clib_mem_create_heap (void *base, uword size, int is_locked,
+                                       char *fmt, ...);
+
+void clib_mem_main_init ();
+void *clib_mem_init (void *base, uword size);
+void *clib_mem_init_with_page_size (uword memory_size,
+                                    clib_mem_page_sz_t log2_page_sz);
 void *clib_mem_init_thread_safe (void *memory, uword memory_size);
-void *clib_mem_init_thread_safe_numa (void *memory, uword memory_size,
-                                      u8 numa);
 
 void clib_mem_exit (void);
 
-uword clib_mem_get_page_size (void);
-
-void clib_mem_validate (void);
-
 void clib_mem_trace (int enable);
 
 int clib_mem_is_traced (void);
@@ -334,9 +411,16 @@ typedef struct
   uword bytes_max;
 } clib_mem_usage_t;
 
-void clib_mem_usage (clib_mem_usage_t * usage);
+void clib_mem_get_heap_usage (clib_mem_heap_t * heap,
+                              clib_mem_usage_t * usage);
+
+void *clib_mem_get_heap_base (clib_mem_heap_t * heap);
+uword clib_mem_get_heap_size (clib_mem_heap_t * heap);
+uword clib_mem_get_heap_free_space (clib_mem_heap_t * heap);
 
 u8 *format_clib_mem_usage (u8 * s, va_list * args);
+u8 *format_clib_mem_heap (u8 * s, va_list * va);
+u8 *format_clib_mem_page_stats (u8 * s, va_list * va);
 
 /* Allocate virtual address space.
  */
 always_inline void *
@@ -364,98 +448,137 @@ clib_mem_vm_free (void *addr, uword size)
   munmap (addr, size);
 }
 
-always_inline void *
-clib_mem_vm_unmap (void *addr, uword size)
+void *clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
+                                uword size, int fd, uword offset, char *name);
+
+void *clib_mem_vm_map (void *start, uword size,
+                       clib_mem_page_sz_t log2_page_size, char *fmt, ...);
+void *clib_mem_vm_map_stack (uword size, clib_mem_page_sz_t log2_page_size,
+                             char *fmt, ...);
+void *clib_mem_vm_map_shared (void *start, uword size, int fd, uword offset,
+                              char *fmt, ...);
+int clib_mem_vm_unmap (void *base);
+clib_mem_vm_map_hdr_t *clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t *
+                                                     hdr);
+
+static_always_inline clib_mem_page_sz_t
+clib_mem_get_log2_page_size (void)
 {
-  void *mmap_addr;
-  uword flags = MAP_PRIVATE | MAP_FIXED;
-
-  /* To unmap we "map" with no protection.  If we actually called
-     munmap then other callers could steal the address space.  By
-     changing to PROT_NONE the kernel can free up the pages which is
-     really what we want "unmap" to mean. */
-  mmap_addr = mmap (addr, size, PROT_NONE, flags, -1, 0);
-  if (mmap_addr == (void *) -1)
-    mmap_addr = 0;
-  else
-    CLIB_MEM_UNPOISON (mmap_addr, size);
-
-  return mmap_addr;
+  return clib_mem_main.log2_page_sz;
 }
 
-always_inline void *
-clib_mem_vm_map (void *addr, uword size)
+static_always_inline uword
+clib_mem_get_page_size (void)
 {
-  void *mmap_addr;
-  uword flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;
+  return 1ULL << clib_mem_main.log2_page_sz;
+}
 
-  mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
-  if (mmap_addr == (void *) -1)
-    mmap_addr = 0;
-  else
-    CLIB_MEM_UNPOISON (mmap_addr, size);
+static_always_inline void
+clib_mem_set_log2_default_hugepage_size (clib_mem_page_sz_t log2_page_sz)
+{
+  clib_mem_main.log2_default_hugepage_sz = log2_page_sz;
+}
 
-  return mmap_addr;
+static_always_inline clib_mem_page_sz_t
+clib_mem_get_log2_default_hugepage_size ()
+{
+  return clib_mem_main.log2_default_hugepage_sz;
 }
 
-typedef struct
+static_always_inline uword
+clib_mem_get_default_hugepage_size (void)
 {
-#define CLIB_MEM_VM_F_SHARED (1 << 0)
-#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
-#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
-#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
-#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
-#define CLIB_MEM_VM_F_LOCKED (1 << 5)
-  u32 flags; /**< vm allocation flags:
-                CLIB_MEM_VM_F_SHARED: request shared memory, file
-                descriptor will be provided on successful allocation.
-                CLIB_MEM_VM_F_HUGETLB: request hugepages.
-                CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
-                numa node preference.
-                CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
-                CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
-                number of available pages is not sufficient.
-                CLIB_MEM_VM_F_LOCKED: request locked memory.
-             */
-  char *name; /**< Name for memory allocation, set by caller. */
-  uword size; /**< Allocation size, set by caller. */
-  int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
-  void *addr; /**< Pointer to allocated memory, set on successful allocation. */
-  int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
-  int log2_page_size;   /* Page size in log2 format, set on successful allocation. */
-  int n_pages;          /* Number of pages. */
-  uword requested_va;   /**< Request fixed position mapping */
-} clib_mem_vm_alloc_t;
-
-clib_error_t *clib_mem_create_fd (char *name, int *fdp);
-clib_error_t *clib_mem_create_hugetlb_fd (char *name, int *fdp);
-clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
-void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
-u64 clib_mem_get_fd_page_size (int fd);
-uword clib_mem_get_default_hugepage_size (void);
-int clib_mem_get_fd_log2_page_size (int fd);
+  return 1ULL << clib_mem_main.log2_default_hugepage_sz;
+}
+
+int clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...);
+uword clib_mem_get_fd_page_size (int fd);
+clib_mem_page_sz_t clib_mem_get_fd_log2_page_size (int fd);
 uword clib_mem_vm_reserve (uword start, uword size,
                            clib_mem_page_sz_t log2_page_sz);
-u64 *clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages);
-void clib_mem_destroy_mspace (void *mspace);
+u64 *clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
+                            int n_pages);
 void clib_mem_destroy (void);
-
-typedef struct
-{
-  uword size;           /**< Map size */
-  int fd;               /**< File descriptor to be mapped */
-  uword requested_va;   /**< Request fixed position mapping */
-  void *addr;           /**< Pointer to mapped memory, if successful */
-  u8 numa_node;
-} clib_mem_vm_map_t;
-
-clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
+int clib_mem_set_numa_affinity (u8 numa_node, int force);
+int clib_mem_set_default_numa_affinity ();
 
 void clib_mem_vm_randomize_va (uword * requested_va,
                                clib_mem_page_sz_t log2_page_size);
-void mheap_trace (void *v, int enable);
+void mheap_trace (clib_mem_heap_t * v, int enable);
 
 uword clib_mem_trace_enable_disable (uword enable);
 void clib_mem_trace (int enable);
 
+always_inline uword
+clib_mem_round_to_page_size (uword size, clib_mem_page_sz_t log2_page_size)
+{
+  ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);
+
+  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
+    log2_page_size = clib_mem_get_log2_page_size ();
+  else if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
+    log2_page_size = clib_mem_get_log2_default_hugepage_size ();
+
+  return round_pow2 (size, 1ULL << log2_page_size);
+}
+
+typedef struct
+{
+  clib_mem_page_sz_t log2_page_sz;
+  uword total;
+  uword mapped;
+  uword not_mapped;
+  uword per_numa[CLIB_MAX_NUMAS];
+  uword unknown;
+} clib_mem_page_stats_t;
+
+void clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
+                              uword n_pages, clib_mem_page_stats_t * stats);
+
+static_always_inline int
+vlib_mem_get_next_numa_node (int numa)
+{
+  clib_mem_main_t *mm = &clib_mem_main;
+  u32 bitmap = mm->numa_node_bitmap;
+
+  if (numa >= 0)
+    bitmap &= ~pow2_mask (numa + 1);
+  if (bitmap == 0)
+    return -1;
+
+  return count_trailing_zeros (bitmap);
+}
+
+static_always_inline clib_mem_page_sz_t
+clib_mem_log2_page_size_validate (clib_mem_page_sz_t log2_page_size)
+{
+  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
+    return clib_mem_get_log2_page_size ();
+  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
+    return clib_mem_get_log2_default_hugepage_size ();
+  return log2_page_size;
+}
+
+static_always_inline uword
+clib_mem_page_bytes (clib_mem_page_sz_t log2_page_size)
+{
+  return 1ULL << clib_mem_log2_page_size_validate (log2_page_size);
+}
+
+static_always_inline clib_error_t *
+clib_mem_get_last_error (void)
+{
+  return clib_mem_main.error;
+}
+
+/* bulk allocator */
+
+typedef void *clib_mem_bulk_handle_t;
+clib_mem_bulk_handle_t clib_mem_bulk_init (u32 elt_sz, u32 align,
+                                           u32 min_elts_per_chunk);
+void clib_mem_bulk_destroy (clib_mem_bulk_handle_t h);
+void *clib_mem_bulk_alloc (clib_mem_bulk_handle_t h);
+void clib_mem_bulk_free (clib_mem_bulk_handle_t h, void *p);
+u8 *format_clib_mem_bulk (u8 *s, va_list *args);
+
 #include    /* clib_panic */
 
 #endif /* _included_clib_mem_h */
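
The patch above replaces the old untyped heap pointers with a first-class clib_mem_heap_t object and adds create/destroy/usage entry points. The following is a minimal usage sketch of that new API, not part of the patch: the choice of a NULL base address, the 32 MB size, and the use of clib_mem_alloc() (declared elsewhere in this header, unchanged by the patch) are assumptions for illustration only.

/* Illustrative sketch only -- not from the patch.  Create a private heap,
   temporarily make it the per-thread heap, allocate from it, then restore
   the previous heap and destroy the private one. */
#include <vppinfra/mem.h>

static void
private_heap_example (void)
{
  clib_mem_heap_t *h, *old;
  void *p;

  /* base = 0 is assumed to let the library map the backing memory;
     heap is not locked in physical memory */
  h = clib_mem_create_heap (0, 32 << 20, 0 /* is_locked */, "example heap");
  if (h == 0)
    return;

  old = clib_mem_set_heap (h);  /* subsequent allocations come from 'h' */
  p = clib_mem_alloc (1024);
  clib_mem_free (p);            /* free while 'h' is still the current heap */
  clib_mem_set_heap (old);      /* restore the caller's heap */

  clib_mem_destroy_heap (h);
}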