#include <vppinfra/clib.h> /* uword, etc */
#include <vppinfra/clib_error.h>
-#if USE_DLMALLOC == 0
-#include <vppinfra/mheap_bootstrap.h>
-#else
-#include <vppinfra/dlmalloc.h>
-#endif
-
#include <vppinfra/os.h>
-#include <vppinfra/string.h> /* memcpy, memset */
-#include <vppinfra/valgrind.h>
+#include <vppinfra/string.h> /* memcpy, clib_memset */
+#include <vppinfra/sanitizer.h>
#define CLIB_MAX_MHEAPS 256
+#define CLIB_MAX_NUMAS 16
+#define CLIB_MEM_VM_MAP_FAILED ((void *) ~0)
+#define CLIB_MEM_ERROR (-1)
+
+typedef enum
+{
+ CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
+ CLIB_MEM_PAGE_SZ_DEFAULT = 1,
+ CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2,
+ CLIB_MEM_PAGE_SZ_4K = 12,
+ CLIB_MEM_PAGE_SZ_16K = 14,
+ CLIB_MEM_PAGE_SZ_64K = 16,
+ CLIB_MEM_PAGE_SZ_1M = 20,
+ CLIB_MEM_PAGE_SZ_2M = 21,
+ CLIB_MEM_PAGE_SZ_16M = 24,
+ CLIB_MEM_PAGE_SZ_32M = 25,
+ CLIB_MEM_PAGE_SZ_512M = 29,
+ CLIB_MEM_PAGE_SZ_1G = 30,
+ CLIB_MEM_PAGE_SZ_16G = 34,
+} clib_mem_page_sz_t;
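+
+/* Enum values are log2 page sizes, so the byte size of any concrete
+   entry is recovered with a shift, e.g. (illustrative only):
+
+     1ULL << CLIB_MEM_PAGE_SZ_4K  is 4096
+     1ULL << CLIB_MEM_PAGE_SZ_2M  is 2 MB
+     1ULL << CLIB_MEM_PAGE_SZ_1G  is 1 GB
+
+   DEFAULT and DEFAULT_HUGE are placeholders resolved at runtime, see
+   clib_mem_log2_page_size_validate () below. */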
+
+typedef struct _clib_mem_vm_map_hdr
+{
+ /* base address */
+ uword base_addr;
-/* Per CPU heaps. */
-extern void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
+ /* number of pages */
+ uword num_pages;
-always_inline void *
+ /* page size (log2) */
+ clib_mem_page_sz_t log2_page_sz;
+
+ /* file descriptor, -1 if memory is not shared */
+ int fd;
+
+ /* allocation name */
+#define CLIB_VM_MAP_HDR_NAME_MAX_LEN 64
+ char name[CLIB_VM_MAP_HDR_NAME_MAX_LEN];
+
+ /* linked list */
+ struct _clib_mem_vm_map_hdr *prev, *next;
+} clib_mem_vm_map_hdr_t;
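+
+/* Map headers form a doubly linked list anchored at first_map/last_map
+   in clib_mem_main. A sketch of walking every map via
+   clib_mem_vm_get_next_map_hdr () declared below, assuming a null
+   argument yields the first map:
+
+     clib_mem_vm_map_hdr_t *hdr = 0;
+     while ((hdr = clib_mem_vm_get_next_map_hdr (hdr)))
+       fformat (stdout, "%s: %lu pages\n", hdr->name,
+                (unsigned long) hdr->num_pages);
+*/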
+
+#define foreach_clib_mem_heap_flag \
+ _(0, LOCKED, "locked") \
+ _(1, UNMAP_ON_DESTROY, "unmap-on-destroy")
+
+typedef enum
+{
+#define _(i, v, s) CLIB_MEM_HEAP_F_##v = (1 << i),
+ foreach_clib_mem_heap_flag
+#undef _
+} clib_mem_heap_flag_t;
+
+typedef struct
+{
+ /* base address */
+ void *base;
+
+ /* dlmalloc mspace */
+ void *mspace;
+
+ /* heap size */
+ uword size;
+
+ /* page size (log2) */
+ clib_mem_page_sz_t log2_page_sz:8;
+
+ /* flags */
+ clib_mem_heap_flag_t flags:8;
+
+ /* name - _MUST_ be last */
+ char name[0];
+} clib_mem_heap_t;
+
+typedef struct
+{
+ /* log2 system page size */
+ clib_mem_page_sz_t log2_page_sz;
+
+ /* log2 default hugepage size */
+ clib_mem_page_sz_t log2_default_hugepage_sz;
+
+ /* log2 system default hugepage size */
+ clib_mem_page_sz_t log2_sys_default_hugepage_sz;
+
+ /* bitmap of available numa nodes */
+ u32 numa_node_bitmap;
+
+ /* per CPU heaps */
+ void *per_cpu_mheaps[CLIB_MAX_MHEAPS];
+
+ /* per NUMA heaps */
+ void *per_numa_mheaps[CLIB_MAX_NUMAS];
+
+ /* memory maps */
+ clib_mem_vm_map_hdr_t *first_map, *last_map;
+
+ /* map lock */
+ u8 map_lock;
+
+ /* last error */
+ clib_error_t *error;
+} clib_mem_main_t;
+
+extern clib_mem_main_t clib_mem_main;
+
+/* Unspecified NUMA socket */
+#define VEC_NUMA_UNSPECIFIED (0xFF)
+
+always_inline clib_mem_heap_t *
clib_mem_get_per_cpu_heap (void)
{
int cpu = os_get_thread_index ();
- return clib_per_cpu_mheaps[cpu];
+ return clib_mem_main.per_cpu_mheaps[cpu];
}
always_inline void *
-clib_mem_set_per_cpu_heap (u8 * new_heap)
+clib_mem_set_per_cpu_heap (void *new_heap)
{
int cpu = os_get_thread_index ();
- void *old = clib_per_cpu_mheaps[cpu];
- clib_per_cpu_mheaps[cpu] = new_heap;
+ void *old = clib_mem_main.per_cpu_mheaps[cpu];
+ clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
return old;
}
+always_inline void *
+clib_mem_get_per_numa_heap (u32 numa_id)
+{
+ ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
+ return clib_mem_main.per_numa_mheaps[numa_id];
+}
+
+always_inline void *
+clib_mem_set_per_numa_heap (void *new_heap)
+{
+ int numa = os_get_numa_index ();
+ void *old = clib_mem_main.per_numa_mheaps[numa];
+ clib_mem_main.per_numa_mheaps[numa] = new_heap;
+ return old;
+}
+
+always_inline void
+clib_mem_set_thread_index (void)
+{
+ /*
+ * Find an unused slot in the per-cpu-mheaps array,
+ * and grab it for this thread. We need to be able to
+ * push/pop the thread heap without affecting other thread(s).
+ */
+ int i;
+ if (__os_thread_index != 0)
+ return;
+ for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
+ if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
+ 0, clib_mem_main.per_cpu_mheaps[0]))
+ {
+ os_set_thread_index (i);
+ break;
+ }
+ ASSERT (__os_thread_index > 0);
+}
+
+always_inline uword
+clib_mem_size_nocheck (void *p)
+{
+ size_t mspace_usable_size_with_delta (const void *p);
+ return mspace_usable_size_with_delta (p);
+}
+
/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
int os_out_of_memory_on_failure)
{
- void *heap, *p;
- uword cpu;
+ void *mspace_get_aligned (void *msp, unsigned long n_user_data_bytes,
+ unsigned long align, unsigned long align_offset);
+ clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();
+ void *p;
if (align_offset > align)
{
align_offset = align;
}
- cpu = os_get_thread_index ();
- heap = clib_per_cpu_mheaps[cpu];
-
-#if USE_DLMALLOC == 0
- uword offset;
- heap = mheap_get_aligned (heap, size, align, align_offset, &offset);
- clib_per_cpu_mheaps[cpu] = heap;
+ p = mspace_get_aligned (h->mspace, size, align, align_offset);
- if (offset != ~0)
- {
- p = heap + offset;
-#if CLIB_DEBUG > 0
- VALGRIND_MALLOCLIKE_BLOCK (p, mheap_data_bytes (heap, offset), 0, 0);
-#endif
- return p;
- }
- else
- {
- if (os_out_of_memory_on_failure)
- os_out_of_memory ();
- return 0;
- }
-#else
- p = mspace_get_aligned (heap, size, align, align_offset);
- if (PREDICT_FALSE (p == 0))
+ if (PREDICT_FALSE (0 == p))
{
if (os_out_of_memory_on_failure)
os_out_of_memory ();
return 0;
}
+ CLIB_MEM_UNPOISON (p, size);
return p;
-#endif /* USE_DLMALLOC */
}
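+
+/* Sketch of the align / align_offset contract: request a 64-byte aligned
+   object whose user data starts sizeof (my_hdr_t) bytes past the aligned
+   boundary (the pattern vectors use for their headers). my_hdr_t is a
+   hypothetical header type; the final argument requests
+   os_out_of_memory () on failure:
+
+     p = clib_mem_alloc_aligned_at_offset (size, 64, sizeof (my_hdr_t), 1);
+*/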
/* Memory allocator which calls os_out_of_memory() when it fails */
always_inline uword
clib_mem_is_heap_object (void *p)
{
-#if USE_DLMALLOC == 0
- void *heap = clib_mem_get_per_cpu_heap ();
- uword offset = (uword) p - (uword) heap;
- mheap_elt_t *e, *n;
-
- if (offset >= vec_len (heap))
- return 0;
-
- e = mheap_elt_at_uoffset (heap, offset);
- n = mheap_next_elt (e);
-
- /* Check that heap forward and reverse pointers agree. */
- return e->n_user_data == n->prev_n_user_data;
-#else
- void *heap = clib_mem_get_per_cpu_heap ();
-
- return mspace_is_heap_object (heap, p);
-#endif /* USE_DLMALLOC */
+ int mspace_is_heap_object (void *msp, void *p);
+ clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();
+ return mspace_is_heap_object (h->mspace, p);
}
always_inline void
clib_mem_free (void *p)
{
- u8 *heap = clib_mem_get_per_cpu_heap ();
+ void mspace_put (void *msp, void *p_arg);
+ clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();
/* Make sure object is in the correct heap. */
ASSERT (clib_mem_is_heap_object (p));
-#if USE_DLMALLOC == 0
- mheap_put (heap, (u8 *) p - heap);
-#else
- mspace_put (heap, p);
-#endif
+ CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));
-#if CLIB_DEBUG > 0
- VALGRIND_FREELIKE_BLOCK (p, 0);
-#endif
+ mspace_put (h->mspace, p);
}
always_inline void *
copy_size = old_size;
else
copy_size = new_size;
- clib_memcpy (q, p, copy_size);
+ clib_memcpy_fast (q, p, copy_size);
clib_mem_free (p);
}
return q;
always_inline uword
clib_mem_size (void *p)
{
-#if USE_DLMALLOC == 0
- mheap_elt_t *e = mheap_user_pointer_to_elt (p);
ASSERT (clib_mem_is_heap_object (p));
- return mheap_elt_data_bytes (e);
-#else
- ASSERT (clib_mem_is_heap_object (p));
- return mspace_usable_size_with_delta (p);
-#endif
+ return clib_mem_size_nocheck (p);
}
-always_inline void *
+always_inline void
+clib_mem_free_s (void *p)
+{
+ uword size = clib_mem_size (p);
+ CLIB_MEM_UNPOISON (p, size);
+ memset_s_inline (p, size, 0, size);
+ clib_mem_free (p);
+}
+
+always_inline clib_mem_heap_t *
clib_mem_get_heap (void)
{
return clib_mem_get_per_cpu_heap ();
}
-always_inline void *
-clib_mem_set_heap (void *heap)
+always_inline clib_mem_heap_t *
+clib_mem_set_heap (clib_mem_heap_t * heap)
{
return clib_mem_set_per_cpu_heap (heap);
}
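+
+/* The usual push/pop pattern for allocating from a specific heap
+   (sketch; clib_mem_alloc is defined elsewhere in this file):
+
+     clib_mem_heap_t *old = clib_mem_set_heap (h);
+     void *p = clib_mem_alloc (n_bytes);
+     clib_mem_set_heap (old);
+*/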
-void *clib_mem_init (void *heap, uword size);
+void clib_mem_destroy_heap (clib_mem_heap_t * heap);
+clib_mem_heap_t *clib_mem_create_heap (void *base, uword size, int is_locked,
+ char *fmt, ...);
+
+void clib_mem_main_init (void);
+void *clib_mem_init (void *base, uword size);
+void *clib_mem_init_with_page_size (uword memory_size,
+ clib_mem_page_sz_t log2_page_sz);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);
void clib_mem_exit (void);
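+
+/* Typical startup sketch: create a 64 MB main heap at an arbitrary base
+   address and install it as the per-cpu heap of the calling thread:
+
+     clib_mem_init (0, 64ULL << 20);
+*/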
-uword clib_mem_get_page_size (void);
-
-void clib_mem_validate (void);
-
void clib_mem_trace (int enable);
+int clib_mem_is_traced (void);
+
typedef struct
{
/* Total number of objects allocated. */
uword bytes_max;
} clib_mem_usage_t;
-void clib_mem_usage (clib_mem_usage_t * usage);
+void clib_mem_get_heap_usage (clib_mem_heap_t * heap,
+ clib_mem_usage_t * usage);
+
+void *clib_mem_get_heap_base (clib_mem_heap_t * heap);
+uword clib_mem_get_heap_size (clib_mem_heap_t * heap);
+uword clib_mem_get_heap_free_space (clib_mem_heap_t * heap);
u8 *format_clib_mem_usage (u8 * s, va_list * args);
+u8 *format_clib_mem_heap (u8 * s, va_list * va);
+u8 *format_clib_mem_page_stats (u8 * s, va_list * va);
/* Allocate virtual address space. */
always_inline void *
mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
if (mmap_addr == (void *) -1)
mmap_addr = 0;
+ else
+ CLIB_MEM_UNPOISON (mmap_addr, size);
return mmap_addr;
}
munmap (addr, size);
}
-always_inline void *
-clib_mem_vm_unmap (void *addr, uword size)
+void *clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
+ uword size, int fd, uword offset, char *name);
+
+void *clib_mem_vm_map (void *start, uword size,
+ clib_mem_page_sz_t log2_page_size, char *fmt, ...);
+void *clib_mem_vm_map_stack (uword size, clib_mem_page_sz_t log2_page_size,
+ char *fmt, ...);
+void *clib_mem_vm_map_shared (void *start, uword size, int fd, uword offset,
+ char *fmt, ...);
+int clib_mem_vm_unmap (void *base);
+clib_mem_vm_map_hdr_t *clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t *
+ hdr);
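+
+/* Sketch of a named anonymous mapping with default page size; the name
+   is recorded in the map header list above. On failure
+   CLIB_MEM_VM_MAP_FAILED is returned:
+
+     void *p = clib_mem_vm_map (0, 1ULL << 20, CLIB_MEM_PAGE_SZ_DEFAULT,
+                                "example-map");
+     if (p == CLIB_MEM_VM_MAP_FAILED)
+       return;
+*/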
+
+static_always_inline clib_mem_page_sz_t
+clib_mem_get_log2_page_size (void)
{
- void *mmap_addr;
- uword flags = MAP_PRIVATE | MAP_FIXED;
+ return clib_mem_main.log2_page_sz;
+}
- /* To unmap we "map" with no protection. If we actually called
- munmap then other callers could steal the address space. By
- changing to PROT_NONE the kernel can free up the pages which is
- really what we want "unmap" to mean. */
- mmap_addr = mmap (addr, size, PROT_NONE, flags, -1, 0);
- if (mmap_addr == (void *) -1)
- mmap_addr = 0;
+static_always_inline uword
+clib_mem_get_page_size (void)
+{
+ return 1ULL << clib_mem_main.log2_page_sz;
+}
- return mmap_addr;
+static_always_inline void
+clib_mem_set_log2_default_hugepage_size (clib_mem_page_sz_t log2_page_sz)
+{
+ clib_mem_main.log2_default_hugepage_sz = log2_page_sz;
}
-always_inline void *
-clib_mem_vm_map (void *addr, uword size)
+static_always_inline clib_mem_page_sz_t
+clib_mem_get_log2_default_hugepage_size (void)
{
- void *mmap_addr;
- uword flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;
+ return clib_mem_main.log2_default_hugepage_sz;
+}
- mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
- if (mmap_addr == (void *) -1)
- mmap_addr = 0;
+static_always_inline uword
+clib_mem_get_default_hugepage_size (void)
+{
+ return 1ULL << clib_mem_main.log2_default_hugepage_sz;
+}
- return mmap_addr;
+int clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...);
+uword clib_mem_get_fd_page_size (int fd);
+clib_mem_page_sz_t clib_mem_get_fd_log2_page_size (int fd);
+uword clib_mem_vm_reserve (uword start, uword size,
+ clib_mem_page_sz_t log2_page_sz);
+u64 *clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
+ int n_pages);
+void clib_mem_destroy (void);
+int clib_mem_set_numa_affinity (u8 numa_node, int force);
+int clib_mem_set_default_numa_affinity (void);
+void clib_mem_vm_randomize_va (uword * requested_va,
+ clib_mem_page_sz_t log2_page_size);
+void mheap_trace (clib_mem_heap_t * v, int enable);
+uword clib_mem_trace_enable_disable (uword enable);
+
+always_inline uword
+clib_mem_round_to_page_size (uword size, clib_mem_page_sz_t log2_page_size)
+{
+ ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);
+
+ if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
+ log2_page_size = clib_mem_get_log2_page_size ();
+ else if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
+ log2_page_size = clib_mem_get_log2_default_hugepage_size ();
+
+ return round_pow2 (size, 1ULL << log2_page_size);
}
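+
+/* Worked example: a 5000-byte request rounded to 4K pages yields
+   round_pow2 (5000, 4096) == 8192:
+
+     uword sz = clib_mem_round_to_page_size (5000, CLIB_MEM_PAGE_SZ_4K);
+*/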
typedef struct
{
-#define CLIB_MEM_VM_F_SHARED (1 << 0)
-#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
-#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
-#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
-#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
-#define CLIB_MEM_VM_F_LOCKED (1 << 5)
- u32 flags; /**< vm allocation flags:
- <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
- descriptor will be provided on successful allocation.
- <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
- <br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
- numa node preference.
- <br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
- <br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
- number of available pages is not sufficient.
- <br> CLIB_MEM_VM_F_LOCKED: request locked memory.
- */
- char *name; /**< Name for memory allocation, set by caller. */
- uword size; /**< Allocation size, set by caller. */
- int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
- void *addr; /**< Pointer to allocated memory, set on successful allocation. */
- int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
- int log2_page_size; /* Page size in log2 format, set on successful allocation. */
- int n_pages; /* Number of pages. */
- uword requested_va; /**< Request fixed position mapping */
-} clib_mem_vm_alloc_t;
-
-clib_error_t *clib_mem_create_hugetlb_fd (char *name, int *fdp);
-clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
-void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
-u64 clib_mem_vm_get_page_size (int fd);
-int clib_mem_vm_get_log2_page_size (int fd);
-u64 *clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages);
+ clib_mem_page_sz_t log2_page_sz;
+ uword total;
+ uword mapped;
+ uword not_mapped;
+ uword per_numa[CLIB_MAX_NUMAS];
+ uword unknown;
+} clib_mem_page_stats_t;
+
+void clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
+ uword n_pages, clib_mem_page_stats_t * stats);
+
+static_always_inline int
+vlib_mem_get_next_numa_node (int numa)
+{
+ clib_mem_main_t *mm = &clib_mem_main;
+ u32 bitmap = mm->numa_node_bitmap;
-typedef struct
+ if (numa >= 0)
+ bitmap &= ~pow2_mask (numa + 1);
+ if (bitmap == 0)
+ return -1;
+
+ return count_trailing_zeros (bitmap);
+}
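+
+/* Iterating all present numa nodes, lowest to highest (sketch;
+   process_numa_node is a hypothetical callback):
+
+     int numa = -1;
+     while ((numa = vlib_mem_get_next_numa_node (numa)) >= 0)
+       process_numa_node (numa);
+*/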
+
+static_always_inline clib_mem_page_sz_t
+clib_mem_log2_page_size_validate (clib_mem_page_sz_t log2_page_size)
+{
+ if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
+ return clib_mem_get_log2_page_size ();
+ if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
+ return clib_mem_get_log2_default_hugepage_size ();
+ return log2_page_size;
+}
+
+static_always_inline uword
+clib_mem_page_bytes (clib_mem_page_sz_t log2_page_size)
{
- uword size; /**< Map size */
- int fd; /**< File descriptor to be mapped */
- uword requested_va; /**< Request fixed position mapping */
- void *addr; /**< Pointer to mapped memory, if successful */
-} clib_mem_vm_map_t;
+ return 1ULL << clib_mem_log2_page_size_validate (log2_page_size);
+}
+
+static_always_inline clib_error_t *
+clib_mem_get_last_error (void)
+{
+ return clib_mem_main.error;
+}
+
+/* bulk allocator */
-clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
-void clib_mem_vm_randomize_va (uword * requested_va, u32 log2_page_size);
-void mheap_trace (void *v, int enable);
+typedef void *clib_mem_bulk_handle_t;
+clib_mem_bulk_handle_t clib_mem_bulk_init (u32 elt_sz, u32 align,
+ u32 min_elts_per_chunk);
+void clib_mem_bulk_destroy (clib_mem_bulk_handle_t h);
+void *clib_mem_bulk_alloc (clib_mem_bulk_handle_t h);
+void clib_mem_bulk_free (clib_mem_bulk_handle_t h, void *p);
+u8 *format_clib_mem_bulk (u8 *s, va_list *args);
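+
+/* Bulk allocator usage sketch: fixed-size elements, cache-line aligned,
+   carved out of chunks of at least 32 elements. my_elt_t is a
+   hypothetical element type:
+
+     clib_mem_bulk_handle_t h;
+     h = clib_mem_bulk_init (sizeof (my_elt_t), CLIB_CACHE_LINE_BYTES, 32);
+     my_elt_t *e = clib_mem_bulk_alloc (h);
+     clib_mem_bulk_free (h, e);
+     clib_mem_bulk_destroy (h);
+*/
+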
#include <vppinfra/error.h> /* clib_panic */