#include <vppinfra/lock.h>
#include <vppinfra/hash.h>
#include <vppinfra/elf_clib.h>
-
-void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
+#include <vppinfra/sanitizer.h>
typedef struct
{
uword callers[12];
/* Count of allocations with this traceback. */
-#if CLIB_VEC64 > 0
- u64 n_allocations;
-#else
u32 n_allocations;
-#endif
/* Count of bytes allocated with this traceback. */
u32 n_bytes;
/* Initialize CLIB heap based on memory/size given by user.
Set memory to 0 and CLIB will try to allocate its own heap. */
-void *
-clib_mem_init (void *memory, uword memory_size)
+/* Internal worker shared by the public clib_mem_init* wrappers.
+   When memory is non-NULL the caller's region backs the heap; otherwise a
+   fresh anonymous mapping of memory_size bytes (rounded up to the page
+   size implied by log2_page_sz) is created.  Returns the dlmalloc mspace
+   handle, or 0 if the anonymous mapping fails.  When set_heap is nonzero
+   the new mspace is installed as the current per-thread heap. */
+static void *
+clib_mem_init_internal (void *memory, uword memory_size,
+			clib_mem_page_sz_t log2_page_sz, int set_heap)
{
u8 *heap;
+  clib_mem_main_init ();
+
if (memory)
{
+      /* Caller-provided backing: wrap it and never let dlmalloc grow
+         the heap past the supplied region. */
heap = create_mspace_with_base (memory, memory_size, 1 /* locked */ );
mspace_disable_expand (heap);
}
else
-    heap = create_mspace (memory_size, 1 /* locked */ );
+    {
+      /* Self-allocated backing: size must be a whole number of pages. */
+      memory_size = round_pow2 (memory_size,
+				clib_mem_page_bytes (log2_page_sz));
+      memory = clib_mem_vm_map_internal (0, log2_page_sz, memory_size, -1, 0,
+					 "main heap");
-  clib_mem_set_heap (heap);
+      if (memory == CLIB_MEM_VM_MAP_FAILED)
+	return 0;
+
+      heap = create_mspace_with_base (memory, memory_size, 1 /* locked */ );
+      mspace_disable_expand (heap);
+    }
+
+  /* Poison the whole arena so the sanitizer flags touches of
+     not-yet-allocated bytes. */
+  CLIB_MEM_POISON (mspace_least_addr (heap), mspace_footprint (heap));
+
+  if (set_heap)
+    clib_mem_set_heap (heap);
if (mheap_trace_main.lock == 0)
clib_spinlock_init (&mheap_trace_main.lock);
return heap;
}
+/* Public entry point: create the main heap with the default page size
+   and make it the calling thread's current heap.  memory may be 0 to
+   have the heap backing auto-allocated. */
+void *
+clib_mem_init (void *memory, uword memory_size)
+{
+  return clib_mem_init_internal (memory, memory_size,
+				 CLIB_MEM_PAGE_SZ_DEFAULT,
+				 1 /* do clib_mem_set_heap */ );
+}
+
+/* Like clib_mem_init, but the backing is always auto-allocated using
+   pages of the requested size (e.g. hugepages). */
+void *
+clib_mem_init_with_page_size (uword memory_size,
+			      clib_mem_page_sz_t log2_page_sz)
+{
+  return clib_mem_init_internal (0, memory_size, log2_page_sz,
+				 1 /* do clib_mem_set_heap */ );
+}
+
+/* Thread-safe variant; now simply delegates to the shared internal init
+   (the mspace is created locked there), so it behaves the same as
+   clib_mem_init. */
void *
clib_mem_init_thread_safe (void *memory, uword memory_size)
{
-  return clib_mem_init (memory, memory_size);
+  return clib_mem_init_internal (memory, memory_size,
+				 CLIB_MEM_PAGE_SZ_DEFAULT,
+				 1 /* do clib_mem_set_heap */ );
+}
+
+/* Destroy an mspace.  If allocation tracing is currently pointed at this
+   mspace, disable tracing first so the tracer does not keep referencing
+   freed memory. */
+void
+clib_mem_destroy_mspace (void *mspace)
+{
+  mheap_trace_main_t *tm = &mheap_trace_main;
+
+  if (tm->enabled && mspace == tm->current_traced_mheap)
+    tm->enabled = 0;
+
+  destroy_mspace (mspace);
+}
+
+/* Tear down the calling thread's current heap: destroy the mspace, then
+   unmap the VM region backing it.  The base address is captured before
+   destroy_mspace invalidates the heap. */
+void
+clib_mem_destroy (void)
+{
+  void *heap = clib_mem_get_heap ();
+  void *base = mspace_least_addr (heap);
+  clib_mem_destroy_mspace (clib_mem_get_heap ());
+  clib_mem_vm_unmap (base);
+}
+
+/* NUMA-aware init: force the backing VM onto the given numa node, then
+   build a heap WITHOUT installing it as the current heap (set_heap = 0).
+   NOTE(review): the address returned by clib_mem_vm_ext_alloc (alloc)
+   is not visibly forwarded — the caller's `memory` argument is passed to
+   clib_mem_init_internal instead; confirm callers supply the intended
+   base address. */
+void *
+clib_mem_init_thread_safe_numa (void *memory, uword memory_size, u8 numa)
+{
+  clib_mem_vm_alloc_t alloc = { 0 };
+  clib_error_t *err;
+  void *heap;
+
+  alloc.size = memory_size;
+  alloc.flags = CLIB_MEM_VM_F_NUMA_FORCE;
+  alloc.numa_node = numa;
+  if ((err = clib_mem_vm_ext_alloc (&alloc)))
+    {
+      clib_error_report (err);
+      return 0;
+    }
+
+  heap = clib_mem_init_internal (memory, memory_size,
+				 CLIB_MEM_PAGE_SZ_DEFAULT,
+				 0 /* do NOT clib_mem_set_heap */ );
+
+  ASSERT (heap);
+
+  return heap;
}
+/* format() helper rendering heap statistics (renamed from format_mheap).
+   A NULL heap argument means "use the current heap". */
u8 *
u8 *
-format_mheap (u8 * s, va_list * va)
+format_clib_mem_heap (u8 * s, va_list * va)
{
void *heap = va_arg (*va, u8 *);
int verbose = va_arg (*va, int);
struct dlmallinfo mi;
mheap_trace_main_t *tm = &mheap_trace_main;
+  if (heap == 0)
+    heap = clib_mem_get_heap ();
+
mi = mspace_mallinfo (heap);
s = format (s, "total: %U, used: %U, free: %U, trimmable: %U",
}
+/* Fill *usage with statistics for the given heap (renamed from
+   mheap_usage). */
void
-mheap_usage (void *heap, clib_mem_usage_t * usage)
+clib_mem_get_heap_usage (void *heap, clib_mem_usage_t * usage)
{
struct dlmallinfo mi = mspace_mallinfo (heap);
return rv;
}
-/*
- * These API functions seem like layering violations, but
- * by introducing them we greatly reduce the number
- * of code changes required to use dlmalloc spaces
- */
+/* Create a standalone heap (renamed from mheap_alloc_with_lock).
+   base == 0 lets dlmalloc allocate its own backing store; otherwise the
+   caller-supplied region is used.  Unlike the old code, expansion is now
+   disabled on BOTH paths, so the heap never grows past its initial
+   footprint.  Returns the mspace handle, or 0 on failure. */
void *
-mheap_alloc_with_lock (void *memory, uword size, int locked)
+clib_mem_create_heap (void *base, uword size, int is_locked, char *fmt, ...)
{
void *rv;
-  if (memory == 0)
-    return create_mspace (size, locked);
+  if (base == 0)
+    rv = create_mspace (size, is_locked);
else
-    {
-      rv = create_mspace_with_base (memory, size, locked);
-      if (rv)
-	mspace_disable_expand (rv);
-      return rv;
-    }
+    rv = create_mspace_with_base (base, size, is_locked);
+
+  if (rv)
+    mspace_disable_expand (rv);
+  return rv;
+}
+
+/* Destroy a heap created with clib_mem_create_heap. */
+void
+clib_mem_destroy_heap (void *heap)
+{
+  destroy_mspace (heap);
+}
+
+/* Return the number of free (still allocatable) bytes in the heap, per
+   dlmalloc's fordblks counter. */
+uword
+clib_mem_get_heap_free_space (void *heap)
+{
+  struct dlmallinfo dlminfo = mspace_mallinfo (heap);
+  return dlminfo.fordblks;
+}
+
+/* Return the lowest address of the heap's arena. */
+void *
+clib_mem_get_heap_base (void *heap)
+{
+  return mspace_least_addr (heap);
+}
+
+/* Return the total arena size of the heap, in bytes (dlmalloc's
+   mallinfo arena field). */
+uword
+clib_mem_get_heap_size (void *heap)
+{
+  struct dlmallinfo mi;
+  mi = mspace_mallinfo (heap);
+  return mi.arena;
+}
/*