diff --git a/src/vppinfra/mem_dlmalloc.c b/src/vppinfra/mem_dlmalloc.c
index 6268709bb36..38226e26f8f 100644
--- a/src/vppinfra/mem_dlmalloc.c
+++ b/src/vppinfra/mem_dlmalloc.c
@@ -19,8 +19,11 @@
 #include
 #include
 #include
+#include
+#include
 
 void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
+void *clib_per_numa_mheaps[CLIB_MAX_NUMAS];
 
 typedef struct
 {
@@ -56,6 +59,10 @@ typedef struct
 
   /* Hash table mapping mheap offset to trace index. */
   uword *trace_index_by_offset;
+
+  /* So we can easily shut off current segment trace, if any */
+  void *current_traced_mheap;
+
 } mheap_trace_main_t;
 
 mheap_trace_main_t mheap_trace_main;
@@ -69,7 +76,7 @@ mheap_get_trace (uword offset, uword size)
   mheap_trace_t trace;
   uword save_enabled;
 
-  if (tm->enabled == 0)
+  if (tm->enabled == 0 || (clib_mem_get_heap () != tm->current_traced_mheap))
     return;
 
   /* Spurious Coverity warnings be gone. */
@@ -80,11 +87,6 @@ mheap_get_trace (uword offset, uword size)
   if (n_callers == 0)
     return;
 
-  /* $$$ This looks like dreck to remove... */
-  if (0)
-    for (i = n_callers; i < ARRAY_LEN (trace.callers); i++)
-      trace.callers[i] = 0;
-
   clib_spinlock_lock (&tm->lock);
 
   /* Turn off tracing to avoid embarrassment... */
@@ -202,8 +204,8 @@ mheap_trace_main_free (mheap_trace_main_t * tm)
 
 /* Initialize CLIB heap based on memory/size given by user.
    Set memory to 0 and CLIB will try to allocate its own heap.  */
-void *
-clib_mem_init (void *memory, uword memory_size)
+static void *
+clib_mem_init_internal (void *memory, uword memory_size, int set_heap)
 {
   u8 *heap;
 
@@ -215,7 +217,10 @@ clib_mem_init (void *memory, uword memory_size)
   else
     heap = create_mspace (memory_size, 1 /* locked */ );
 
-  clib_mem_set_heap (heap);
+  CLIB_MEM_POISON (mspace_least_addr (heap), mspace_footprint (heap));
+
+  if (set_heap)
+    clib_mem_set_heap (heap);
 
   if (mheap_trace_main.lock == 0)
     clib_spinlock_init (&mheap_trace_main.lock);
@@ -224,37 +229,63 @@ clib_mem_init (void *memory, uword memory_size)
 }
 
 void *
-clib_mem_init_thread_safe (void *memory, uword memory_size)
+clib_mem_init (void *memory, uword memory_size)
 {
-  return clib_mem_init (memory, memory_size);
+  return clib_mem_init_internal (memory, memory_size,
+				 1 /* do clib_mem_set_heap */ );
 }
 
-#ifdef CLIB_LINUX_KERNEL
-#include
-
-uword
-clib_mem_get_page_size (void)
+void *
+clib_mem_init_thread_safe (void *memory, uword memory_size)
 {
-  return PAGE_SIZE;
+  return clib_mem_init_internal (memory, memory_size,
+				 1 /* do clib_mem_set_heap */ );
 }
-#endif
 
-#ifdef CLIB_UNIX
-uword
-clib_mem_get_page_size (void)
+void *
+clib_mem_init_thread_safe_numa (void *memory, uword memory_size)
 {
-  return getpagesize ();
-}
+  void *heap;
+  unsigned long this_numa;
+
+  heap =
+    clib_mem_init_internal (memory, memory_size,
+			    0 /* do NOT clib_mem_set_heap */ );
+
+  ASSERT (heap);
+
+  this_numa = os_get_numa_index ();
+
+#if HAVE_NUMA_LIBRARY > 0
+  unsigned long nodemask = 1 << this_numa;
+  void *page_base;
+  unsigned long page_mask;
+  long rv;
+
+  /*
+   * Bind the heap to the current thread's NUMA node.
+   * heap is not naturally page-aligned, so fix it.
+   */
+
+  page_mask = ~(clib_mem_get_page_size () - 1);
+  page_base = (void *) (((unsigned long) heap) & page_mask);
+
+  clib_warning ("Bind heap at %llx size %llx to NUMA numa %d",
+		page_base, memory_size, this_numa);
+
+  rv = mbind (page_base, memory_size, MPOL_BIND /* mode */ ,
+	      &nodemask /* nodemask */ ,
+	      BITS (nodemask) /* max node number */ ,
+	      MPOL_MF_MOVE /* flags */ );
+
+  if (rv < 0)
+    clib_unix_warning ("mbind");
+#else
+  clib_warning ("mbind unavailable, can't bind to numa %d", this_numa);
 #endif
 
-/* Make a guess for standalone. */
-#ifdef CLIB_STANDALONE
-uword
-clib_mem_get_page_size (void)
-{
-  return 4096;
+  return heap;
 }
-#endif
 
 u8 *
 format_clib_mem_usage (u8 * s, va_list * va)
@@ -318,7 +349,8 @@ format_mheap_trace (u8 * s, va_list * va)
   int i;
 
   clib_spinlock_lock (&tm->lock);
-  if (vec_len (tm->traces) > 0)
+  if (vec_len (tm->traces) > 0 &&
+      clib_mem_get_heap () == tm->current_traced_mheap)
     {
       have_traces = 1;
 
@@ -399,7 +431,8 @@ format_mheap (u8 * s, va_list * va)
 	format (s, "\n    max total allocated %U", format_msize, mi.usmblks);
     }
 
-  s = format (s, "\n%U", format_mheap_trace, tm, verbose);
+  if (mspace_is_traced (heap))
+    s = format (s, "\n%U", format_mheap_trace, tm, verbose);
   return s;
 }
 
@@ -409,6 +442,21 @@ clib_mem_usage (clib_mem_usage_t * u)
   clib_warning ("unimp");
 }
 
+void
+mheap_usage (void *heap, clib_mem_usage_t * usage)
+{
+  struct dlmallinfo mi = mspace_mallinfo (heap);
+
+  /* TODO: Fill in some more values */
+  usage->object_count = 0;
+  usage->bytes_total = mi.arena;
+  usage->bytes_overhead = 0;
+  usage->bytes_max = 0;
+  usage->bytes_used = mi.uordblks;
+  usage->bytes_free = mi.fordblks;
+  usage->bytes_free_reclaimed = 0;
+}
+
 /* Call serial number for debugger breakpoints. */
 uword clib_mem_validate_serial = 0;
 
@@ -431,9 +479,21 @@ void
 clib_mem_trace (int enable)
 {
   mheap_trace_main_t *tm = &mheap_trace_main;
+  void *current_heap = clib_mem_get_heap ();
 
   tm->enabled = enable;
-  mheap_trace (clib_mem_get_heap (), enable);
+  mheap_trace (current_heap, enable);
+
+  if (enable)
+    tm->current_traced_mheap = current_heap;
+  else
+    tm->current_traced_mheap = 0;
+}
+
+int
+clib_mem_is_traced (void)
+{
+  return mspace_is_traced (clib_mem_get_heap ());
 }
 
 uword
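
Note on clib_mem_init_thread_safe_numa() above: mbind() applies memory policy
at page granularity, which is why the patch rounds the heap pointer down to a
page boundary before binding. A minimal standalone sketch of the same
round-down-then-mbind pattern, not part of the patch; it assumes libnuma's
<numaif.h> is installed, that node 0 exists, and is built with -lnuma:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <numaif.h>		/* mbind(), MPOL_BIND, MPOL_MF_MOVE */

int
main (void)
{
  size_t size = 1 << 20;	/* stand-in for the clib heap */
  unsigned long node = 0;	/* assumption: numa node 0 exists */
  unsigned long nodemask = 1UL << node;
  unsigned long page_mask = ~((unsigned long) sysconf (_SC_PAGESIZE) - 1);
  void *mem = malloc (size);
  void *page_base;

  if (!mem)
    return 1;

  /* mbind() wants a page-aligned address, hence the round-down,
     exactly as in the patch */
  page_base = (void *) ((unsigned long) mem & page_mask);

  if (mbind (page_base, size, MPOL_BIND, &nodemask,
	     sizeof (nodemask) * 8 /* maxnode, cf. BITS (nodemask) */ ,
	     MPOL_MF_MOVE) < 0)
    perror ("mbind");
  else
    printf ("bound %p (%zu bytes) to numa node %lu\n",
	    page_base, size, node);

  free (mem);
  return 0;
}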
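The new current_traced_mheap gate restricts tracing to whichever heap was
current when tracing was switched on: mheap_get_trace() and
format_mheap_trace() both compare clib_mem_get_heap() against it. A hedged
usage sketch, assembled only from functions appearing in this diff plus the
standard vppinfra fformat()/format() calling convention:

  /* enable allocation tracing on the currently selected heap only */
  clib_mem_trace (1 /* enable */ );

  /* ... allocations and frees on this heap are now recorded ... */

  if (clib_mem_is_traced ())
    fformat (stdout, "%U\n", format_mheap, clib_mem_get_heap (),
	     1 /* verbose */ );

  clib_mem_trace (0 /* disable; clears current_traced_mheap */ );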
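The added mheap_usage() maps dlmalloc's mallinfo fields onto clib_mem_usage_t:
arena becomes bytes_total, uordblks bytes_used, fordblks bytes_free, and the
remaining fields are zeroed per the TODO. Reading the counters back might look
like this (sketch; format_memory_size is assumed to be the existing vppinfra
size formatter):

  clib_mem_usage_t usage;

  mheap_usage (clib_mem_get_heap (), &usage);
  fformat (stdout, "heap: %U used, %U free, %U total\n",
	   format_memory_size, usage.bytes_used,
	   format_memory_size, usage.bytes_free,
	   format_memory_size, usage.bytes_total);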