/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vppinfra/format.h>
#include <vppinfra/dlmalloc.h>
#include <vppinfra/os.h>
#include <vppinfra/lock.h>
#include <vppinfra/hash.h>
#include <vppinfra/elf_clib.h>
#include <vppinfra/sanitizer.h>
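
/*
 * This file layers the CLIB heap API on top of dlmalloc mspaces and adds
 * an optional allocation-trace facility: while tracing is enabled, each
 * allocation records a backtrace, and allocations sharing a traceback are
 * aggregated into a single mheap_trace_t record.
 */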
typedef struct
{
  /* Address of callers: outer first, inner last. */
  uword callers[12];

  /* Count of allocations with this traceback. */
  u32 n_allocations;

  /* Count of bytes allocated with this traceback. */
  u32 n_bytes;

  /* Offset of this item */
  uword offset;
} mheap_trace_t;

typedef struct
{
  clib_spinlock_t lock;
  uword enabled;

  mheap_trace_t *traces;

  /* Indices of free traces. */
  u32 *trace_free_list;

  /* Hash table mapping callers to trace index. */
  uword *trace_by_callers;

  /* Hash table mapping mheap offset to trace index. */
  uword *trace_index_by_offset;

  /* So we can easily shut off current segment trace, if any */
  void *current_traced_mheap;
} mheap_trace_main_t;

mheap_trace_main_t mheap_trace_main;
static void
mheap_get_trace (uword offset, uword size)
{
  mheap_trace_main_t *tm = &mheap_trace_main;
  mheap_trace_t *t = 0;
  uword i, n_callers, trace_index, *p;
  mheap_trace_t trace;
  uword save_enabled;

  if (tm->enabled == 0 || (clib_mem_get_heap () != tm->current_traced_mheap))
    return;

  /* Spurious Coverity warnings be gone. */
  clib_memset (&trace, 0, sizeof (trace));

  clib_spinlock_lock (&tm->lock);

  /* Turn off tracing to avoid embarrassment... */
  save_enabled = tm->enabled;
  tm->enabled = 0;

  /* Skip our frame and mspace_get_aligned's frame */
  n_callers = clib_backtrace (trace.callers, ARRAY_LEN (trace.callers), 2);
  if (n_callers == 0)
    goto out;

  if (!tm->trace_by_callers)
    tm->trace_by_callers =
      hash_create_shmem (0, sizeof (trace.callers), sizeof (uword));

  p = hash_get_mem (tm->trace_by_callers, &trace.callers);
  if (p)
    {
      trace_index = p[0];
      t = tm->traces + trace_index;
    }
  else
    {
      i = vec_len (tm->trace_free_list);
      if (i > 0)
        {
          trace_index = tm->trace_free_list[i - 1];
          _vec_len (tm->trace_free_list) = i - 1;
        }
      else
        {
          mheap_trace_t *old_start = tm->traces;
          mheap_trace_t *old_end = vec_end (tm->traces);

          vec_add2 (tm->traces, t, 1);

          if (tm->traces != old_start)
            {
              /* Trace vector moved; rekey hash entries pointing into it. */
              hash_pair_t *p;
              mheap_trace_t *q;
              hash_foreach_pair (p, tm->trace_by_callers,
              ({
                q = uword_to_pointer (p->key, mheap_trace_t *);
                ASSERT (q >= old_start && q < old_end);
                p->key = pointer_to_uword (tm->traces + (q - old_start));
              }));
            }
          trace_index = t - tm->traces;
        }

      t = tm->traces + trace_index;
      t[0] = trace;
      t->n_allocations = 0;
      t->n_bytes = 0;
      hash_set_mem (tm->trace_by_callers, t->callers, trace_index);
    }

  t->n_allocations += 1;
  t->n_bytes += size;
  t->offset = offset;		/* keep a sample to autopsy */
  hash_set (tm->trace_index_by_offset, offset, t - tm->traces);

out:
  tm->enabled = save_enabled;
  clib_spinlock_unlock (&tm->lock);
}
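
/* Reverse of mheap_get_trace: look up the trace record for this offset,
   decrement its counts, and recycle the record once its last allocation
   has been freed. */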
static void
mheap_put_trace (uword offset, uword size)
{
  mheap_trace_t *t;
  uword trace_index, *p;
  mheap_trace_main_t *tm = &mheap_trace_main;
  uword save_enabled;

  if (tm->enabled == 0)
    return;

  clib_spinlock_lock (&tm->lock);

  /* Turn off tracing for a moment */
  save_enabled = tm->enabled;
  tm->enabled = 0;

  p = hash_get (tm->trace_index_by_offset, offset);
  if (!p)
    {
      tm->enabled = save_enabled;
      clib_spinlock_unlock (&tm->lock);
      return;
    }

  trace_index = p[0];
  hash_unset (tm->trace_index_by_offset, offset);
  ASSERT (trace_index < vec_len (tm->traces));

  t = tm->traces + trace_index;
  ASSERT (t->n_allocations > 0);
  ASSERT (t->n_bytes >= size);
  t->n_allocations -= 1;
  t->n_bytes -= size;
  if (t->n_allocations == 0)
    {
      hash_unset_mem (tm->trace_by_callers, t->callers);
      vec_add1 (tm->trace_free_list, trace_index);
      clib_memset (t, 0, sizeof (t[0]));
    }
  tm->enabled = save_enabled;
  clib_spinlock_unlock (&tm->lock);
}
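
/* Release all trace state; called when tracing is switched off. */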
static void
mheap_trace_main_free (mheap_trace_main_t * tm)
{
  vec_free (tm->traces);
  vec_free (tm->trace_free_list);
  hash_free (tm->trace_by_callers);
  hash_free (tm->trace_index_by_offset);
}
static clib_mem_heap_t *
clib_mem_create_heap_internal (void *base, uword size,
			       clib_mem_page_sz_t log2_page_sz, int is_locked,
			       char *name)
{
  clib_mem_heap_t *h;
  u8 flags = 0;
  int sz = sizeof (clib_mem_heap_t);

  if (base == 0)
    {
      log2_page_sz = clib_mem_log2_page_size_validate (log2_page_sz);
      size = round_pow2 (size, clib_mem_page_bytes (log2_page_sz));
      base = clib_mem_vm_map_internal (0, log2_page_sz, size, -1, 0,
				       "main heap");

      if (base == CLIB_MEM_VM_MAP_FAILED)
	return 0;

      flags = CLIB_MEM_HEAP_F_UNMAP_ON_DESTROY;
    }
  else
    log2_page_sz = CLIB_MEM_PAGE_SZ_UNKNOWN;

  if (is_locked)
    flags |= CLIB_MEM_HEAP_F_LOCKED;

  /* Heap header lives at the start of the mapping; the mspace follows. */
  h = base;
  h->base = base;
  h->size = size;
  h->log2_page_sz = log2_page_sz;
  h->flags = flags;
  sz = strlen (name);
  strcpy (h->name, name);
  sz = round_pow2 (sz + sizeof (clib_mem_heap_t), 16);
  h->mspace = create_mspace_with_base (base + sz, size - sz, is_locked);

  mspace_disable_expand (h->mspace);

  CLIB_MEM_POISON (mspace_least_addr (h->mspace),
		   mspace_footprint (h->mspace));

  return h;
}
/* Initialize CLIB heap based on memory/size given by user.
   Set memory to 0 and CLIB will try to allocate its own heap. */
static void *
clib_mem_init_internal (void *base, uword size,
			clib_mem_page_sz_t log2_page_sz)
{
  clib_mem_heap_t *h;

  clib_mem_main_init ();

  h = clib_mem_create_heap_internal (base, size, log2_page_sz,
				     1 /* is_locked */ , "main heap");

  clib_mem_set_heap (h);

  if (mheap_trace_main.lock == 0)
    clib_spinlock_init (&mheap_trace_main.lock);

  return h;
}
__clib_export void *
clib_mem_init (void *memory, uword memory_size)
{
  return clib_mem_init_internal (memory, memory_size,
				 CLIB_MEM_PAGE_SZ_DEFAULT);
}
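
/*
 * A minimal usage sketch (hypothetical caller; the 64 MB size is
 * arbitrary): pass 0 as memory to let CLIB map its own main heap.
 *
 *   clib_mem_init (0, 64ULL << 20);
 *   u8 *v = 0;
 *   vec_add1 (v, 42);   // allocates from the new main heap
 */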
__clib_export void *
clib_mem_init_with_page_size (uword memory_size,
			      clib_mem_page_sz_t log2_page_sz)
{
  return clib_mem_init_internal (0, memory_size, log2_page_sz);
}
__clib_export void *
clib_mem_init_thread_safe (void *memory, uword memory_size)
{
  return clib_mem_init_internal (memory, memory_size,
				 CLIB_MEM_PAGE_SZ_DEFAULT);
}
__clib_export void
clib_mem_destroy (void)
{
  mheap_trace_main_t *tm = &mheap_trace_main;
  clib_mem_heap_t *heap = clib_mem_get_heap ();
  void *base = mspace_least_addr (heap->mspace);

  if (tm->enabled && heap->mspace == tm->current_traced_mheap)
    tm->enabled = 0;

  destroy_mspace (heap->mspace);
  clib_mem_vm_unmap (base);
}
__clib_export u8 *
format_clib_mem_usage (u8 * s, va_list * va)
{
  int verbose = va_arg (*va, int);
  return format (s, "$$$$ heap at %llx verbose %d", clib_mem_get_heap (),
		 verbose);
}
/*
 * Magic decoder ring for mallinfo stats (ala dlmalloc):
 *
 * size_t arena;     / * Non-mmapped space allocated (bytes) * /
 * size_t ordblks;   / * Number of free chunks * /
 * size_t smblks;    / * Number of free fastbin blocks * /
 * size_t hblks;     / * Number of mmapped regions * /
 * size_t hblkhd;    / * Space allocated in mmapped regions (bytes) * /
 * size_t usmblks;   / * Maximum total allocated space (bytes) * /
 * size_t fsmblks;   / * Space in freed fastbin blocks (bytes) * /
 * size_t uordblks;  / * Total allocated space (bytes) * /
 * size_t fordblks;  / * Total free space (bytes) * /
 * size_t keepcost;  / * Top-most, releasable space (bytes) * /
 */
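
/*
 * A minimal sketch of reading these counters directly (hypothetical
 * caller, assuming a valid heap pointer h):
 *
 *   struct dlmallinfo mi = mspace_mallinfo (h->mspace);
 *   fformat (stdout, "used %llu, free %llu\n",
 *            (u64) mi.uordblks, (u64) mi.fordblks);
 */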
u8 *
format_msize (u8 * s, va_list * va)
{
  uword a = va_arg (*va, uword);

  if (a >= 1ULL << 30)
    s = format (s, "%.2fG", (((f64) a) / ((f64) (1ULL << 30))));
  else if (a >= 1ULL << 20)
    s = format (s, "%.2fM", (((f64) a) / ((f64) (1ULL << 20))));
  else if (a >= 1ULL << 10)
    s = format (s, "%.2fK", (((f64) a) / ((f64) (1ULL << 10))));
  else
    s = format (s, "%lld", a);
  return s;
}
static int
mheap_trace_sort (const void *_t1, const void *_t2)
{
  const mheap_trace_t *t1 = _t1;
  const mheap_trace_t *t2 = _t2;
  word cmp;

  /* Sort by bytes outstanding, then by allocation count, descending. */
  cmp = (word) t2->n_bytes - (word) t1->n_bytes;
  if (!cmp)
    cmp = (word) t2->n_allocations - (word) t1->n_allocations;
  return cmp;
}
u8 *
format_mheap_trace (u8 * s, va_list * va)
{
  mheap_trace_main_t *tm = va_arg (*va, mheap_trace_main_t *);
  int verbose = va_arg (*va, int);
  int have_traces = 0;
  int i;

  clib_spinlock_lock (&tm->lock);
  if (vec_len (tm->traces) > 0 &&
      clib_mem_get_heap () == tm->current_traced_mheap)
    {
      have_traces = 1;

      /* Make a copy of traces since we'll be sorting them. */
      mheap_trace_t *t, *traces_copy;
      u32 indent, total_objects_traced;

      traces_copy = vec_dup (tm->traces);
      qsort (traces_copy, vec_len (traces_copy), sizeof (traces_copy[0]),
	     mheap_trace_sort);

      total_objects_traced = 0;
      s = format (s, "\n");
      vec_foreach (t, traces_copy)
      {
	/* Skip over free elements. */
	if (t->n_allocations == 0)
	  continue;

	total_objects_traced += t->n_allocations;

	/* When not verbose only report allocations of more than 1k. */
	if (!verbose && t->n_bytes < 1024)
	  continue;

	if (t == traces_copy)
	  s = format (s, "%=9s%=9s %=10s Traceback\n", "Bytes", "Count",
		      "Sample");
	s = format (s, "%9d%9d %p", t->n_bytes, t->n_allocations, t->offset);
	indent = format_get_indent (s);
	for (i = 0; i < ARRAY_LEN (t->callers) && t->callers[i]; i++)
	  {
	    if (i > 0)
	      s = format (s, "%U", format_white_space, indent);
#if defined(CLIB_UNIX) && !defined(__APPLE__)
	    /* $$$$ does this actually work? */
	    s = format (s, " %U\n", format_clib_elf_symbol_with_address,
			t->callers[i]);
#else
	    s = format (s, " %p\n", t->callers[i]);
#endif
	  }
      }

      s = format (s, "%d total traced objects\n", total_objects_traced);

      vec_free (traces_copy);
    }
  clib_spinlock_unlock (&tm->lock);
  if (have_traces == 0)
    s = format (s, "no traced allocations\n");

  return s;
}
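
/*
 * A minimal usage sketch (hypothetical caller): render the trace table
 * for the currently traced heap, verbosely.
 *
 *   u8 *out = format (0, "%U", format_mheap_trace, &mheap_trace_main, 1);
 *   fformat (stdout, "%v", out);
 *   vec_free (out);
 */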
__clib_export u8 *
format_clib_mem_heap (u8 * s, va_list * va)
{
  clib_mem_heap_t *heap = va_arg (*va, clib_mem_heap_t *);
  int verbose = va_arg (*va, int);
  struct dlmallinfo mi;
  mheap_trace_main_t *tm = &mheap_trace_main;
  u32 indent = format_get_indent (s) + 2;

  if (heap == 0)
    heap = clib_mem_get_heap ();

  mi = mspace_mallinfo (heap->mspace);

  s = format (s, "base %p, size %U",
	      heap->base, format_memory_size, heap->size);

#define _(i,v,str) \
  if (heap->flags & CLIB_MEM_HEAP_F_##v) s = format (s, ", %s", str);
  foreach_clib_mem_heap_flag;
#undef _

  s = format (s, ", name '%s'", heap->name);

  if (heap->log2_page_sz != CLIB_MEM_PAGE_SZ_UNKNOWN)
    {
      clib_mem_page_stats_t stats;
      clib_mem_get_page_stats (heap->base, heap->log2_page_sz,
			       heap->size >> heap->log2_page_sz, &stats);
      s = format (s, "\n%U%U", format_white_space, indent,
		  format_clib_mem_page_stats, &stats);
    }

  s = format (s, "\n%Utotal: %U, used: %U, free: %U, trimmable: %U",
	      format_white_space, indent,
	      format_msize, mi.arena,
	      format_msize, mi.uordblks,
	      format_msize, mi.fordblks, format_msize, mi.keepcost);
  if (verbose > 0)
    {
      s = format (s, "\n%Ufree chunks %llu free fastbin blks %llu",
		  format_white_space, indent + 2, mi.ordblks, mi.smblks);
      s = format (s, "\n%Umax total allocated %U",
		  format_white_space, indent + 2, format_msize, mi.usmblks);
    }

  if (mspace_is_traced (heap->mspace))
    s = format (s, "\n%U", format_mheap_trace, tm, verbose);
  return s;
}
__clib_export void
clib_mem_get_heap_usage (clib_mem_heap_t * heap, clib_mem_usage_t * usage)
{
  struct dlmallinfo mi = mspace_mallinfo (heap->mspace);

  /* TODO: Fill in some more values */
  usage->object_count = 0;
  usage->bytes_total = mi.arena;
  usage->bytes_overhead = 0;
  usage->bytes_max = 0;
  usage->bytes_used = mi.uordblks;
  usage->bytes_free = mi.fordblks;
  usage->bytes_free_reclaimed = 0;
}
/* Call serial number for debugger breakpoints. */
uword clib_mem_validate_serial = 0;
__clib_export void
mheap_trace (clib_mem_heap_t * h, int enable)
{
  (void) mspace_enable_disable_trace (h->mspace, enable);

  if (enable == 0)
    mheap_trace_main_free (&mheap_trace_main);
}
__clib_export void
clib_mem_trace (int enable)
{
  mheap_trace_main_t *tm = &mheap_trace_main;
  void *current_heap = clib_mem_get_heap ();

  tm->enabled = enable;
  mheap_trace (current_heap, enable);

  if (enable)
    tm->current_traced_mheap = current_heap;
  else
    tm->current_traced_mheap = 0;
}
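
/*
 * A minimal usage sketch (hypothetical caller): trace a workload on the
 * current heap, print the per-traceback report, then stop tracing.
 *
 *   clib_mem_trace (1);
 *   run_workload ();   // hypothetical; allocations are now recorded
 *   fformat (stdout, "%U", format_mheap_trace, &mheap_trace_main, 1);
 *   clib_mem_trace (0);
 */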
__clib_export int
clib_mem_is_traced (void)
{
  clib_mem_heap_t *h = clib_mem_get_heap ();
  return mspace_is_traced (h->mspace);
}
__clib_export uword
clib_mem_trace_enable_disable (uword enable)
{
  uword rv;
  mheap_trace_main_t *tm = &mheap_trace_main;

  rv = tm->enabled;
  tm->enabled = enable;
  return rv;
}
__clib_export clib_mem_heap_t *
clib_mem_create_heap (void *base, uword size, int is_locked, char *fmt, ...)
{
  clib_mem_page_sz_t log2_page_sz = clib_mem_get_log2_page_size ();
  clib_mem_heap_t *h;
  char *name;
  u8 *s = 0;

  if (fmt == 0)
    name = "";
  else if (strchr (fmt, '%'))
    {
      /* Format string contains conversions - expand it. */
      va_list va;
      va_start (va, fmt);
      s = va_format (0, fmt, &va);
      vec_add1 (s, 0);
      va_end (va);
      name = (char *) s;
    }
  else
    name = fmt;

  h = clib_mem_create_heap_internal (base, size, log2_page_sz, is_locked,
				     name);
  vec_free (s);
  return h;
}
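
/*
 * A minimal usage sketch (hypothetical caller; 32 MB is arbitrary):
 * create a named, locked heap backed by its own mapping, allocate from
 * it, then destroy it.
 *
 *   clib_mem_heap_t *h = clib_mem_create_heap (0, 32 << 20, 1, "aux heap");
 *   clib_mem_heap_t *old = clib_mem_set_heap (h);
 *   void *x = clib_mem_alloc (128);
 *   clib_mem_set_heap (old);
 *   clib_mem_destroy_heap (h);
 */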
__clib_export void
clib_mem_destroy_heap (clib_mem_heap_t * h)
{
  mheap_trace_main_t *tm = &mheap_trace_main;

  if (tm->enabled && h->mspace == tm->current_traced_mheap)
    tm->enabled = 0;

  destroy_mspace (h->mspace);
  if (h->flags & CLIB_MEM_HEAP_F_UNMAP_ON_DESTROY)
    clib_mem_vm_unmap (h->base);
}
__clib_export uword
clib_mem_get_heap_free_space (clib_mem_heap_t * h)
{
  struct dlmallinfo dlminfo = mspace_mallinfo (h->mspace);
  return dlminfo.fordblks;
}
__clib_export void *
clib_mem_get_heap_base (clib_mem_heap_t * h)
{
  return h->base;
}
__clib_export uword
clib_mem_get_heap_size (clib_mem_heap_t * heap)
{
  return heap->size;
}
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */