/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vppinfra/format.h>
#include <vppinfra/dlmalloc.h>
#include <vppinfra/os.h>
#include <vppinfra/lock.h>
#include <vppinfra/hash.h>
#include <vppinfra/elf_clib.h>
#include <vppinfra/sanitizer.h>

void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
void *clib_per_numa_mheaps[CLIB_MAX_NUMAS];
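
/*
 * Usage sketch (illustrative only): these arrays back the per-thread and
 * per-numa heap accessors declared in <vppinfra/mem.h>, indexed by thread
 * index / numa node, e.g.:
 *
 *   void *heap = clib_mem_get_per_cpu_heap ();  // this thread's heap
 *   clib_mem_set_per_cpu_heap (new_heap);       // install a replacement
 */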

typedef struct
{
  /* Address of callers: outer first, inner last. */
  uword callers[12];

  /* Count of allocations with this traceback. */
  u32 n_allocations;

  /* Count of bytes allocated with this traceback. */
  u32 n_bytes;

  /* Offset of this item */
  uword offset;
} mheap_trace_t;

typedef struct
{
  clib_spinlock_t lock;
  uword enabled;

  mheap_trace_t *traces;

  /* Indices of free traces. */
  u32 *trace_free_list;

  /* Hash table mapping callers to trace index. */
  uword *trace_by_callers;

  /* Hash table mapping mheap offset to trace index. */
  uword *trace_index_by_offset;

  /* So we can easily shut off current segment trace, if any */
  void *current_traced_mheap;
} mheap_trace_main_t;

mheap_trace_main_t mheap_trace_main;

void
mheap_get_trace (uword offset, uword size)
{
  mheap_trace_main_t *tm = &mheap_trace_main;
  mheap_trace_t *t;
  uword i, n_callers, trace_index, *p;
  mheap_trace_t trace;
  uword save_enabled;

  if (tm->enabled == 0 || (clib_mem_get_heap () != tm->current_traced_mheap))
    return;

  /* Spurious Coverity warnings be gone. */
  clib_memset (&trace, 0, sizeof (trace));

  /* Skip our frame and mspace_get_aligned's frame */
  n_callers = clib_backtrace (trace.callers, ARRAY_LEN (trace.callers), 2);
  if (n_callers == 0)
    return;

  clib_spinlock_lock (&tm->lock);

  /* Turn off tracing to avoid embarrassment... */
  save_enabled = tm->enabled;
  tm->enabled = 0;

  if (!tm->trace_by_callers)
    tm->trace_by_callers =
      hash_create_shmem (0, sizeof (trace.callers), sizeof (uword));

  p = hash_get_mem (tm->trace_by_callers, &trace.callers);
  if (p)
    {
      trace_index = p[0];
      t = tm->traces + trace_index;
    }
  else
    {
      i = vec_len (tm->trace_free_list);
      if (i > 0)
        {
          trace_index = tm->trace_free_list[i - 1];
          _vec_len (tm->trace_free_list) = i - 1;
        }
      else
        {
          mheap_trace_t *old_start = tm->traces;
          mheap_trace_t *old_end = vec_end (tm->traces);

          vec_add2 (tm->traces, t, 1);

          /* The trace vector may have moved; rekey the callers hash. */
          if (tm->traces != old_start)
            {
              hash_pair_t *p;
              mheap_trace_t *q;
              hash_foreach_pair (p, tm->trace_by_callers,
              ({
                q = uword_to_pointer (p->key, mheap_trace_t *);
                ASSERT (q >= old_start && q < old_end);
                p->key = pointer_to_uword (tm->traces + (q - old_start));
              }));
            }
          trace_index = t - tm->traces;
        }

      t = tm->traces + trace_index;
      t[0] = trace;
      t->n_allocations = 0;
      t->n_bytes = 0;
      hash_set_mem (tm->trace_by_callers, t->callers, trace_index);
    }

  t->n_allocations += 1;
  t->n_bytes += size;
  t->offset = offset;		/* keep a sample to autopsy */
  hash_set (tm->trace_index_by_offset, offset, t - tm->traces);
  tm->enabled = save_enabled;
  clib_spinlock_unlock (&tm->lock);
}

void
mheap_put_trace (uword offset, uword size)
{
  mheap_trace_t *t;
  uword trace_index, *p;
  mheap_trace_main_t *tm = &mheap_trace_main;
  uword save_enabled;

  if (tm->enabled == 0)
    return;

  clib_spinlock_lock (&tm->lock);

  /* Turn off tracing for a moment */
  save_enabled = tm->enabled;
  tm->enabled = 0;

  p = hash_get (tm->trace_index_by_offset, offset);
  if (!p)
    {
      tm->enabled = save_enabled;
      clib_spinlock_unlock (&tm->lock);
      return;
    }

  trace_index = p[0];
  hash_unset (tm->trace_index_by_offset, offset);
  ASSERT (trace_index < vec_len (tm->traces));

  t = tm->traces + trace_index;
  ASSERT (t->n_allocations > 0);
  ASSERT (t->n_bytes >= size);
  t->n_allocations -= 1;
  t->n_bytes -= size;
  if (t->n_allocations == 0)
    {
      hash_unset_mem (tm->trace_by_callers, t->callers);
      vec_add1 (tm->trace_free_list, trace_index);
      clib_memset (t, 0, sizeof (t[0]));
    }
  tm->enabled = save_enabled;
  clib_spinlock_unlock (&tm->lock);
}
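
/*
 * Sketch of how the two hooks above are expected to pair up (the actual
 * call sites live in the dlmalloc code; "offset" is whatever key the
 * allocator passes, see the frame-skip comment in mheap_get_trace):
 *
 *   alloc path:  mheap_get_trace (offset, size);  // record backtrace
 *   free path:   mheap_put_trace (offset, size);  // same offset and size
 *
 * A put whose offset was never traced just restores tm->enabled and
 * returns, via the !p early exit above.
 */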

always_inline void
mheap_trace_main_free (mheap_trace_main_t * tm)
{
  vec_free (tm->traces);
  vec_free (tm->trace_free_list);
  hash_free (tm->trace_by_callers);
  hash_free (tm->trace_index_by_offset);
}

/* Initialize CLIB heap based on memory/size given by user.
   Set memory to 0 and CLIB will try to allocate its own heap. */
void *
clib_mem_init_internal (void *memory, uword memory_size, int set_heap)
{
  u8 *heap;

  if (memory)
    {
      heap = create_mspace_with_base (memory, memory_size, 1 /* locked */ );
      mspace_disable_expand (heap);
    }
  else
    heap = create_mspace (memory_size, 1 /* locked */ );

  CLIB_MEM_POISON (mspace_least_addr (heap), mspace_footprint (heap));

  if (set_heap)
    clib_mem_set_heap (heap);

  if (mheap_trace_main.lock == 0)
    clib_spinlock_init (&mheap_trace_main.lock);

  return heap;
}

void *
clib_mem_init (void *memory, uword memory_size)
{
  return clib_mem_init_internal (memory, memory_size,
                                 1 /* do clib_mem_set_heap */ );
}
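
/*
 * Typical bootstrap (a minimal sketch; the size is arbitrary): pass
 * memory == 0 and let dlmalloc create the mspace itself:
 *
 *   clib_mem_init (0, 256 << 20);   // memory == 0, 256MB self-allocated heap
 */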

void *
clib_mem_init_thread_safe (void *memory, uword memory_size)
{
  return clib_mem_init_internal (memory, memory_size,
                                 1 /* do clib_mem_set_heap */ );
}

void *
clib_mem_init_thread_safe_numa (void *memory, uword memory_size, u8 numa)
{
  clib_mem_vm_alloc_t alloc = { 0 };
  clib_error_t *err;
  void *heap;

  alloc.size = memory_size;
  alloc.flags = CLIB_MEM_VM_F_NUMA_FORCE;
  alloc.numa_node = numa;
  if ((err = clib_mem_vm_ext_alloc (&alloc)))
    {
      clib_error_report (err);
      return 0;
    }

  heap = clib_mem_init_internal (memory, memory_size,
                                 0 /* do NOT clib_mem_set_heap */ );

  ASSERT (heap);

  return heap;
}

u8 *
format_clib_mem_usage (u8 * s, va_list * va)
{
  int verbose = va_arg (*va, int);
  return format (s, "$$$$ heap at %llx verbose %d", clib_mem_get_heap (),
                 verbose);
}

/*
 * Magic decoder ring for mallinfo stats (ala dlmalloc):
 *
 * size_t arena;     / * Non-mmapped space allocated (bytes) * /
 * size_t ordblks;   / * Number of free chunks * /
 * size_t smblks;    / * Number of free fastbin blocks * /
 * size_t hblks;     / * Number of mmapped regions * /
 * size_t hblkhd;    / * Space allocated in mmapped regions (bytes) * /
 * size_t usmblks;   / * Maximum total allocated space (bytes) * /
 * size_t fsmblks;   / * Space in freed fastbin blocks (bytes) * /
 * size_t uordblks;  / * Total allocated space (bytes) * /
 * size_t fordblks;  / * Total free space (bytes) * /
 * size_t keepcost;  / * Top-most, releasable space (bytes) * /
 */
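
/*
 * Minimal sketch of reading those counters directly; format_mheap below
 * renders the same fields with format_msize:
 *
 *   struct dlmallinfo mi = mspace_mallinfo (clib_mem_get_heap ());
 *   uword total = mi.arena, used = mi.uordblks, avail = mi.fordblks;
 */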

u8 *
format_msize (u8 * s, va_list * va)
{
  uword a = va_arg (*va, uword);

  if (a >= 1ULL << 30)
    s = format (s, "%.2fG", (((f64) a) / ((f64) (1ULL << 30))));
  else if (a >= 1ULL << 20)
    s = format (s, "%.2fM", (((f64) a) / ((f64) (1ULL << 20))));
  else if (a >= 1ULL << 10)
    s = format (s, "%.2fK", (((f64) a) / ((f64) (1ULL << 10))));
  else
    s = format (s, "%lld", a);
  return s;
}
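
/*
 * Example: format_msize is a %U formatter, so
 *
 *   s = format (0, "%U", format_msize, (uword) (1536 << 10));
 *
 * yields "1.50M"; anything below 1K prints as a plain decimal count.
 */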

static int
mheap_trace_sort (const void *_t1, const void *_t2)
{
  const mheap_trace_t *t1 = _t1;
  const mheap_trace_t *t2 = _t2;
  word cmp;

  cmp = (word) t2->n_bytes - (word) t1->n_bytes;
  if (!cmp)
    cmp = (word) t2->n_allocations - (word) t1->n_allocations;
  return cmp;
}

u8 *
format_mheap_trace (u8 * s, va_list * va)
{
  mheap_trace_main_t *tm = va_arg (*va, mheap_trace_main_t *);
  int verbose = va_arg (*va, int);
  int have_traces = 0;
  int i;

  clib_spinlock_lock (&tm->lock);
  if (vec_len (tm->traces) > 0 &&
      clib_mem_get_heap () == tm->current_traced_mheap)
    {
      have_traces = 1;

      /* Make a copy of traces since we'll be sorting them. */
      mheap_trace_t *t, *traces_copy;
      u32 indent, total_objects_traced;

      traces_copy = vec_dup (tm->traces);

      qsort (traces_copy, vec_len (traces_copy), sizeof (traces_copy[0]),
             mheap_trace_sort);

      total_objects_traced = 0;
      s = format (s, "\n");
      vec_foreach (t, traces_copy)
      {
        /* Skip over free elements. */
        if (t->n_allocations == 0)
          continue;

        total_objects_traced += t->n_allocations;

        /* When not verbose only report allocations of more than 1k. */
        if (!verbose && t->n_bytes < 1024)
          continue;

        if (t == traces_copy)
          s = format (s, "%=9s%=9s %=10s Traceback\n", "Bytes", "Count",
                      "Sample");
        s = format (s, "%9d%9d %p", t->n_bytes, t->n_allocations, t->offset);
        indent = format_get_indent (s);
        for (i = 0; i < ARRAY_LEN (t->callers) && t->callers[i]; i++)
          {
            if (i > 0)
              s = format (s, "%U", format_white_space, indent);
#if defined(CLIB_UNIX) && !defined(__APPLE__)
            /* $$$$ does this actually work? */
            s = format (s, " %U\n", format_clib_elf_symbol_with_address,
                        t->callers[i]);
#else
            s = format (s, " %p\n", t->callers[i]);
#endif
          }
      }

      s = format (s, "%d total traced objects\n", total_objects_traced);

      vec_free (traces_copy);
    }
  clib_spinlock_unlock (&tm->lock);
  if (have_traces == 0)
    s = format (s, "no traced allocations\n");

  return s;
}

u8 *
format_mheap (u8 * s, va_list * va)
{
  void *heap = va_arg (*va, u8 *);
  int verbose = va_arg (*va, int);
  struct dlmallinfo mi;
  mheap_trace_main_t *tm = &mheap_trace_main;

  mi = mspace_mallinfo (heap);

  s = format (s, "total: %U, used: %U, free: %U, trimmable: %U",
              format_msize, mi.arena,
              format_msize, mi.uordblks,
              format_msize, mi.fordblks, format_msize, mi.keepcost);
  if (verbose > 0)
    {
      s = format (s, "\n    free chunks %llu free fastbin blks %llu",
                  mi.ordblks, mi.smblks);
      s =
        format (s, "\n    max total allocated %U", format_msize, mi.usmblks);
    }

  if (mspace_is_traced (heap))
    s = format (s, "\n%U", format_mheap_trace, tm, verbose);
  return s;
}
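
/*
 * Example (sketch of a debug-CLI style dump): print statistics for the
 * current heap, verbose, to stdout:
 *
 *   fformat (stdout, "%U\n", format_mheap, clib_mem_get_heap (),
 *            1);   // verbose != 0 adds free-chunk and peak counters
 */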

void
clib_mem_usage (clib_mem_usage_t * u)
{
  clib_warning ("unimp");
}

void
mheap_usage (void *heap, clib_mem_usage_t * usage)
{
  struct dlmallinfo mi = mspace_mallinfo (heap);

  /* TODO: Fill in some more values */
  usage->object_count = 0;
  usage->bytes_total = mi.arena;
  usage->bytes_overhead = 0;
  usage->bytes_max = 0;
  usage->bytes_used = mi.uordblks;
  usage->bytes_free = mi.fordblks;
  usage->bytes_free_reclaimed = 0;
}

/* Call serial number for debugger breakpoints. */
uword clib_mem_validate_serial = 0;

void
clib_mem_validate (void)
{
  clib_warning ("unimp");
}

void
mheap_trace (void *v, int enable)
{
  (void) mspace_enable_disable_trace (v, enable);

  if (enable == 0)
    mheap_trace_main_free (&mheap_trace_main);
}

void
clib_mem_trace (int enable)
{
  mheap_trace_main_t *tm = &mheap_trace_main;
  void *current_heap = clib_mem_get_heap ();

  tm->enabled = enable;
  mheap_trace (current_heap, enable);

  if (enable)
    tm->current_traced_mheap = current_heap;
  else
    tm->current_traced_mheap = 0;
}
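
/*
 * Example: bracket a suspect code path to hunt a leak (sketch;
 * suspect_code_path is a hypothetical function under test):
 *
 *   clib_mem_trace (1);   // start tracing the current heap
 *   suspect_code_path ();
 *   fformat (stdout, "%U", format_mheap, clib_mem_get_heap (),
 *            1);          // traced allocations appear in the output
 *   clib_mem_trace (0);   // stop tracing and free the trace state
 */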

int
clib_mem_is_traced (void)
{
  return mspace_is_traced (clib_mem_get_heap ());
}

uword
clib_mem_trace_enable_disable (uword enable)
{
  uword rv;
  mheap_trace_main_t *tm = &mheap_trace_main;

  rv = tm->enabled;
  tm->enabled = enable;
  return rv;
}

/*
 * These API functions seem like layering violations, but
 * by introducing them we greatly reduce the number
 * of code changes required to use dlmalloc spaces
 */
void *
mheap_alloc_with_lock (void *memory, uword size, int locked)
{
  void *rv;
  if (memory == 0)
    return create_mspace (size, locked);
  else
    {
      rv = create_mspace_with_base (memory, size, locked);
      mspace_disable_expand (rv);
      return rv;
    }
}
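
/*
 * Example: carve out a private locked sub-heap and temporarily allocate
 * from it (sketch; clib_mem_set_heap returns the previous heap):
 *
 *   void *h = mheap_alloc_with_lock (0, 16 << 20, 1);  // locked
 *   void *old = clib_mem_set_heap (h);
 *   // ... allocations here come from h ...
 *   clib_mem_set_heap (old);
 */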

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */