#include <vppinfra/hash.h>
#include <vppinfra/elf_clib.h>
-void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
-
typedef struct
{
/* Address of callers: outer first, inner last. */
uword callers[12];
/* Count of allocations with this traceback. */
-#if CLIB_VEC64 > 0
- u64 n_allocations;
-#else
u32 n_allocations;
-#endif
/* Count of bytes allocated with this traceback. */
u32 n_bytes;
typedef struct
{
clib_spinlock_t lock;
- uword enabled;
mheap_trace_t *traces;
/* Hash table mapping mheap offset to trace index. */
uword *trace_index_by_offset;
+
+ /* So we can easily shut off the currently traced heap, if any */
+ const clib_mem_heap_t *current_traced_mheap;
+
} mheap_trace_main_t;
mheap_trace_main_t mheap_trace_main;
-void
-mheap_get_trace (uword offset, uword size)
+static __thread int mheap_trace_thread_disable;
+
+static void
+mheap_get_trace_internal (const clib_mem_heap_t *heap, uword offset,
+ uword size)
{
mheap_trace_main_t *tm = &mheap_trace_main;
mheap_trace_t *t;
uword i, n_callers, trace_index, *p;
mheap_trace_t trace;
- uword save_enabled;
- if (tm->enabled == 0)
+ if (heap != tm->current_traced_mheap || mheap_trace_thread_disable)
return;
/* Spurious Coverity warnings be gone. */
- memset (&trace, 0, sizeof (trace));
+ clib_memset (&trace, 0, sizeof (trace));
- /* Skip our frame and mspace_get_aligned's frame */
- n_callers = clib_backtrace (trace.callers, ARRAY_LEN (trace.callers), 2);
- if (n_callers == 0)
- return;
+ clib_spinlock_lock (&tm->lock);
- /* $$$ This looks like dreck to remove... */
- if (0)
- for (i = n_callers; i < ARRAY_LEN (trace.callers); i++)
- trace.callers[i] = 0;
+ /* heap could have changed while we were waiting on the lock */
+ if (heap != tm->current_traced_mheap)
+ goto out;
- clib_spinlock_lock (&tm->lock);
+ /* Turn off tracing for this thread: the hash/vec operations below
+ allocate and would otherwise recurse into the tracer... */
+ mheap_trace_thread_disable = 1;
- /* Turn off tracing to avoid embarrassment... */
- save_enabled = tm->enabled;
- tm->enabled = 0;
+ /* Skip our frame and mspace_get_aligned's frame */
+ n_callers = clib_backtrace (trace.callers, ARRAY_LEN (trace.callers), 2);
+ if (n_callers == 0)
+ goto out;
if (!tm->trace_by_callers)
tm->trace_by_callers =
if (i > 0)
{
trace_index = tm->trace_free_list[i - 1];
- _vec_len (tm->trace_free_list) = i - 1;
+ vec_set_len (tm->trace_free_list, i - 1);
}
else
{
{
hash_pair_t *p;
mheap_trace_t *q;
- /* *INDENT-OFF* */
hash_foreach_pair (p, tm->trace_by_callers,
({
q = uword_to_pointer (p->key, mheap_trace_t *);
ASSERT (q >= old_start && q < old_end);
p->key = pointer_to_uword (tm->traces + (q - old_start));
}));
- /* *INDENT-ON* */
}
trace_index = t - tm->traces;
}
t->n_bytes += size;
t->offset = offset; /* keep a sample to autopsy */
hash_set (tm->trace_index_by_offset, offset, t - tm->traces);
- tm->enabled = save_enabled;
+
+out:
+ mheap_trace_thread_disable = 0;
clib_spinlock_unlock (&tm->lock);
}
-void
-mheap_put_trace (uword offset, uword size)
+static void
+mheap_put_trace_internal (const clib_mem_heap_t *heap, uword offset,
+ uword size)
{
mheap_trace_t *t;
uword trace_index, *p;
mheap_trace_main_t *tm = &mheap_trace_main;
- uword save_enabled;
- if (tm->enabled == 0)
+ if (heap != tm->current_traced_mheap || mheap_trace_thread_disable)
return;
clib_spinlock_lock (&tm->lock);
- /* Turn off tracing for a moment */
- save_enabled = tm->enabled;
- tm->enabled = 0;
+ /* heap could have changed while we were waiting on the lock */
+ if (heap != tm->current_traced_mheap)
+ goto out;
+
+ /* Turn off tracing for this thread for a moment */
+ mheap_trace_thread_disable = 1;
p = hash_get (tm->trace_index_by_offset, offset);
if (!p)
- {
- tm->enabled = save_enabled;
- clib_spinlock_unlock (&tm->lock);
- return;
- }
+ goto out;
trace_index = p[0];
hash_unset (tm->trace_index_by_offset, offset);
{
hash_unset_mem (tm->trace_by_callers, t->callers);
vec_add1 (tm->trace_free_list, trace_index);
- memset (t, 0, sizeof (t[0]));
+ clib_memset (t, 0, sizeof (t[0]));
}
- tm->enabled = save_enabled;
+
+out:
+ mheap_trace_thread_disable = 0;
clib_spinlock_unlock (&tm->lock);
}
+void
+mheap_get_trace (uword offset, uword size)
+{
+ mheap_get_trace_internal (clib_mem_get_heap (), offset, size);
+}
+
+void
+mheap_put_trace (uword offset, uword size)
+{
+ mheap_put_trace_internal (clib_mem_get_heap (), offset, size);
+}
+
always_inline void
mheap_trace_main_free (mheap_trace_main_t * tm)
{
+ CLIB_SPINLOCK_ASSERT_LOCKED (&tm->lock);
+ tm->current_traced_mheap = 0;
vec_free (tm->traces);
vec_free (tm->trace_free_list);
hash_free (tm->trace_by_callers);
hash_free (tm->trace_index_by_offset);
+ mheap_trace_thread_disable = 0;
}
-/* Initialize CLIB heap based on memory/size given by user.
- Set memory to 0 and CLIB will try to allocate its own heap. */
-void *
-clib_mem_init (void *memory, uword memory_size)
+static clib_mem_heap_t *
+clib_mem_create_heap_internal (void *base, uword size,
+ clib_mem_page_sz_t log2_page_sz, int is_locked,
+ char *name)
{
- u8 *heap;
+ clib_mem_heap_t *h;
+ u8 flags = 0;
+ int sz = sizeof (clib_mem_heap_t);
- if (memory)
+ if (base == 0)
{
- heap = create_mspace_with_base (memory, memory_size, 1 /* locked */ );
- mspace_disable_expand (heap);
+ log2_page_sz = clib_mem_log2_page_size_validate (log2_page_sz);
+ size = round_pow2 (size, clib_mem_page_bytes (log2_page_sz));
+ base = clib_mem_vm_map_internal (0, log2_page_sz, size, -1, 0,
+ "main heap");
+
+ if (base == CLIB_MEM_VM_MAP_FAILED)
+ return 0;
+
+ flags = CLIB_MEM_HEAP_F_UNMAP_ON_DESTROY;
}
else
- heap = create_mspace (memory_size, 1 /* locked */ );
+ log2_page_sz = CLIB_MEM_PAGE_SZ_UNKNOWN;
- clib_mem_set_heap (heap);
+ if (is_locked)
+ flags |= CLIB_MEM_HEAP_F_LOCKED;
- if (mheap_trace_main.lock == 0)
- clib_spinlock_init (&mheap_trace_main.lock);
+ h = base;
+ h->base = base;
+ h->size = size;
+ h->log2_page_sz = log2_page_sz;
+ h->flags = flags;
+ sz = strlen (name);
+ strcpy (h->name, name);
+ sz = round_pow2 (sz + sizeof (clib_mem_heap_t), 16);
+ h->mspace = create_mspace_with_base (base + sz, size - sz, is_locked);
+
+ mspace_disable_expand (h->mspace);
- return heap;
+ clib_mem_poison (mspace_least_addr (h->mspace),
+ mspace_footprint (h->mspace));
+
+ return h;
}
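+
+/* Layout note (illustrative): the clib_mem_heap_t header and NUL-terminated
+ * name live at the start of 'base'; the dlmalloc mspace begins at the next
+ * 16-byte boundary after them.  E.g. with name "main heap" (9 chars) and an
+ * assumed 40-byte header, round_pow2 (9 + 40, 16) == 64, so the mspace
+ * would start 64 bytes into the region.  Header size is platform-dependent. */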
-void *
-clib_mem_init_thread_safe (void *memory, uword memory_size)
+/* Initialize CLIB heap based on memory/size given by user.
+ Set memory to 0 and CLIB will try to allocate its own heap. */
+static void *
+clib_mem_init_internal (void *base, uword size,
+ clib_mem_page_sz_t log2_page_sz)
{
- return clib_mem_init (memory, memory_size);
+ clib_mem_heap_t *h;
+
+ clib_mem_main_init ();
+
+ h = clib_mem_create_heap_internal (base, size, log2_page_sz,
+ 1 /* is_locked */, "main heap");
+
+ clib_mem_set_heap (h);
+
+ if (mheap_trace_main.lock == 0)
+ {
+ /* clib_spinlock_init() dynamically allocates the spinlock in the
+ * current per-cpu heap, but this lock is shared by all traces across
+ * all heaps, so it cannot live in a per-cpu heap that may be
+ * destroyed later */
+ static struct clib_spinlock_s mheap_trace_main_lock = {};
+ mheap_trace_main.lock = &mheap_trace_main_lock;
+ }
+
+ return h;
}
-#ifdef CLIB_LINUX_KERNEL
-#include <asm/page.h>
+__clib_export void *
+clib_mem_init (void *memory, uword memory_size)
+{
+ return clib_mem_init_internal (memory, memory_size,
+ CLIB_MEM_PAGE_SZ_DEFAULT);
+}
-uword
-clib_mem_get_page_size (void)
+__clib_export void *
+clib_mem_init_with_page_size (uword memory_size,
+ clib_mem_page_sz_t log2_page_sz)
{
- return PAGE_SIZE;
+ return clib_mem_init_internal (0, memory_size, log2_page_sz);
}
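+
+/* Illustrative usage (not part of this change): let CLIB map a 64 MB main
+ * heap on default pages, or explicitly on 2 MB huge pages (assuming the
+ * CLIB_MEM_PAGE_SZ_2M enumerator from clib_mem.h):
+ *
+ *   clib_mem_init (0, 64 << 20);
+ *   clib_mem_init_with_page_size (64 << 20, CLIB_MEM_PAGE_SZ_2M);
+ */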
-#endif
-#ifdef CLIB_UNIX
-uword
-clib_mem_get_page_size (void)
+__clib_export void *
+clib_mem_init_thread_safe (void *memory, uword memory_size)
{
- return getpagesize ();
+ return clib_mem_init_internal (memory, memory_size,
+ CLIB_MEM_PAGE_SZ_DEFAULT);
}
-#endif
-/* Make a guess for standalone. */
-#ifdef CLIB_STANDALONE
-uword
-clib_mem_get_page_size (void)
+__clib_export void
+clib_mem_destroy (void)
{
- return 4096;
+ mheap_trace_main_t *tm = &mheap_trace_main;
+ clib_mem_heap_t *heap = clib_mem_get_heap ();
+
+ if (heap == tm->current_traced_mheap)
+ mheap_trace (heap, 0);
+
+ destroy_mspace (heap->mspace);
+ clib_mem_vm_unmap (heap);
}
-#endif
-u8 *
-format_clib_mem_usage (u8 * s, va_list * va)
+__clib_export u8 *
+format_clib_mem_usage (u8 *s, va_list *va)
{
int verbose = va_arg (*va, int);
return format (s, "$$$$ heap at %llx verbose %d", clib_mem_get_heap (),
int verbose = va_arg (*va, int);
int have_traces = 0;
int i;
+ int n = 0;
clib_spinlock_lock (&tm->lock);
- if (vec_len (tm->traces) > 0)
+ if (vec_len (tm->traces) > 0 &&
+ clib_mem_get_heap () == tm->current_traced_mheap)
{
have_traces = 1;
total_objects_traced += t->n_allocations;
- /* When not verbose only report allocations of more than 1k. */
- if (!verbose && t->n_bytes < 1024)
+ /* When not verbose only report the 50 biggest allocations */
+ if (!verbose && n >= 50)
continue;
+ n++;
if (t == traces_copy)
s = format (s, "%=9s%=9s %=10s Traceback\n", "Bytes", "Count",
{
if (i > 0)
s = format (s, "%U", format_white_space, indent);
-#ifdef CLIB_UNIX
+#if defined(CLIB_UNIX) && !defined(__APPLE__)
/* $$$$ does this actually work? */
s =
format (s, " %U\n", format_clib_elf_symbol_with_address,
return s;
}
-
-u8 *
-format_mheap (u8 * s, va_list * va)
+__clib_export u8 *
+format_clib_mem_heap (u8 * s, va_list * va)
{
- void *heap = va_arg (*va, u8 *);
+ clib_mem_heap_t *heap = va_arg (*va, clib_mem_heap_t *);
int verbose = va_arg (*va, int);
struct dlmallinfo mi;
mheap_trace_main_t *tm = &mheap_trace_main;
+ u32 indent = format_get_indent (s) + 2;
+
+ if (heap == 0)
+ heap = clib_mem_get_heap ();
+
+ mi = mspace_mallinfo (heap->mspace);
+
+ s = format (s, "base %p, size %U",
+ heap->base, format_memory_size, heap->size);
- mi = mspace_mallinfo (heap);
+#define _(i,v,str) \
+ if (heap->flags & CLIB_MEM_HEAP_F_##v) s = format (s, ", %s", str);
+ foreach_clib_mem_heap_flag;
+#undef _
- s = format (s, "total: %U, used: %U, free: %U, trimmable: %U",
+ s = format (s, ", name '%s'", heap->name);
+
+ if (heap->log2_page_sz != CLIB_MEM_PAGE_SZ_UNKNOWN)
+ {
+ clib_mem_page_stats_t stats;
+ clib_mem_get_page_stats (heap->base, heap->log2_page_sz,
+ heap->size >> heap->log2_page_sz, &stats);
+ s = format (s, "\n%U%U", format_white_space, indent,
+ format_clib_mem_page_stats, &stats);
+ }
+
+ s = format (s, "\n%Utotal: %U, used: %U, free: %U, trimmable: %U",
+ format_white_space, indent,
format_msize, mi.arena,
format_msize, mi.uordblks,
format_msize, mi.fordblks, format_msize, mi.keepcost);
if (verbose > 0)
{
- s = format (s, "\n free chunks %llu free fastbin blks %llu",
- mi.ordblks, mi.smblks);
- s =
- format (s, "\n max total allocated %U", format_msize, mi.usmblks);
+ s = format (s, "\n%Ufree chunks %llu free fastbin blks %llu",
+ format_white_space, indent + 2, mi.ordblks, mi.smblks);
+ s = format (s, "\n%Umax total allocated %U",
+ format_white_space, indent + 2, format_msize, mi.usmblks);
}
- s = format (s, "\n%U", format_mheap_trace, tm, verbose);
+ if (heap->flags & CLIB_MEM_HEAP_F_TRACED)
+ s = format (s, "\n%U", format_mheap_trace, tm, verbose);
return s;
}
-void
-clib_mem_usage (clib_mem_usage_t * u)
+__clib_export __clib_flatten void
+clib_mem_get_heap_usage (clib_mem_heap_t *heap, clib_mem_usage_t *usage)
{
- clib_warning ("unimp");
+ struct dlmallinfo mi = mspace_mallinfo (heap->mspace);
+
+ usage->bytes_total = mi.arena; /* non-mmapped space allocated from system */
+ usage->bytes_used = mi.uordblks; /* total allocated space */
+ usage->bytes_free = mi.fordblks; /* total free space */
+ usage->bytes_used_mmap = mi.hblkhd; /* space in mmapped regions */
+ usage->bytes_max = mi.usmblks; /* maximum total allocated space */
+ usage->bytes_free_reclaimed = mi.ordblks; /* number of free chunks */
+ usage->bytes_overhead = mi.keepcost; /* releasable (via malloc_trim) space */
+
+ /* Not supported */
+ usage->bytes_used_sbrk = 0;
+ usage->object_count = 0;
}
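+
+/* Illustrative usage: snapshot the current heap's statistics; fformat and
+ * format_memory_size are assumed from vppinfra/format.h:
+ *
+ *   clib_mem_usage_t u;
+ *   clib_mem_get_heap_usage (clib_mem_get_heap (), &u);
+ *   fformat (stdout, "used %U of %U\n", format_memory_size, u.bytes_used,
+ *            format_memory_size, u.bytes_total);
+ */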
/* Call serial number for debugger breakpoints. */
uword clib_mem_validate_serial = 0;
-void
-clib_mem_validate (void)
+__clib_export void
+mheap_trace (clib_mem_heap_t * h, int enable)
{
- clib_warning ("unimp");
-}
+ mheap_trace_main_t *tm = &mheap_trace_main;
-void
-mheap_trace (void *v, int enable)
-{
- (void) mspace_enable_disable_trace (v, enable);
+ clib_spinlock_lock (&tm->lock);
- if (enable == 0)
- mheap_trace_main_free (&mheap_trace_main);
+ if (tm->current_traced_mheap != 0 && tm->current_traced_mheap != h)
+ {
+ clib_warning ("tracing already enabled for another heap, ignoring");
+ goto out;
+ }
+
+ if (enable)
+ {
+ h->flags |= CLIB_MEM_HEAP_F_TRACED;
+ tm->current_traced_mheap = h;
+ }
+ else
+ {
+ h->flags &= ~CLIB_MEM_HEAP_F_TRACED;
+ mheap_trace_main_free (&mheap_trace_main);
+ }
+
+out:
+ clib_spinlock_unlock (&tm->lock);
}
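+
+/* Illustrative usage: trace the current heap, then dump what was recorded
+ * (passing a null heap to format_clib_mem_heap selects the current one):
+ *
+ *   mheap_trace (clib_mem_get_heap (), 1);
+ *   ... allocations to be traced ...
+ *   fformat (stdout, "%U\n", format_clib_mem_heap, 0, 1);
+ *   mheap_trace (clib_mem_get_heap (), 0);
+ */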
-void
+__clib_export void
clib_mem_trace (int enable)
{
- mheap_trace_main_t *tm = &mheap_trace_main;
+ void *current_heap = clib_mem_get_heap ();
+ mheap_trace (current_heap, enable);
+}
- tm->enabled = enable;
- mheap_trace (clib_mem_get_heap (), enable);
+int
+clib_mem_is_traced (void)
+{
+ clib_mem_heap_t *h = clib_mem_get_heap ();
+ return (h->flags & CLIB_MEM_HEAP_F_TRACED) != 0;
}
-uword
+__clib_export uword
clib_mem_trace_enable_disable (uword enable)
{
- uword rv;
+ uword rv = !mheap_trace_thread_disable;
+ mheap_trace_thread_disable = !enable;
+ return rv;
+}
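+
+/* Illustrative usage: temporarily suppress tracing on the calling thread
+ * and restore the previous state afterwards:
+ *
+ *   uword was_enabled = clib_mem_trace_enable_disable (0);
+ *   ... allocations here are not traced on this thread ...
+ *   clib_mem_trace_enable_disable (was_enabled);
+ */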
+
+__clib_export clib_mem_heap_t *
+clib_mem_create_heap (void *base, uword size, int is_locked, char *fmt, ...)
+{
+ clib_mem_page_sz_t log2_page_sz = clib_mem_get_log2_page_size ();
+ clib_mem_heap_t *h;
+ char *name;
+ u8 *s = 0;
+
+ if (fmt == 0)
+ {
+ name = "";
+ }
+ else if (strchr (fmt, '%'))
+ {
+ va_list va;
+ va_start (va, fmt);
+ s = va_format (0, fmt, &va);
+ vec_add1 (s, 0);
+ va_end (va);
+ name = (char *) s;
+ }
+ else
+ name = fmt;
+
+ h = clib_mem_create_heap_internal (base, size, log2_page_sz, is_locked,
+ name);
+ vec_free (s);
+ return h;
+}
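+
+/* Illustrative usage ("my heap" is just an example name): create a private,
+ * locked 1 MB heap, allocate from it, and tear it down:
+ *
+ *   clib_mem_heap_t *h = clib_mem_create_heap (0, 1 << 20, 1, "my heap");
+ *   void *p = clib_mem_heap_alloc (h, 128);
+ *   clib_mem_heap_free (h, p);
+ *   clib_mem_destroy_heap (h);
+ */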
+
+__clib_export void
+clib_mem_destroy_heap (clib_mem_heap_t * h)
+{
mheap_trace_main_t *tm = &mheap_trace_main;
- rv = tm->enabled;
- tm->enabled = enable;
- return rv;
+ if (h == tm->current_traced_mheap)
+ mheap_trace (h, 0);
+
+ destroy_mspace (h->mspace);
+ if (h->flags & CLIB_MEM_HEAP_F_UNMAP_ON_DESTROY)
+ clib_mem_vm_unmap (h->base);
}
-/*
- * These API functions seem like layering violations, but
- * by introducing them we greatly reduce the number
- * of code changes required to use dlmalloc spaces
- */
-void *
-mheap_alloc_with_lock (void *memory, uword size, int locked)
+__clib_export __clib_flatten uword
+clib_mem_get_heap_free_space (clib_mem_heap_t *h)
+{
+ struct dlmallinfo dlminfo = mspace_mallinfo (h->mspace);
+ return dlminfo.fordblks;
+}
+
+__clib_export __clib_flatten void *
+clib_mem_get_heap_base (clib_mem_heap_t *h)
+{
+ return h->base;
+}
+
+__clib_export __clib_flatten uword
+clib_mem_get_heap_size (clib_mem_heap_t *heap)
+{
+ return heap->size;
+}
+
+/* Memory allocator which may call os_out_of_memory() if it fails */
+static inline void *
+clib_mem_heap_alloc_inline (void *heap, uword size, uword align,
+ int os_out_of_memory_on_failure)
+{
+ clib_mem_heap_t *h = heap ? heap : clib_mem_get_per_cpu_heap ();
+ void *p;
+
+ align = clib_max (CLIB_MEM_MIN_ALIGN, align);
+
+ p = mspace_memalign (h->mspace, align, size);
+
+ if (PREDICT_FALSE (0 == p))
+ {
+ if (os_out_of_memory_on_failure)
+ os_out_of_memory ();
+ return 0;
+ }
+
+ if (PREDICT_FALSE (h->flags & CLIB_MEM_HEAP_F_TRACED))
+ mheap_get_trace_internal (h, pointer_to_uword (p), clib_mem_size (p));
+
+ clib_mem_unpoison (p, size);
+ return p;
+}
+
+/* Memory allocator which calls os_out_of_memory() when it fails */
+__clib_export __clib_flatten void *
+clib_mem_alloc (uword size)
{
- void *rv;
- if (memory == 0)
- return create_mspace (size, locked);
+ return clib_mem_heap_alloc_inline (0, size, CLIB_MEM_MIN_ALIGN,
+ /* os_out_of_memory */ 1);
+}
+
+__clib_export __clib_flatten void *
+clib_mem_alloc_aligned (uword size, uword align)
+{
+ return clib_mem_heap_alloc_inline (0, size, align,
+ /* os_out_of_memory */ 1);
+}
+
+/* Memory allocator which returns 0 instead of calling os_out_of_memory() */
+__clib_export __clib_flatten void *
+clib_mem_alloc_or_null (uword size)
+{
+ return clib_mem_heap_alloc_inline (0, size, CLIB_MEM_MIN_ALIGN,
+ /* os_out_of_memory */ 0);
+}
+
+__clib_export __clib_flatten void *
+clib_mem_alloc_aligned_or_null (uword size, uword align)
+{
+ return clib_mem_heap_alloc_inline (0, size, align,
+ /* os_out_of_memory */ 0);
+}
+
+__clib_export __clib_flatten void *
+clib_mem_heap_alloc (void *heap, uword size)
+{
+ return clib_mem_heap_alloc_inline (heap, size, CLIB_MEM_MIN_ALIGN,
+ /* os_out_of_memory */ 1);
+}
+
+__clib_export __clib_flatten void *
+clib_mem_heap_alloc_aligned (void *heap, uword size, uword align)
+{
+ return clib_mem_heap_alloc_inline (heap, size, align,
+ /* os_out_of_memory */ 1);
+}
+
+__clib_export __clib_flatten void *
+clib_mem_heap_alloc_or_null (void *heap, uword size)
+{
+ return clib_mem_heap_alloc_inline (heap, size, CLIB_MEM_MIN_ALIGN,
+ /* os_out_of_memory */ 0);
+}
+
+__clib_export __clib_flatten void *
+clib_mem_heap_alloc_aligned_or_null (void *heap, uword size, uword align)
+{
+ return clib_mem_heap_alloc_inline (heap, size, align,
+ /* os_out_of_memory */ 0);
+}
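+
+/* Illustrative usage ('size' is hypothetical): the _or_null variants return
+ * 0 on failure instead of calling os_out_of_memory (), so check the result:
+ *
+ *   void *p = clib_mem_alloc_aligned_or_null (size, 64);
+ *   if (p == 0)
+ *     return clib_error_return (0, "out of memory");
+ */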
+
+__clib_export __clib_flatten void *
+clib_mem_heap_realloc_aligned (void *heap, void *p, uword new_size,
+ uword align)
+{
+ uword old_alloc_size;
+ clib_mem_heap_t *h = heap ? heap : clib_mem_get_per_cpu_heap ();
+ void *new;
+
+ ASSERT (count_set_bits (align) == 1);
+
+ old_alloc_size = p ? mspace_usable_size (p) : 0;
+
+ if (new_size == old_alloc_size)
+ return p;
+
+ if (p && pointer_is_aligned (p, align) &&
+ mspace_realloc_in_place (h->mspace, p, new_size))
+ {
+ clib_mem_unpoison (p, new_size);
+ if (PREDICT_FALSE (h->flags & CLIB_MEM_HEAP_F_TRACED))
+ {
+ mheap_put_trace_internal (h, pointer_to_uword (p), old_alloc_size);
+ mheap_get_trace_internal (h, pointer_to_uword (p),
+ clib_mem_size (p));
+ }
+ }
else
{
- rv = create_mspace_with_base (memory, size, locked);
- if (rv)
- mspace_disable_expand (rv);
- return rv;
+ new = clib_mem_heap_alloc_inline (h, new_size, align, 1);
+
+ clib_mem_unpoison (new, new_size);
+ if (old_alloc_size)
+ {
+ clib_mem_unpoison (p, old_alloc_size);
+ clib_memcpy_fast (new, p, clib_min (new_size, old_alloc_size));
+ clib_mem_heap_free (h, p);
+ }
+ p = new;
}
+
+ return p;
}
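+
+/* Illustrative usage ('new_size' is hypothetical): the resize happens in
+ * place when dlmalloc can extend the chunk and alignment still holds,
+ * otherwise the contents are copied to a fresh allocation and the old
+ * one is freed:
+ *
+ *   p = clib_mem_realloc (p, new_size);
+ */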
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
+__clib_export __clib_flatten void *
+clib_mem_heap_realloc (void *heap, void *p, uword new_size)
+{
+ return clib_mem_heap_realloc_aligned (heap, p, new_size, CLIB_MEM_MIN_ALIGN);
+}
+
+__clib_export __clib_flatten void *
+clib_mem_realloc_aligned (void *p, uword new_size, uword align)
+{
+ return clib_mem_heap_realloc_aligned (0, p, new_size, align);
+}
+
+__clib_export __clib_flatten void *
+clib_mem_realloc (void *p, uword new_size)
+{
+ return clib_mem_heap_realloc_aligned (0, p, new_size, CLIB_MEM_MIN_ALIGN);
+}
+
+__clib_export __clib_flatten uword
+clib_mem_heap_is_heap_object (void *heap, void *p)
+{
+ clib_mem_heap_t *h = heap ? heap : clib_mem_get_per_cpu_heap ();
+ return mspace_is_heap_object (h->mspace, p);
+}
+
+__clib_export __clib_flatten uword
+clib_mem_is_heap_object (void *p)
+{
+ return clib_mem_heap_is_heap_object (0, p);
+}
+
+__clib_export __clib_flatten void
+clib_mem_heap_free (void *heap, void *p)
+{
+ clib_mem_heap_t *h = heap ? heap : clib_mem_get_per_cpu_heap ();
+ uword size = clib_mem_size (p);
+
+ /* Make sure object is in the correct heap. */
+ ASSERT (clib_mem_heap_is_heap_object (h, p));
+
+ if (PREDICT_FALSE (h->flags & CLIB_MEM_HEAP_F_TRACED))
+ mheap_put_trace_internal (h, pointer_to_uword (p), size);
+ clib_mem_poison (p, size);
+
+ mspace_free (h->mspace, p);
+}
+
+__clib_export __clib_flatten void
+clib_mem_free (void *p)
+{
+ clib_mem_heap_free (0, p);
+}
+
+__clib_export __clib_flatten uword
+clib_mem_size (void *p)
+{
+ return mspace_usable_size (p);
+}
+
+__clib_export void
+clib_mem_free_s (void *p)
+{
+ uword size = clib_mem_size (p);
+ clib_mem_unpoison (p, size);
+ memset_s_inline (p, size, 0, size);
+ clib_mem_free (p);
+}