return;
}
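+ /* Take the heap spin lock: clib_atomic_test_and_set returns the previous
+  * value of the lock word, so the loop exits only once the word was
+  * observed clear and atomically set by this caller. */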
- while (__sync_lock_test_and_set (&h->lock, 1))
+ while (clib_atomic_test_and_set (&h->lock))
;
h->owner_cpu = my_cpu;
return v;
}
- /* Round requested size. */
+ /*
+ * Round requested size.
+ *
+ * Step 1: round up to the minimum object size.
+ * Step 2: round up to a multiple of the user data word size (e.g. 4 bytes)
+ * Step 3: if non-trivial alignment requested, round up
+ * so that the object precisely fills a chunk
+ * as big as the alignment request.
+ *
+ * Step 3 prevents the code from going into "bin search hyperspace":
+ * looking at a huge number of fractional remainder chunks, none of which
+ * will satisfy the alignment constraint. This fixes an allocator
+ * performance issue when one requests a large number of 16 byte objects
+ * aligned to 64 bytes, to name one variation on the theme.
+ */
n_user_data_bytes = clib_max (n_user_data_bytes, MHEAP_MIN_USER_DATA_BYTES);
n_user_data_bytes =
round_pow2 (n_user_data_bytes,
STRUCT_SIZE_OF (mheap_elt_t, user_data[0]));
-
+ if (align > MHEAP_ELT_OVERHEAD_BYTES)
+ n_user_data_bytes = clib_max (n_user_data_bytes,
+ align - MHEAP_ELT_OVERHEAD_BYTES);
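+ /*
+ * Worked example (illustrative): for the 16-byte objects aligned to 64
+ * bytes mentioned above, steps 1 and 2 leave the size at 16 (assuming it
+ * already meets the minimum-size and word-multiple rules), and step 3
+ * raises it to 64 - MHEAP_ELT_OVERHEAD_BYTES, so the element header plus
+ * user data fill exactly one 64-byte chunk and the bin search never has
+ * to visit fractional remainders that cannot satisfy the alignment.
+ */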
if (!v)
v = mheap_alloc (0, 64 << 20);
clib_mem_vm_map (h, sizeof (h[0]));
/* Zero vector header: both heap header and vector length. */
- memset (h, 0, sizeof (h[0]));
+ clib_memset (h, 0, sizeof (h[0]));
_vec_len (v) = 0;
h->vm_alloc_offset_from_header = (void *) h - memory;
(clib_address_t) v, h->max_size);
/* Initialize free list heads to empty. */
- memset (h->first_free_elt_uoffset_by_bin, 0xFF,
- sizeof (h->first_free_elt_uoffset_by_bin));
+ clib_memset (h->first_free_elt_uoffset_by_bin, 0xFF,
+ sizeof (h->first_free_elt_uoffset_by_bin));
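+ /* Note: the 0xFF fill above leaves every per-bin offset all-ones (~0),
+  * which the free-list code treats as "no free element in this bin". */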
return v;
}
return mheap_alloc_with_flags (memory, size, flags);
}
+void *
+mheap_alloc_with_lock (void *memory, uword size, int locked)
+{
+ uword flags = 0;
+ void *rv;
+
+ if (memory != 0)
+ flags |= MHEAP_FLAG_DISABLE_VM;
+
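+ /* The small-object cache lookup relies on 128-bit vector compares, so it
+    is only worth enabling when SIMD support is available. */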
+#ifdef CLIB_HAVE_VEC128
+ flags |= MHEAP_FLAG_SMALL_OBJECT_CACHE;
+#endif
+
+ rv = mheap_alloc_with_flags (memory, size, flags);
+
+ if (rv && locked)
+ {
+ mheap_t *h = mheap_header (rv);
+ h->flags |= MHEAP_FLAG_THREAD_SAFE;
+ }
+ return rv;
+}
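+ /*
+ * Illustrative usage sketch (assuming the standard clib heap-switching
+ * API): create a thread-safe heap and allocate from it.
+ *
+ *   void *heap = mheap_alloc_with_lock (0, 64 << 20, 1);
+ *   void *old = clib_mem_set_heap (heap);
+ *   void *obj = clib_mem_alloc (128);
+ *   clib_mem_set_heap (old);
+ */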
+
void *
_mheap_free (void *v)
{
mheap_elt_t *e;
uword i, n_hist;
- memset (hist, 0, sizeof (hist));
+ clib_memset (hist, 0, sizeof (hist));
n_hist = 0;
for (e = v;
mheap_trace_t trace;
/* Spurious Coverity warnings be gone. */
- memset (&trace, 0, sizeof (trace));
+ clib_memset (&trace, 0, sizeof (trace));
n_callers = clib_backtrace (trace.callers, ARRAY_LEN (trace.callers),
/* Skip mheap_get_aligned's frame */ 1);
if (!tm->trace_by_callers)
tm->trace_by_callers =
- hash_create_mem (0, sizeof (trace.callers), sizeof (uword));
+ hash_create_shmem (0, sizeof (trace.callers), sizeof (uword));
p = hash_get_mem (tm->trace_by_callers, &trace.callers);
if (p)
{
hash_unset_mem (tm->trace_by_callers, t->callers);
vec_add1 (tm->trace_free_list, trace_index);
- memset (t, 0, sizeof (t[0]));
+ clib_memset (t, 0, sizeof (t[0]));
}
}