X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvppinfra%2Fmheap.c;h=b5566bd6389a894091472bf3cf9606cba8e55c95;hb=a6ef36b2c25de47824a1b45e147ab2fbf67c3a33;hp=fceca95ff7dab4f42ab51bbe1fcd1b1fb6e4b90b;hpb=73710c7da2f8deaea83dbbbfce8737c9c6cd2949;p=vpp.git

diff --git a/src/vppinfra/mheap.c b/src/vppinfra/mheap.c
index fceca95ff7d..b5566bd6389 100644
--- a/src/vppinfra/mheap.c
+++ b/src/vppinfra/mheap.c
@@ -41,6 +41,7 @@
 #include
 #include
 #include
+#include
 
 #ifdef CLIB_UNIX
 #include
@@ -63,8 +64,8 @@ mheap_maybe_lock (void *v)
 	  return;
 	}
 
-      while (__sync_lock_test_and_set (&h->lock, 1))
-	;
+      while (clib_atomic_test_and_set (&h->lock))
+	CLIB_PAUSE ();
 
       h->owner_cpu = my_cpu;
       h->recursion_count = 1;
@@ -81,8 +82,7 @@ mheap_maybe_unlock (void *v)
       if (--h->recursion_count == 0)
 	{
 	  h->owner_cpu = ~0;
-	  CLIB_MEMORY_BARRIER ();
-	  h->lock = 0;
+	  clib_atomic_release (&h->lock);
 	}
     }
 }
@@ -663,12 +663,28 @@ mheap_get_aligned (void *v,
       return v;
     }
 
-  /* Round requested size. */
+  /*
+   * Round requested size.
+   *
+   * Step 1: round up to the minimum object size.
+   * Step 2: round up to a multiple of the user data size (e.g. 4)
+   * Step 3: if non-trivial alignment requested, round up
+   *         so that the object precisely fills a chunk
+   *         as big as the alignment request.
+   *
+   * Step 3 prevents the code from going into "bin search hyperspace":
+   * looking at a huge number of fractional remainder chunks, none of which
+   * will satisfy the alignment constraint. This fixes an allocator
+   * performance issue when one requests a large number of 16 byte objects
+   * aligned to 64 bytes, to name one variation on the theme.
+   */
   n_user_data_bytes = clib_max (n_user_data_bytes, MHEAP_MIN_USER_DATA_BYTES);
   n_user_data_bytes =
     round_pow2 (n_user_data_bytes,
 		STRUCT_SIZE_OF (mheap_elt_t, user_data[0]));
-
+  if (align > MHEAP_ELT_OVERHEAD_BYTES)
+    n_user_data_bytes = clib_max (n_user_data_bytes,
+				  align - MHEAP_ELT_OVERHEAD_BYTES);
   if (!v)
     v = mheap_alloc (0, 64 << 20);
 
@@ -919,7 +935,7 @@ mheap_alloc_with_flags (void *memory, uword memory_size, uword flags)
     clib_mem_vm_map (h, sizeof (h[0]));
 
   /* Zero vector header: both heap header and vector length. */
-  memset (h, 0, sizeof (h[0]));
+  clib_memset (h, 0, sizeof (h[0]));
   _vec_len (v) = 0;
 
   h->vm_alloc_offset_from_header = (void *) h - memory;
@@ -937,8 +953,8 @@ mheap_alloc_with_flags (void *memory, uword memory_size, uword flags)
 		      (clib_address_t) v, h->max_size);
 
   /* Initialize free list heads to empty. */
-  memset (h->first_free_elt_uoffset_by_bin, 0xFF,
-	  sizeof (h->first_free_elt_uoffset_by_bin));
+  clib_memset (h->first_free_elt_uoffset_by_bin, 0xFF,
+	       sizeof (h->first_free_elt_uoffset_by_bin));
 
   return v;
 }
@@ -958,6 +974,29 @@ mheap_alloc (void *memory, uword size)
   return mheap_alloc_with_flags (memory, size, flags);
 }
 
+void *
+mheap_alloc_with_lock (void *memory, uword size, int locked)
+{
+  uword flags = 0;
+  void *rv;
+
+  if (memory != 0)
+    flags |= MHEAP_FLAG_DISABLE_VM;
+
+#ifdef CLIB_HAVE_VEC128
+  flags |= MHEAP_FLAG_SMALL_OBJECT_CACHE;
+#endif
+
+  rv = mheap_alloc_with_flags (memory, size, flags);
+
+  if (rv && locked)
+    {
+      mheap_t *h = mheap_header (rv);
+      h->flags |= MHEAP_FLAG_THREAD_SAFE;
+    }
+  return rv;
+}
+
 void *
 _mheap_free (void *v)
 {
@@ -1197,7 +1236,7 @@ format_mheap (u8 * s, va_list * va)
     mheap_elt_t *e;
     uword i, n_hist;
 
-    memset (hist, 0, sizeof (hist));
+    clib_memset (hist, 0, sizeof (hist));
 
     n_hist = 0;
     for (e = v;
@@ -1497,7 +1536,7 @@ mheap_get_trace (void *v, uword offset, uword size)
   mheap_trace_t trace;
 
   /* Spurious Coverity warnings be gone. */
-  memset (&trace, 0, sizeof (trace));
+  clib_memset (&trace, 0, sizeof (trace));
 
   n_callers = clib_backtrace (trace.callers, ARRAY_LEN (trace.callers),
 			      /* Skip mheap_get_aligned's frame */ 1);
@@ -1591,7 +1630,7 @@ mheap_put_trace (void *v, uword offset, uword size)
 	{
 	  hash_unset_mem (tm->trace_by_callers, t->callers);
 	  vec_add1 (tm->trace_free_list, trace_index);
-	  memset (t, 0, sizeof (t[0]));
+	  clib_memset (t, 0, sizeof (t[0]));
 	}
     }
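Note on the locking hunks above: mheap_maybe_lock()/mheap_maybe_unlock() implement a recursive spin lock, and the patch replaces the raw __sync_lock_test_and_set()/CLIB_MEMORY_BARRIER() sequence with the clib_atomic_test_and_set()/CLIB_PAUSE()/clib_atomic_release() macros. The following is a minimal standalone sketch of that pattern written with C11 atomics instead of the clib macros; demo_lock_t, demo_lock() and demo_unlock() are illustrative names, not vppinfra APIs.

/* Standalone sketch (not vppinfra code) of a recursive test-and-set
 * spin lock, mirroring the shape of mheap_maybe_lock/unlock. */
#include <stdatomic.h>
#include <stdint.h>

typedef struct
{
  atomic_flag lock;		/* plays the role of h->lock */
  uintptr_t owner;		/* plays the role of h->owner_cpu */
  unsigned recursion_count;
} demo_lock_t;

static void
demo_lock (demo_lock_t * l, uintptr_t me)
{
  if (l->owner == me)
    {
      l->recursion_count++;	/* same thread re-entering: just count */
      return;
    }

  /* Spin until the flag is acquired; a real implementation would add a
     pause/yield hint in the loop body, which is what CLIB_PAUSE () does. */
  while (atomic_flag_test_and_set_explicit (&l->lock, memory_order_acquire))
    ;

  l->owner = me;
  l->recursion_count = 1;
}

static void
demo_unlock (demo_lock_t * l)
{
  if (--l->recursion_count == 0)
    {
      l->owner = (uintptr_t) ~0;
      /* Release ordering publishes all prior writes before the lock is
         dropped, the same guarantee clib_atomic_release () provides. */
      atomic_flag_clear_explicit (&l->lock, memory_order_release);
    }
}

int
main (void)
{
  demo_lock_t l = { .lock = ATOMIC_FLAG_INIT, .owner = (uintptr_t) ~0 };
  uintptr_t me = 1;

  demo_lock (&l, me);		/* take the lock */
  demo_lock (&l, me);		/* recursive re-entry, only bumps the count */
  demo_unlock (&l);
  demo_unlock (&l);		/* last unlock actually clears the flag */
  return 0;
}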
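The size-rounding steps documented in the new mheap_get_aligned() comment can be sanity-checked in isolation. The sketch below re-implements the three steps with stand-in constants (MIN_USER_DATA_BYTES, USER_DATA_WORD_BYTES and ELT_OVERHEAD_BYTES are illustrative assumptions, not the actual vppinfra values) and shows how a 16-byte request aligned to 64 bytes is padded so that element overhead plus user data exactly fill a 64-byte chunk.

/* Standalone sketch of the request-size rounding described in the diff;
 * all constants below are assumptions for illustration only. */
#include <stdio.h>
#include <stdint.h>

#define MIN_USER_DATA_BYTES   4	 /* stand-in for MHEAP_MIN_USER_DATA_BYTES */
#define USER_DATA_WORD_BYTES  4	 /* stand-in for STRUCT_SIZE_OF (mheap_elt_t, user_data[0]) */
#define ELT_OVERHEAD_BYTES    8	 /* stand-in for MHEAP_ELT_OVERHEAD_BYTES */

static uint64_t
max_u64 (uint64_t a, uint64_t b)
{
  return a > b ? a : b;
}

/* Round x up to the next multiple of the power-of-two p. */
static uint64_t
round_up_pow2 (uint64_t x, uint64_t p)
{
  return (x + p - 1) & ~(p - 1);
}

static uint64_t
rounded_request (uint64_t n_user_data_bytes, uint64_t align)
{
  /* Step 1: never go below the minimum object size. */
  n_user_data_bytes = max_u64 (n_user_data_bytes, MIN_USER_DATA_BYTES);

  /* Step 2: round up to a multiple of the user-data word size. */
  n_user_data_bytes = round_up_pow2 (n_user_data_bytes, USER_DATA_WORD_BYTES);

  /* Step 3: for non-trivial alignment, grow the request so overhead plus
     data fill a chunk as large as the alignment, so the binned free-list
     search never wades through remainder chunks it can never align. */
  if (align > ELT_OVERHEAD_BYTES)
    n_user_data_bytes = max_u64 (n_user_data_bytes, align - ELT_OVERHEAD_BYTES);

  return n_user_data_bytes;
}

int
main (void)
{
  /* 16-byte object with 64-byte alignment: padded to 64 - 8 = 56 bytes. */
  printf ("16 bytes @ align 64 -> %llu\n",
	  (unsigned long long) rounded_request (16, 64));
  /* Trivial alignment: the request stays at 16 bytes. */
  printf ("16 bytes @ align 4  -> %llu\n",
	  (unsigned long long) rounded_request (16, 4));
  return 0;
}

With these stand-in values the first call prints 56 and the second prints 16, which is the behaviour Step 3 is after: aligned requests are promoted to sizes the bin search can actually satisfy instead of scanning many fractional remainder chunks.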