2 This is a version (aka dlmalloc) of malloc/free/realloc written by
3 Doug Lea and released to the public domain, as explained at
4 http://creativecommons.org/publicdomain/zero/1.0/ Send questions,
5 comments, complaints, performance data, etc to dl@cs.oswego.edu
8 #include <vppinfra/dlmalloc.h>
10 /*------------------------------ internal #includes ---------------------- */
13 #pragma warning( disable : 4146 ) /* no "unsigned" warnings */
16 #include <stdio.h> /* for printing in malloc_stats */
17 #endif /* NO_MALLOC_STATS */
19 #include <errno.h> /* for MALLOC_FAILURE_ACTION */
20 #endif /* LACKS_ERRNO_H */
22 #if DLM_ABORT_ON_ASSERT_FAILURE
24 #define assert(x) if(!(x)) DLM_ABORT
25 #else /* DLM_ABORT_ON_ASSERT_FAILURE */
27 #endif /* DLM_ABORT_ON_ASSERT_FAILURE */
34 #if !defined(WIN32) && !defined(LACKS_TIME_H)
35 #include <time.h> /* for magic initialization */
37 #ifndef LACKS_STDLIB_H
38 #include <stdlib.h> /* for abort() */
39 #endif /* LACKS_STDLIB_H */
40 #ifndef LACKS_STRING_H
41 #include <string.h> /* for memset etc */
42 #endif /* LACKS_STRING_H */
44 #ifndef LACKS_STRINGS_H
45 #include <strings.h> /* for ffs */
46 #endif /* LACKS_STRINGS_H */
47 #endif /* USE_BUILTIN_FFS */
49 #ifndef LACKS_SYS_MMAN_H
50 /* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
51 #if (defined(linux) && !defined(__USE_GNU))
53 #include <sys/mman.h> /* for mmap */
56 #include <sys/mman.h> /* for mmap */
58 #endif /* LACKS_SYS_MMAN_H */
61 #endif /* LACKS_FCNTL_H */
62 #endif /* HAVE_MMAP */
63 #ifndef LACKS_UNISTD_H
64 #include <unistd.h> /* for sbrk, sysconf */
65 #else /* LACKS_UNISTD_H */
66 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
67 extern void* sbrk(ptrdiff_t);
68 #endif /* FreeBSD etc */
69 #endif /* LACKS_UNISTD_H */
71 /* Declarations for locking */
74 #if defined (__SVR4) && defined (__sun) /* solaris */
76 #elif !defined(LACKS_SCHED_H)
78 #endif /* solaris or LACKS_SCHED_H */
79 #if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
81 #endif /* USE_RECURSIVE_LOCKS ... */
82 #elif defined(_MSC_VER)
84 /* These are already defined on AMD64 builds */
87 #endif /* __cplusplus */
88 LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
89 LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
92 #endif /* __cplusplus */
94 #pragma intrinsic (_InterlockedCompareExchange)
95 #pragma intrinsic (_InterlockedExchange)
96 #define interlockedcompareexchange _InterlockedCompareExchange
97 #define interlockedexchange _InterlockedExchange
98 #elif defined(WIN32) && defined(__GNUC__)
99 #define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
100 #define interlockedexchange __sync_lock_test_and_set
102 #else /* USE_LOCKS */
103 #endif /* USE_LOCKS */
106 #define LOCK_AT_FORK 0
109 /* Declarations for bit scanning on win32 */
110 #if defined(_MSC_VER) && _MSC_VER>=1300
111 #ifndef BitScanForward /* Try to avoid pulling in WinNT.h */
114 #endif /* __cplusplus */
115 unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
116 unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
119 #endif /* __cplusplus */
121 #define BitScanForward _BitScanForward
122 #define BitScanReverse _BitScanReverse
123 #pragma intrinsic(_BitScanForward)
124 #pragma intrinsic(_BitScanReverse)
125 #endif /* BitScanForward */
126 #endif /* defined(_MSC_VER) && _MSC_VER>=1300 */
129 #ifndef malloc_getpagesize
130 # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
131 # ifndef _SC_PAGE_SIZE
132 # define _SC_PAGE_SIZE _SC_PAGESIZE
135 # ifdef _SC_PAGE_SIZE
136 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
138 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
139 extern size_t getpagesize();
140 # define malloc_getpagesize getpagesize()
142 # ifdef WIN32 /* use supplied emulation of getpagesize */
143 # define malloc_getpagesize getpagesize()
145 # ifndef LACKS_SYS_PARAM_H
146 # include <sys/param.h>
148 # ifdef EXEC_PAGESIZE
149 # define malloc_getpagesize EXEC_PAGESIZE
153 # define malloc_getpagesize NBPG
155 # define malloc_getpagesize (NBPG * CLSIZE)
159 # define malloc_getpagesize NBPC
162 # define malloc_getpagesize PAGESIZE
163 # else /* just guess */
164 # define malloc_getpagesize ((size_t)4096U)
175 /* ------------------- size_t and alignment properties -------------------- */
177 /* The byte and bit size of a size_t */
178 #define SIZE_T_SIZE (sizeof(size_t))
179 #define SIZE_T_BITSIZE (sizeof(size_t) << 3)
181 /* Some constants coerced to size_t */
182 /* Annoying but necessary to avoid errors on some platforms */
183 #define SIZE_T_ZERO ((size_t)0)
184 #define SIZE_T_ONE ((size_t)1)
185 #define SIZE_T_TWO ((size_t)2)
186 #define SIZE_T_FOUR ((size_t)4)
187 #define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
188 #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
189 #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
190 #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
192 /* The bit mask value corresponding to MALLOC_ALIGNMENT */
193 #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
195 /* True if address a has acceptable alignment */
196 #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
198 /* the number of bytes to offset an address to align it */
199 #define align_offset(A)\
200 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
201 ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
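/*
  Worked example (illustration only): assuming the usual 16-byte
  MALLOC_ALIGNMENT of a 64-bit build, CHUNK_ALIGN_MASK is 0xf, so an
  address whose low four bits are 0x8 gets align_offset 8 and fails
  is_aligned, while an address ending in 0x0 gets offset 0 and passes.
*/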
203 /* -------------------------- MMAP preliminaries ------------------------- */
206 If HAVE_MORECORE or HAVE_MMAP is false, we just define calls and
207 checks to fail so the compiler optimizer can delete code rather than
208 using so many "#if"s.
212 /* MORECORE and MMAP must return MFAIL on failure */
213 #define MFAIL ((void*)(MAX_SIZE_T))
214 #define CMFAIL ((char*)(MFAIL)) /* defined for convenience */
219 #define MUNMAP_DEFAULT(a, s) munmap((a), (s))
220 #define MMAP_PROT (PROT_READ|PROT_WRITE)
221 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
222 #define MAP_ANONYMOUS MAP_ANON
223 #endif /* MAP_ANON */
225 #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
226 #define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
227 #else /* MAP_ANONYMOUS */
229 Nearly all versions of mmap support MAP_ANONYMOUS, so the following
230 is unlikely to be needed, but is supplied just in case.
232 #define MMAP_FLAGS (MAP_PRIVATE)
233 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
234 #define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
235 (dev_zero_fd = open("/dev/zero", O_RDWR), \
236 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
237 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
238 #endif /* MAP_ANONYMOUS */
240 #define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
244 /* Win32 MMAP via VirtualAlloc */
245 static FORCEINLINE void* win32mmap(size_t size) {
246 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
247 return (ptr != 0)? ptr: MFAIL;
250 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
251 static FORCEINLINE void* win32direct_mmap(size_t size) {
252 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
254 return (ptr != 0)? ptr: MFAIL;
257 /* This function supports releasing coalesced segments */
258 static FORCEINLINE int win32munmap(void* ptr, size_t size) {
259 MEMORY_BASIC_INFORMATION minfo;
260 char* cptr = (char*)ptr;
262 if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
264 if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
265 minfo.State != MEM_COMMIT || minfo.RegionSize > size)
267 if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
269 cptr += minfo.RegionSize;
270 size -= minfo.RegionSize;
275 #define MMAP_DEFAULT(s) win32mmap(s)
276 #define MUNMAP_DEFAULT(a, s) win32munmap((a), (s))
277 #define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)
279 #endif /* HAVE_MMAP */
283 #define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
285 #endif /* HAVE_MREMAP */
288 * Define CALL_MORECORE
292 #define CALL_MORECORE(S) MORECORE(S)
294 #define CALL_MORECORE(S) MORECORE_DEFAULT(S)
295 #endif /* MORECORE */
296 #else /* HAVE_MORECORE */
297 #define CALL_MORECORE(S) MFAIL
298 #endif /* HAVE_MORECORE */
301 * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
304 #define USE_MMAP_BIT (SIZE_T_ONE)
307 #define CALL_MMAP(s) MMAP(s)
309 #define CALL_MMAP(s) MMAP_DEFAULT(s)
312 #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
314 #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s))
317 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
318 #else /* DIRECT_MMAP */
319 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
320 #endif /* DIRECT_MMAP */
321 #else /* HAVE_MMAP */
322 #define USE_MMAP_BIT (SIZE_T_ZERO)
324 #define MMAP(s) MFAIL
325 #define MUNMAP(a, s) (-1)
326 #define DIRECT_MMAP(s) MFAIL
327 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
328 #define CALL_MMAP(s) MMAP(s)
329 #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
330 #endif /* HAVE_MMAP */
335 #if HAVE_MMAP && HAVE_MREMAP
337 #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
339 #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
341 #else /* HAVE_MMAP && HAVE_MREMAP */
342 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
343 #endif /* HAVE_MMAP && HAVE_MREMAP */
345 /* mstate bit set if contiguous morecore disabled or failed */
346 #define USE_NONCONTIGUOUS_BIT (4U)
348 /* mstate bit set if no expansion allowed */
349 #define USE_NOEXPAND_BIT (8U)
351 /* trace allocations if set */
352 #define USE_TRACE_BIT (16U)
354 /* segment bit set in create_mspace_with_base */
355 #define EXTERN_BIT (8U)
358 /* --------------------------- Lock preliminaries ------------------------ */
361 When locks are defined, there is one global lock, plus
364 The global lock ensures that mparams.magic and other unique
365 mparams values are initialized only once. It also protects
366 sequences of calls to MORECORE. In many cases sys_alloc requires
367 two calls, that should not be interleaved with calls by other
368 threads. This does not protect against direct calls to MORECORE
369 by other threads not using this lock, so there is still code to
370 cope as best we can with interference.
372 Per-mspace locks surround calls to malloc, free, etc.
373 By default, locks are simple non-reentrant mutexes.
375 Because lock-protected regions generally have bounded times, it is
376 OK to use the supplied simple spinlocks. Spinlocks are likely to
377 improve performance for lightly contended applications, but worsen
378 performance under heavy contention.
380 If USE_LOCKS is > 1, the definitions of lock routines here are
381 bypassed, in which case you will need to define the type MLOCK_T,
382 and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK
383 and TRY_LOCK. You must also declare a
384 static MLOCK_T malloc_global_mutex = { initialization values };.
389 #define USE_LOCK_BIT (0U)
390 #define INITIAL_LOCK(l) (0)
391 #define DESTROY_LOCK(l) (0)
392 #define ACQUIRE_MALLOC_GLOBAL_LOCK()
393 #define RELEASE_MALLOC_GLOBAL_LOCK()
397 /* ----------------------- User-defined locks ------------------------ */
398 /* Define your own lock implementation here */
399 /* #define INITIAL_LOCK(lk) ... */
400 /* #define DESTROY_LOCK(lk) ... */
401 /* #define ACQUIRE_LOCK(lk) ... */
402 /* #define RELEASE_LOCK(lk) ... */
403 /* #define TRY_LOCK(lk) ... */
404 /* static MLOCK_T malloc_global_mutex = ... */
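/*
  A minimal sketch (compiled out) of what such user-supplied definitions
  could look like if built on plain pthread mutexes; it mirrors the
  pthread-based defaults used further below when USE_LOCKS == 1, and is
  shown here only to illustrate the required interface.
*/
#if 0
#include <pthread.h>
#define MLOCK_T            pthread_mutex_t
#define INITIAL_LOCK(lk)   pthread_mutex_init(lk, NULL)  /* 0 on success */
#define DESTROY_LOCK(lk)   pthread_mutex_destroy(lk)
#define ACQUIRE_LOCK(lk)   pthread_mutex_lock(lk)        /* 0 on success */
#define RELEASE_LOCK(lk)   pthread_mutex_unlock(lk)
#define TRY_LOCK(lk)       (!pthread_mutex_trylock(lk))  /* nonzero if acquired */
static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif /* illustration only */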
408 /* First, define CAS_LOCK and CLEAR_LOCK on ints */
409 /* Note CAS_LOCK defined to return 0 on success */
411 #if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
412 #define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1)
413 #define CLEAR_LOCK(sl) __sync_lock_release(sl)
415 #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
416 /* Custom spin locks for older gcc on x86 */
417 static FORCEINLINE int x86_cas_lock(int *sl) {
421 __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
423 : "r" (val), "m" (*(sl)), "0"(cmp)
428 static FORCEINLINE void x86_clear_lock(int* sl) {
432 __asm__ __volatile__ ("lock; xchgl %0, %1"
434 : "m" (*(sl)), "0"(prev)
438 #define CAS_LOCK(sl) x86_cas_lock(sl)
439 #define CLEAR_LOCK(sl) x86_clear_lock(sl)
441 #else /* Win32 MSC */
442 #define CAS_LOCK(sl) interlockedexchange(sl, (LONG)1)
443 #define CLEAR_LOCK(sl) interlockedexchange (sl, (LONG)0)
445 #endif /* ... gcc spin locks ... */
447 /* How to yield for a spin lock */
448 #define SPINS_PER_YIELD 63
449 #if defined(_MSC_VER)
450 #define SLEEP_EX_DURATION 50 /* delay for yield/sleep */
451 #define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE)
452 #elif defined (__SVR4) && defined (__sun) /* solaris */
453 #define SPIN_LOCK_YIELD thr_yield();
454 #elif !defined(LACKS_SCHED_H)
455 #define SPIN_LOCK_YIELD sched_yield();
457 #define SPIN_LOCK_YIELD
458 #endif /* ... yield ... */
460 #if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
461 /* Plain spin locks use a single word (embedded in malloc_states) */
462 __clib_nosanitize_addr
463 static int spin_acquire_lock(int *sl) {
465 while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
466 if ((++spins & SPINS_PER_YIELD) == 0) {
474 #define TRY_LOCK(sl) !CAS_LOCK(sl)
475 #define RELEASE_LOCK(sl) CLEAR_LOCK(sl)
476 #define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)
477 #define INITIAL_LOCK(sl) (*sl = 0)
478 #define DESTROY_LOCK(sl) (0)
479 static MLOCK_T malloc_global_mutex = 0;
481 #else /* USE_RECURSIVE_LOCKS */
482 /* types for lock owners */
484 #define THREAD_ID_T DWORD
485 #define CURRENT_THREAD GetCurrentThreadId()
486 #define EQ_OWNER(X,Y) ((X) == (Y))
489 Note: the following assume that pthread_t is a type that can be
490 initialized to (casted) zero. If this is not the case, you will need to
491 somehow redefine these or not use spin locks.
493 #define THREAD_ID_T pthread_t
494 #define CURRENT_THREAD pthread_self()
495 #define EQ_OWNER(X,Y) pthread_equal(X, Y)
498 struct malloc_recursive_lock {
501 THREAD_ID_T threadid;
504 #define MLOCK_T struct malloc_recursive_lock
505 static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};
507 static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) {
514 static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
515 THREAD_ID_T mythreadid = CURRENT_THREAD;
518 if (*((volatile int *)(&lk->sl)) == 0) {
519 if (!CAS_LOCK(&lk->sl)) {
520 lk->threadid = mythreadid;
525 else if (EQ_OWNER(lk->threadid, mythreadid)) {
529 if ((++spins & SPINS_PER_YIELD) == 0) {
535 static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) {
536 THREAD_ID_T mythreadid = CURRENT_THREAD;
537 if (*((volatile int *)(&lk->sl)) == 0) {
538 if (!CAS_LOCK(&lk->sl)) {
539 lk->threadid = mythreadid;
544 else if (EQ_OWNER(lk->threadid, mythreadid)) {
551 #define RELEASE_LOCK(lk) recursive_release_lock(lk)
552 #define TRY_LOCK(lk) recursive_try_lock(lk)
553 #define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk)
554 #define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
555 #define DESTROY_LOCK(lk) (0)
556 #endif /* USE_RECURSIVE_LOCKS */
558 #elif defined(WIN32) /* Win32 critical sections */
559 #define MLOCK_T CRITICAL_SECTION
560 #define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0)
561 #define RELEASE_LOCK(lk) LeaveCriticalSection(lk)
562 #define TRY_LOCK(lk) TryEnterCriticalSection(lk)
563 #define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))
564 #define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0)
565 #define NEED_GLOBAL_LOCK_INIT
567 static MLOCK_T malloc_global_mutex;
568 static volatile LONG malloc_global_mutex_status;
570 /* Use spin loop to initialize global lock */
571 static void init_malloc_global_mutex() {
573 long stat = malloc_global_mutex_status;
576 /* transition to < 0 while initializing, then to > 0 */
578 interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) {
579 InitializeCriticalSection(&malloc_global_mutex);
580 interlockedexchange(&malloc_global_mutex_status, (LONG)1);
587 #else /* pthreads-based locks */
588 #define MLOCK_T pthread_mutex_t
589 #define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk)
590 #define RELEASE_LOCK(lk) pthread_mutex_unlock(lk)
591 #define TRY_LOCK(lk) (!pthread_mutex_trylock(lk))
592 #define INITIAL_LOCK(lk) pthread_init_lock(lk)
593 #define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)
595 #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
596 /* Cope with old-style linux recursive lock initialization by adding */
597 /* skipped internal declaration from pthread.h */
598 extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
600 #define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
601 #define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
602 #endif /* USE_RECURSIVE_LOCKS ... */
604 static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
606 static int pthread_init_lock (MLOCK_T *lk) {
607 pthread_mutexattr_t attr;
608 if (pthread_mutexattr_init(&attr)) return 1;
609 #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
610 if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
612 if (pthread_mutex_init(lk, &attr)) return 1;
613 if (pthread_mutexattr_destroy(&attr)) return 1;
617 #endif /* ... lock types ... */
619 /* Common code for all lock types */
620 #define USE_LOCK_BIT (2U)
622 #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
623 #define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
626 #ifndef RELEASE_MALLOC_GLOBAL_LOCK
627 #define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);
630 #endif /* USE_LOCKS */
632 /* ----------------------- Chunk representations ------------------------ */
635 (The following includes lightly edited explanations by Colin Plumb.)
637 The malloc_chunk declaration below is misleading (but accurate and
638 necessary). It declares a "view" into memory allowing access to
639 necessary fields at known offsets from a given base.
641 Chunks of memory are maintained using a `boundary tag' method as
642 originally described by Knuth. (See the paper by Paul Wilson
643 ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
644 techniques.) Sizes of free chunks are stored both in the front of
645 each chunk and at the end. This makes consolidating fragmented
646 chunks into bigger chunks fast. The head fields also hold bits
647 representing whether chunks are free or in use.
649 Here are some pictures to make it clearer. They are "exploded" to
650 show that the state of a chunk can be thought of as extending from
651 the high 31 bits of the head field of its header through the
652 prev_foot and PINUSE_BIT bit of the following chunk header.
654 A chunk that's in use looks like:
656 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
657 | Size of previous chunk (if P = 0) |
658 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
659 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
660 | Size of this chunk 1| +-+
661 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
667 +- size - sizeof(size_t) available payload bytes -+
671 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
672 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
673 | Size of next chunk (may or may not be in use) | +-+
674 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
676 And if it's free, it looks like this:
679 | User payload (must be in use, or we would have merged!) |
680 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
681 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
682 | Size of this chunk 0| +-+
683 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
685 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
687 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
689 +- size - sizeof(struct chunk) unused bytes -+
691 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
692 | Size of this chunk |
693 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
694 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
695 | Size of next chunk (must be in use, or we would have merged)| +-+
696 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
700 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
703 Note that since we always merge adjacent free chunks, the chunks
704 adjacent to a free chunk must be in use.
706 Given a pointer to a chunk (which can be derived trivially from the
707 payload pointer) we can, in O(1) time, find out whether the adjacent
708 chunks are free, and if so, unlink them from the lists that they
709 are on and merge them with the current chunk.
711 Chunks always begin on even word boundaries, so the mem portion
712 (which is returned to the user) is also on an even word boundary, and
713 thus at least double-word aligned.
715 The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
716 chunk size (which is always a multiple of two words), is an in-use
717 bit for the *previous* chunk. If that bit is *clear*, then the
718 word before the current chunk size contains the previous chunk
719 size, and can be used to find the front of the previous chunk.
720 The very first chunk allocated always has this bit set, preventing
721 access to non-existent (or non-owned) memory. If pinuse is set for
722 any given chunk, then you CANNOT determine the size of the
723 previous chunk, and might even get a memory addressing fault when
726 The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
727 the chunk size, redundantly records whether the current chunk is
728 inuse (unless the chunk is mmapped). This redundancy enables usage
729 checks within free and realloc, and reduces indirection when freeing
730 and consolidating chunks.
732 Each freshly allocated chunk must have both cinuse and pinuse set.
733 That is, each allocated chunk borders either a previously allocated
734 and still in-use chunk, or the base of its memory arena. This is
735 ensured by making all allocations from the `lowest' part of any
736 found chunk. Further, no free chunk physically borders another one,
737 so each free chunk is known to be preceded and followed by either
738 inuse chunks or the ends of memory.
740 Note that the `foot' of the current chunk is actually represented
741 as the prev_foot of the NEXT chunk. This makes it easier to
742 deal with alignments etc but can be very confusing when trying
743 to extend or adapt this code.
745 The exceptions to all this are
747 1. The special chunk `top' is the top-most available chunk (i.e.,
748 the one bordering the end of available memory). It is treated
749 specially. Top is never included in any bin, is used only if
750 no other chunk is available, and is released back to the
751 system if it is very large (see M_TRIM_THRESHOLD). In effect,
752 the top chunk is treated as larger (and thus less well
753 fitting) than any other available chunk. The top chunk
754 doesn't update its trailing size field since there is no next
755 contiguous chunk that would have to index off it. However,
756 space is still allocated for it (TOP_FOOT_SIZE) to enable
757 separation or merging when space is extended.
759 3. Chunks allocated via mmap have both cinuse and pinuse bits
760 cleared in their head fields. Because they are allocated
761 one-by-one, each must carry its own prev_foot field, which is
762 also used to hold the offset this chunk has within its mmapped
763 region, which is needed to preserve alignment. Each mmapped
764 chunk is trailed by the first two fields of a fake next-chunk
765 for sake of usage checks.
769 struct malloc_chunk {
770 size_t prev_foot; /* Size of previous chunk (if free). */
771 size_t head; /* Size and inuse bits. */
772 struct malloc_chunk* fd; /* double links -- used only if free. */
773 struct malloc_chunk* bk;
776 typedef struct malloc_chunk mchunk;
777 typedef struct malloc_chunk* mchunkptr;
778 typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
779 typedef unsigned int bindex_t; /* Described below */
780 typedef unsigned int binmap_t; /* Described below */
781 typedef unsigned int flag_t; /* The type of various bit flag sets */
783 /* ------------------- Chunks sizes and alignments ----------------------- */
785 #define MCHUNK_SIZE (sizeof(mchunk))
788 #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
790 #define CHUNK_OVERHEAD (SIZE_T_SIZE)
793 /* MMapped chunks need a second word of overhead ... */
794 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
795 /* ... and additional padding for fake next-chunk at foot */
796 #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
798 /* The smallest size we can malloc is an aligned minimal chunk */
799 #define MIN_CHUNK_SIZE\
800 ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
802 /* conversion from malloc headers to user pointers, and back */
803 #define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
804 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
805 /* chunk associated with aligned address A */
806 #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
808 /* Bounds on request (not chunk) sizes. */
809 #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
810 #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
812 /* pad request bytes into a usable size */
813 #define pad_request(req) \
814 (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
816 /* pad request, checking for minimum (but not maximum) */
817 #define request2size(req) \
818 (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
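/*
  Worked example (illustration only): on a 64-bit build without FOOTERS,
  CHUNK_OVERHEAD is one size_t (8) and CHUNK_ALIGN_MASK is 15, so
  request2size(100) == (100 + 8 + 15) & ~15 == 112.  The pointer handed
  back to the caller is chunk2mem(p), two size_t words past the chunk
  header p, and mem2chunk() inverts that step on free().
*/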
821 /* ------------------ Operations on head and foot fields ----------------- */
824 The head field of a chunk is or'ed with PINUSE_BIT when the previous
825 adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is in
826 use, unless mmapped, in which case both bits are cleared.
828 FLAG4_BIT is not used by this malloc, but might be useful in extensions.
831 #define PINUSE_BIT (SIZE_T_ONE)
832 #define CINUSE_BIT (SIZE_T_TWO)
833 #define FLAG4_BIT (SIZE_T_FOUR)
834 #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
835 #define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
837 /* Head value for fenceposts */
838 #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
840 /* extraction of fields from head words */
841 #define cinuse(p) ((p)->head & CINUSE_BIT)
842 #define pinuse(p) ((p)->head & PINUSE_BIT)
843 #define flag4inuse(p) ((p)->head & FLAG4_BIT)
844 #define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
845 #define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)
847 #define chunksize(p) ((p)->head & ~(FLAG_BITS))
849 #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
850 #define set_flag4(p) ((p)->head |= FLAG4_BIT)
851 #define clear_flag4(p) ((p)->head &= ~FLAG4_BIT)
853 /* Treat space at ptr +/- offset as a chunk */
854 #define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
855 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
857 /* Ptr to next or previous physical malloc_chunk. */
858 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
859 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
861 /* extract next chunk's pinuse bit */
862 #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
864 /* Get/set size at footer */
865 #define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
866 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
868 /* Set size, pinuse bit, and foot */
869 #define set_size_and_pinuse_of_free_chunk(p, s)\
870 ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
872 /* Set size, pinuse bit, foot, and clear next pinuse */
873 #define set_free_with_pinuse(p, s, n)\
874 (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
876 /* Get the internal overhead associated with chunk p */
877 #define overhead_for(p)\
878 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
880 /* Return true if malloced space is not necessarily cleared */
882 #define calloc_must_clear(p) (!is_mmapped(p))
883 #else /* MMAP_CLEARS */
884 #define calloc_must_clear(p) (1)
885 #endif /* MMAP_CLEARS */
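/*
  Illustrative sketch (compiled out) of how the boundary-tag macros above
  cooperate when a chunk is marked free: the foot written at the end of p
  is read back as the prev_foot of the physically next chunk, so the two
  neighbors can always find each other.
*/
#if 0
static void example_mark_free(mchunkptr p, size_t s) {
  /* s must be a properly aligned chunk size (low flag bits clear) */
  mchunkptr n = chunk_plus_offset(p, s); /* physically next chunk */
  set_free_with_pinuse(p, s, n);         /* set p's head and foot, clear n's pinuse */
  assert(chunksize(p) == s);
  assert(!next_pinuse(p));               /* n now records that p is free */
  assert(prev_chunk(n) == p);            /* n's prev_foot leads back to p */
}
#endif /* illustration only */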
887 /* ---------------------- Overlaid data structures ----------------------- */
890 When chunks are not in use, they are treated as nodes of either
893 "Small" chunks are stored in circular doubly-linked lists, and look
896 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
897 | Size of previous chunk |
898 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
899 `head:' | Size of chunk, in bytes |P|
900 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
901 | Forward pointer to next chunk in list |
902 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
903 | Back pointer to previous chunk in list |
904 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
905 | Unused space (may be 0 bytes long) .
908 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
909 `foot:' | Size of chunk, in bytes |
910 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
912 Larger chunks are kept in a form of bitwise digital trees (aka
913 tries) keyed on chunksizes. Because malloc_tree_chunks are only for
914 free chunks greater than 256 bytes, their size doesn't impose any
915 constraints on user chunk sizes. Each node looks like:
917 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
918 | Size of previous chunk |
919 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
920 `head:' | Size of chunk, in bytes |P|
921 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
922 | Forward pointer to next chunk of same size |
923 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
924 | Back pointer to previous chunk of same size |
925 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
926 | Pointer to left child (child[0]) |
927 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
928 | Pointer to right child (child[1]) |
929 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
930 | Pointer to parent |
931 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
932 | bin index of this chunk |
933 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
936 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
937 `foot:' | Size of chunk, in bytes |
938 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
940 Each tree holding treenodes is a tree of unique chunk sizes. Chunks
941 of the same size are arranged in a circularly-linked list, with only
942 the oldest chunk (the next to be used, in our FIFO ordering)
943 actually in the tree. (Tree members are distinguished by a non-null
944 parent pointer.) If a chunk with the same size as an existing node
945 is inserted, it is linked off the existing node using pointers that
946 work in the same way as fd/bk pointers of small chunks.
948 Each tree contains a power of 2 sized range of chunk sizes (the
949 smallest is 0x100 <= x < 0x180), which is divided in half at each
950 tree level, with the chunks in the smaller half of the range (0x100
951 <= x < 0x140 for the top node) in the left subtree and the larger
952 half (0x140 <= x < 0x180) in the right subtree. This is, of course,
953 done by inspecting individual bits.
955 Using these rules, each node's left subtree contains all smaller
956 sizes than its right subtree. However, the node at the root of each
957 subtree has no particular ordering relationship to either. (The
958 dividing line between the subtree sizes is based on trie relation.)
959 If we remove the last chunk of a given size from the interior of the
960 tree, we need to replace it with a leaf node. The tree ordering
961 rules permit a node to be replaced by any leaf below it.
963 The smallest chunk in a tree (a common operation in a best-fit
964 allocator) can be found by walking a path to the leftmost leaf in
965 the tree. Unlike a usual binary tree, where we follow left child
966 pointers until we reach a null, here we follow the right child
967 pointer any time the left one is null, until we reach a leaf with
968 both child pointers null. The smallest chunk in the tree will be
969 somewhere along that path.
971 The worst case number of steps to add, find, or remove a node is
972 bounded by the number of bits differentiating chunks within
973 bins. Under current bin calculations, this ranges from 6 up to 21
974 (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
975 is of course much better.
978 struct malloc_tree_chunk {
979 /* The first four fields must be compatible with malloc_chunk */
982 struct malloc_tree_chunk* fd;
983 struct malloc_tree_chunk* bk;
985 struct malloc_tree_chunk* child[2];
986 struct malloc_tree_chunk* parent;
990 typedef struct malloc_tree_chunk tchunk;
991 typedef struct malloc_tree_chunk* tchunkptr;
992 typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
994 /* A little helper macro for trees */
995 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
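/*
  Illustrative sketch (compiled out) of the leftmost walk described above:
  follow child[0] when present, otherwise child[1], tracking the smallest
  size seen.  This is essentially the path the best-fit search walks once
  it knows a tree contains large-enough chunks.
*/
#if 0
static tchunkptr example_smallest_in_tree(tchunkptr t) {
  tchunkptr v = t;                      /* smallest chunk seen so far */
  while ((t = leftmost_child(t)) != 0) {
    if (chunksize(t) < chunksize(v))
      v = t;
  }
  return v;
}
#endif /* illustration only */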
997 /* ----------------------------- Segments -------------------------------- */
1000 Each malloc space may include non-contiguous segments, held in a
1001 list headed by an embedded malloc_segment record representing the
1002 top-most space. Segments also include flags holding properties of
1003 the space. Large chunks that are directly allocated by mmap are not
1004 included in this list. They are instead independently created and
1005 destroyed without otherwise keeping track of them.
1007 Segment management mainly comes into play for spaces allocated by
1008 MMAP. Any call to MMAP might or might not return memory that is
1009 adjacent to an existing segment. MORECORE normally contiguously
1010 extends the current space, so this space is almost always adjacent,
1011 which is simpler and faster to deal with. (This is why MORECORE is
1012 used preferentially to MMAP when both are available -- see
1013 sys_alloc.) When allocating using MMAP, we don't use any of the
1014 hinting mechanisms (inconsistently) supported in various
1015 implementations of unix mmap, or distinguish reserving from
1016 committing memory. Instead, we just ask for space, and exploit
1017 contiguity when we get it. It is probably possible to do
1018 better than this on some systems, but no general scheme seems
1019 to be significantly better.
1021 Management entails a simpler variant of the consolidation scheme
1022 used for chunks to reduce fragmentation -- new adjacent memory is
1023 normally prepended or appended to an existing segment. However,
1024 there are limitations compared to chunk consolidation that mostly
1025 reflect the fact that segment processing is relatively infrequent
1026 (occurring only when getting memory from system) and that we
1027 don't expect to have huge numbers of segments:
1029 * Segments are not indexed, so traversal requires linear scans. (It
1030 would be possible to index these, but is not worth the extra
1031 overhead and complexity for most programs on most platforms.)
1032 * New segments are only appended to old ones when holding top-most
1033 memory; if they cannot be prepended to others, they are held in
1036 Except for the top-most segment of an mstate, each segment record
1037 is kept at the tail of its segment. Segments are added by pushing
1038 segment records onto the list headed by &mstate.seg for the
1041 Segment flags control allocation/merge/deallocation policies:
1042 * If EXTERN_BIT set, then we did not allocate this segment,
1043 and so should not try to deallocate or merge with others.
1044 (This currently holds only for the initial segment passed
1045 into create_mspace_with_base.)
1046 * If USE_MMAP_BIT set, the segment may be merged with
1047 other surrounding mmapped segments and trimmed/de-allocated
1049 * If neither bit is set, then the segment was obtained using
1050 MORECORE so can be merged with surrounding MORECORE'd segments
1051 and deallocated/trimmed using MORECORE with negative arguments.
1054 struct malloc_segment {
1055 char* base; /* base address */
1056 size_t size; /* allocated size */
1057 struct malloc_segment* next; /* ptr to next segment */
1058 flag_t sflags; /* mmap and extern flag */
1061 #define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
1062 #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
1064 typedef struct malloc_segment msegment;
1065 typedef struct malloc_segment* msegmentptr;
1067 /* ---------------------------- malloc_state ----------------------------- */
1070 A malloc_state holds all of the bookkeeping for a space.
1071 The main fields are:
1074 The topmost chunk of the currently active segment. Its size is
1075 cached in topsize. The actual size of topmost space is
1076 topsize+TOP_FOOT_SIZE, which includes space reserved for adding
1077 fenceposts and segment records if necessary when getting more
1078 space from the system. The size at which to autotrim top is
1079 cached from mparams in trim_check, except that it is disabled if
1082 Designated victim (dv)
1083 This is the preferred chunk for servicing small requests that
1084 don't have exact fits. It is normally the chunk split off most
1085 recently to service another small request. Its size is cached in
1086 dvsize. The link fields of this chunk are not maintained since it
1087 is not kept in a bin.
1090 An array of bin headers for free chunks. These bins hold chunks
1091 with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
1092 chunks of all the same size, spaced 8 bytes apart. To simplify
1093 use in double-linked lists, each bin header acts as a malloc_chunk
1094 pointing to the real first node, if it exists (else pointing to
1095 itself). This avoids special-casing for headers. But to avoid
1096 waste, we allocate only the fd/bk pointers of bins, and then use
1097 repositioning tricks to treat these as the fields of a chunk.
1100 Treebins are pointers to the roots of trees holding a range of
1101 sizes. There are 2 equally spaced treebins for each power of two
1102 from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
1106 There is one bit map for small bins ("smallmap") and one for
1107 treebins ("treemap"). Each bin sets its bit when non-empty, and
1108 clears the bit when empty. Bit operations are then used to avoid
1109 bin-by-bin searching -- nearly all "search" is done without ever
1110 looking at bins that won't be selected. The bit maps
1111 conservatively use 32 bits per map word, even on a 64-bit system.
1112 For a good description of some of the bit-based techniques used
1113 here, see Henry S. Warren Jr's book "Hacker's Delight" (and
1114 supplement at http://hackersdelight.org/). Many of these are
1115 intended to reduce the branchiness of paths through malloc etc, as
1116 well as to reduce the number of memory locations read or written.
1119 A list of segments headed by an embedded malloc_segment record
1120 representing the initial space.
1122 Address check support
1123 The least_addr field is the least address ever obtained from
1124 MORECORE or MMAP. Attempted frees and reallocs of any address less
1125 than this are trapped (unless INSECURE is defined).
1128 A cross-check field that should always hold the same value as mparams.magic.
1130 Max allowed footprint
1131 The maximum allowed bytes to allocate from system (zero means no limit)
1134 Bits recording whether to use MMAP, locks, or contiguous MORECORE
1137 Each space keeps track of current and maximum system memory
1138 obtained via MORECORE or MMAP.
1141 Fields holding the amount of unused topmost memory that should trigger
1142 trimming, and a counter to force periodic scanning to release unused
1143 non-topmost segments.
1146 If USE_LOCKS is defined, the "mutex" lock is acquired and released
1147 around every public call using this mspace.
1150 A void* pointer and a size_t field that can be used to help implement
1151 extensions to this malloc.
1154 /* Bin types, widths and sizes */
1155 #define NSMALLBINS (32U)
1156 #define NTREEBINS (32U)
1157 #define SMALLBIN_SHIFT (3U)
1158 #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
1159 #define TREEBIN_SHIFT (8U)
1160 #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
1161 #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
1162 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
1164 struct malloc_state {
1173 size_t release_checks;
1175 mchunkptr smallbins[(NSMALLBINS+1)*2];
1176 tbinptr treebins[NTREEBINS];
1178 size_t max_footprint;
1179 size_t footprint_limit; /* zero means no limit */
1182 MLOCK_T mutex; /* locate lock among fields that rarely change */
1183 #endif /* USE_LOCKS */
1185 void* extp; /* Unused but available for extensions */
1189 typedef struct malloc_state* mstate;
1191 /* ------------- Global malloc_state and malloc_params ------------------- */
1194 malloc_params holds global properties, including those that can be
1195 dynamically set using mallopt. There is a single instance, mparams,
1196 initialized in init_mparams. Note that the non-zeroness of "magic"
1197 also serves as an initialization flag.
1200 struct malloc_params {
1204 size_t mmap_threshold;
1205 size_t trim_threshold;
1206 flag_t default_mflags;
1209 static struct malloc_params mparams;
1211 /* Ensure mparams initialized */
1212 #define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
1216 /* The global malloc_state used for all non-"mspace" calls */
1217 static struct malloc_state _gm_;
1219 #define is_global(M) ((M) == &_gm_)
1221 #endif /* !ONLY_MSPACES */
1223 #define is_initialized(M) ((M)->top != 0)
1225 /* -------------------------- system alloc setup ------------------------- */
1227 /* Operations on mflags */
1229 #define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
1230 #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
1232 #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
1234 #define disable_lock(M)
1237 #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
1238 #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
1240 #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
1242 #define disable_mmap(M)
1245 #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
1246 #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
1247 #define use_noexpand(M) ((M)->mflags & USE_NOEXPAND_BIT)
1248 #define disable_expand(M) ((M)->mflags |= USE_NOEXPAND_BIT)
1249 #define use_trace(M) ((M)->mflags & USE_TRACE_BIT)
1250 #define enable_trace(M) ((M)->mflags |= USE_TRACE_BIT)
1251 #define disable_trace(M) ((M)->mflags &= ~USE_TRACE_BIT)
1253 #define set_lock(M,L)\
1254 ((M)->mflags = (L)?\
1255 ((M)->mflags | USE_LOCK_BIT) :\
1256 ((M)->mflags & ~USE_LOCK_BIT))
1258 /* page-align a size */
1259 #define page_align(S)\
1260 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
1262 /* granularity-align a size */
1263 #define granularity_align(S)\
1264 (((S) + (mparams.granularity - SIZE_T_ONE))\
1265 & ~(mparams.granularity - SIZE_T_ONE))
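/*
  Worked example (illustration only): with a 4096-byte page size,
  page_align(5000) == 8192 and is_page_aligned(8192) (just below) holds.
  granularity_align rounds the same way but against mparams.granularity,
  which on Windows comes from dwAllocationGranularity (see init_mparams).
*/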
1268 /* For mmap, use granularity alignment on windows, else page-align */
1270 #define mmap_align(S) granularity_align(S)
1272 #define mmap_align(S) page_align(S)
1275 /* For sys_alloc, enough padding to ensure can malloc request on success */
1276 #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
1278 #define is_page_aligned(S)\
1279 (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
1280 #define is_granularity_aligned(S)\
1281 (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
1283 /* True if segment S holds address A */
1284 #define segment_holds(S, A)\
1285 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
1287 /* Return segment holding given address */
1288 __clib_nosanitize_addr
1289 static msegmentptr segment_holding(mstate m, char* addr) {
1290 msegmentptr sp = &m->seg;
1292 if (addr >= sp->base && addr < sp->base + sp->size)
1294 if ((sp = sp->next) == 0)
1299 /* Return true if segment contains a segment link */
1300 __clib_nosanitize_addr
1301 static int has_segment_link(mstate m, msegmentptr ss) {
1302 msegmentptr sp = &m->seg;
1304 if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
1306 if ((sp = sp->next) == 0)
1311 #ifndef MORECORE_CANNOT_TRIM
1312 #define should_trim(M,s) ((s) > (M)->trim_check)
1313 #else /* MORECORE_CANNOT_TRIM */
1314 #define should_trim(M,s) (0)
1315 #endif /* MORECORE_CANNOT_TRIM */
1318 TOP_FOOT_SIZE is padding at the end of a segment, including space
1319 that may be needed to place segment records and fenceposts when new
1320 noncontiguous segments are added.
1322 #define TOP_FOOT_SIZE\
1323 (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
1326 /* ------------------------------- Hooks -------------------------------- */
1329 PREACTION should be defined to return 0 on success, and nonzero on
1330 failure. If you are not using locking, you can redefine these to do
1335 #define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
1336 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
1337 #else /* USE_LOCKS */
1340 #define PREACTION(M) (0)
1341 #endif /* PREACTION */
1344 #define POSTACTION(M)
1345 #endif /* POSTACTION */
1347 #endif /* USE_LOCKS */
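/*
  A minimal sketch (compiled out) of the calling pattern the public entry
  points in this file use around these hooks: take the per-mspace lock via
  PREACTION, do the work, release it via POSTACTION.
*/
#if 0
static void example_locked_op(mstate m) {
  if (!PREACTION(m)) {   /* returns 0 once the lock (if any) is held */
    /* ... operate on m ... */
    POSTACTION(m);       /* release the lock, if one was taken */
  }
}
#endif /* illustration only */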
1350 CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
1351 USAGE_ERROR_ACTION is triggered on detected bad frees and
1352 reallocs. The argument p is an address that might have triggered the
1353 fault. It is ignored by the two predefined actions, but might be
1354 useful in custom actions that try to help diagnose errors.
1357 #if PROCEED_ON_ERROR
1359 /* A count of the number of corruption errors causing resets */
1360 int malloc_corruption_error_count;
1362 /* default corruption action */
1363 static void reset_on_error(mstate m);
1365 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
1366 #define USAGE_ERROR_ACTION(m, p)
1368 #else /* PROCEED_ON_ERROR */
1370 #ifndef CORRUPTION_ERROR_ACTION
1371 #define CORRUPTION_ERROR_ACTION(m) DLM_ABORT
1372 #endif /* CORRUPTION_ERROR_ACTION */
1374 #ifndef USAGE_ERROR_ACTION
1375 #define USAGE_ERROR_ACTION(m,p) DLM_ABORT
1376 #endif /* USAGE_ERROR_ACTION */
1378 #endif /* PROCEED_ON_ERROR */
1381 /* -------------------------- Debugging setup ---------------------------- */
1385 #define check_free_chunk(M,P)
1386 #define check_inuse_chunk(M,P)
1387 #define check_malloced_chunk(M,P,N)
1388 #define check_mmapped_chunk(M,P)
1389 #define check_malloc_state(M)
1390 #define check_top_chunk(M,P)
1393 #define check_free_chunk(M,P) do_check_free_chunk(M,P)
1394 #define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
1395 #define check_top_chunk(M,P) do_check_top_chunk(M,P)
1396 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
1397 #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
1398 #define check_malloc_state(M) do_check_malloc_state(M)
1400 static void do_check_any_chunk(mstate m, mchunkptr p);
1401 static void do_check_top_chunk(mstate m, mchunkptr p);
1402 static void do_check_mmapped_chunk(mstate m, mchunkptr p);
1403 static void do_check_inuse_chunk(mstate m, mchunkptr p);
1404 static void do_check_free_chunk(mstate m, mchunkptr p);
1405 static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
1406 static void do_check_tree(mstate m, tchunkptr t);
1407 static void do_check_treebin(mstate m, bindex_t i);
1408 static void do_check_smallbin(mstate m, bindex_t i);
1409 static void do_check_malloc_state(mstate m);
1410 static int bin_find(mstate m, mchunkptr x);
1411 static size_t traverse_and_check(mstate m);
1414 /* ---------------------------- Indexing Bins ---------------------------- */
1416 #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
1417 #define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT)
1418 #define small_index2size(i) ((i) << SMALLBIN_SHIFT)
1419 #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
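/*
  Worked example (illustration only): a 64-byte chunk is "small":
  is_small(64) holds, small_index(64) == 8 and small_index2size(8) == 64,
  so it lives in the bin returned by smallbin_at(M, 8) below.  Chunks of
  MIN_LARGE_SIZE (256) bytes or more go to the treebins instead.
*/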
1421 /* addressing by index. See above about smallbin repositioning */
1422 #define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
1423 #define treebin_at(M,i) (&((M)->treebins[i]))
1425 /* assign tree index for size S to variable I. Use x86 asm if possible */
1426 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
1427 #define compute_tree_index(S, I)\
1429 unsigned int X = S >> TREEBIN_SHIFT;\
1432 else if (X > 0xFFFF)\
1435 unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \
1436 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1440 #elif defined (__INTEL_COMPILER)
1441 #define compute_tree_index(S, I)\
1443 size_t X = S >> TREEBIN_SHIFT;\
1446 else if (X > 0xFFFF)\
1449 unsigned int K = _bit_scan_reverse (X); \
1450 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1454 #elif defined(_MSC_VER) && _MSC_VER>=1300
1455 #define compute_tree_index(S, I)\
1457 size_t X = S >> TREEBIN_SHIFT;\
1460 else if (X > 0xFFFF)\
1464 _BitScanReverse((DWORD *) &K, (DWORD) X);\
1465 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1470 #define compute_tree_index(S, I)\
1472 size_t X = S >> TREEBIN_SHIFT;\
1475 else if (X > 0xFFFF)\
1478 unsigned int Y = (unsigned int)X;\
1479 unsigned int N = ((Y - 0x100) >> 16) & 8;\
1480 unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
1482 N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
1483 K = 14 - N + ((Y <<= K) >> 15);\
1484 I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
1489 /* Bit representing maximum resolved size in a treebin at i */
1490 #define bit_for_tree_index(i) \
1491 (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
1493 /* Shift placing maximum resolved bit in a treebin at i as sign bit */
1494 #define leftshift_for_tree_index(i) \
1495 ((i == NTREEBINS-1)? 0 : \
1496 ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
1498 /* The size of the smallest chunk held in bin with index i */
1499 #define minsize_for_tree_index(i) \
1500 ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
1501 (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
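/*
  Worked example (illustration only): for a free chunk of size 600
  (0x258), X = 600 >> TREEBIN_SHIFT = 2 and the highest set bit of X is
  bit K = 1, so compute_tree_index yields I = (1 << 1) + ((600 >> 8) & 1)
  = 2.  Consistently, minsize_for_tree_index(2) == 512 and
  minsize_for_tree_index(3) == 768, so treebin 2 spans sizes [512, 768).
*/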
1504 /* ------------------------ Operations on bin maps ----------------------- */
1506 /* bit corresponding to given index */
1507 #define idx2bit(i) ((binmap_t)(1) << (i))
1509 /* Mark/Clear bits with given index */
1510 #define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
1511 #define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
1512 #define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
1514 #define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
1515 #define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
1516 #define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
1518 /* isolate the least set bit of a bitmap */
1519 #define least_bit(x) ((x) & -(x))
1521 /* mask with all bits to left of least bit of x on */
1522 #define left_bits(x) ((x<<1) | -(x<<1))
1524 /* mask with all bits to left of or equal to least bit of x on */
1525 #define same_or_left_bits(x) ((x) | -(x))
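/*
  Worked example (illustration only): if smallmap == 0x28 (bins 3 and 5
  non-empty), least_bit(0x28) == 0x08 and compute_bit2idx (below) recovers
  index 3 from it; left_bits(idx2bit(3)) is the mask of all bins above
  index 3, which is how a request that maps to an empty bin finds the
  next larger non-empty one.
*/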
1527 /* index corresponding to given bit. Use x86 asm if possible */
1529 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
1530 #define compute_bit2idx(X, I)\
1533 J = __builtin_ctz(X); \
1537 #elif defined (__INTEL_COMPILER)
1538 #define compute_bit2idx(X, I)\
1541 J = _bit_scan_forward (X); \
1545 #elif defined(_MSC_VER) && _MSC_VER>=1300
1546 #define compute_bit2idx(X, I)\
1549 _BitScanForward((DWORD *) &J, X);\
1553 #elif USE_BUILTIN_FFS
1554 #define compute_bit2idx(X, I) I = ffs(X)-1
1557 #define compute_bit2idx(X, I)\
1559 unsigned int Y = X - 1;\
1560 unsigned int K = Y >> (16-4) & 16;\
1561 unsigned int N = K; Y >>= K;\
1562 N += K = Y >> (8-3) & 8; Y >>= K;\
1563 N += K = Y >> (4-2) & 4; Y >>= K;\
1564 N += K = Y >> (2-1) & 2; Y >>= K;\
1565 N += K = Y >> (1-0) & 1; Y >>= K;\
1566 I = (bindex_t)(N + Y);\
1571 /* ----------------------- Runtime Check Support ------------------------- */
1574 For security, the main invariant is that malloc/free/etc never
1575 writes to a static address other than malloc_state, unless static
1576 malloc_state itself has been corrupted, which cannot occur via
1577 malloc (because of these checks). In essence this means that we
1578 believe all pointers, sizes, maps etc held in malloc_state, but
1579 check all of those linked or offsetted from other embedded data
1580 structures. These checks are interspersed with main code in a way
1581 that tends to minimize their run-time cost.
1583 When FOOTERS is defined, in addition to range checking, we also
1584 verify footer fields of inuse chunks, which can be used to guarantee
1585 that the mstate controlling malloc/free is intact. This is a
1586 streamlined version of the approach described by William Robertson
1587 et al in "Run-time Detection of Heap-based Overflows" LISA'03
1588 http://www.usenix.org/events/lisa03/tech/robertson.html The footer
1589 of an inuse chunk holds the xor of its mstate and a random seed,
1590 that is checked upon calls to free() and realloc(). This is
1591 (probabilistically) unguessable from outside the program, but can be
1592 computed by any code successfully malloc'ing any chunk, so does not
1593 itself provide protection against code that has already broken
1594 security through some other means. Unlike Robertson et al, we
1595 always dynamically check addresses of all offset chunks (previous,
1596 next, etc). This turns out to be cheaper than relying on hashes.
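Concretely, when FOOTERS is in effect, mark_inuse_foot (below) stores
((size_t)m ^ mparams.magic) in the prev_foot of each inuse chunk;
get_mstate_for xors that value with mparams.magic again to recover the
owning mstate, and ok_magic checks the candidate's magic field before
it is trusted.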
1600 /* Check if address a is at least as high as any from MORECORE or MMAP */
1601 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
1602 /* Check if address of next chunk n is higher than base chunk p */
1603 #define ok_next(p, n) ((char*)(p) < (char*)(n))
1604 /* Check if p has inuse status */
1605 #define ok_inuse(p) is_inuse(p)
1606 /* Check if p has its pinuse bit on */
1607 #define ok_pinuse(p) pinuse(p)
1609 #else /* !INSECURE */
1610 #define ok_address(M, a) (1)
1611 #define ok_next(b, n) (1)
1612 #define ok_inuse(p) (1)
1613 #define ok_pinuse(p) (1)
1614 #endif /* !INSECURE */
1616 #if (FOOTERS && !INSECURE)
1617 /* Check if (alleged) mstate m has expected magic field */
1618 __clib_nosanitize_addr
1620 ok_magic (const mstate m)
1622 return (m->magic == mparams.magic);
1624 #else /* (FOOTERS && !INSECURE) */
1625 #define ok_magic(M) (1)
1626 #endif /* (FOOTERS && !INSECURE) */
1628 /* In gcc, use __builtin_expect to minimize impact of checks */
1630 #if defined(__GNUC__) && __GNUC__ >= 3
1631 #define RTCHECK(e) __builtin_expect(e, 1)
1633 #define RTCHECK(e) (e)
1635 #else /* !INSECURE */
1636 #define RTCHECK(e) (1)
1637 #endif /* !INSECURE */
1639 /* macros to set up inuse chunks with or without footers */
1643 #define mark_inuse_foot(M,p,s)
1645 /* Macros for setting head/foot of non-mmapped chunks */
1647 /* Set cinuse bit and pinuse bit of next chunk */
1648 #define set_inuse(M,p,s)\
1649 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
1650 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
1652 /* Set cinuse and pinuse of this chunk and pinuse of next chunk */
1653 #define set_inuse_and_pinuse(M,p,s)\
1654 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1655 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
1657 /* Set size, cinuse and pinuse bit of this chunk */
1658 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
1659 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
1663 /* Set foot of inuse chunk to be xor of mstate and seed */
1664 #define mark_inuse_foot(M,p,s)\
1665 (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
1667 #define get_mstate_for(p)\
1668 ((mstate)(((mchunkptr)((char*)(p) +\
1669 (chunksize(p))))->prev_foot ^ mparams.magic))
1671 #define set_inuse(M,p,s)\
1672 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
1673 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
1674 mark_inuse_foot(M,p,s))
1676 #define set_inuse_and_pinuse(M,p,s)\
1677 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1678 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
1679 mark_inuse_foot(M,p,s))
1681 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
1682 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1683 mark_inuse_foot(M, p, s))
1685 #endif /* !FOOTERS */
1687 /* ---------------------------- setting mparams -------------------------- */
1690 static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); }
1691 static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
1692 static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); }
1693 #endif /* LOCK_AT_FORK */
1695 /* Initialize mparams */
1696 static int init_mparams(void) {
1697 #ifdef NEED_GLOBAL_LOCK_INIT
1698 if (malloc_global_mutex_status <= 0)
1699 init_malloc_global_mutex();
1702 ACQUIRE_MALLOC_GLOBAL_LOCK();
1703 if (mparams.magic == 0) {
1709 psize = malloc_getpagesize;
1710 gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);
1713 SYSTEM_INFO system_info;
1714 GetSystemInfo(&system_info);
1715 psize = system_info.dwPageSize;
1716 gsize = ((DEFAULT_GRANULARITY != 0)?
1717 DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
1721 /* Sanity-check configuration:
1722 size_t must be unsigned and as wide as pointer type.
1723 ints must be at least 4 bytes.
1724 alignment must be at least 8.
1725 Alignment, min chunk size, and page size must all be powers of 2.
1727 if ((sizeof(size_t) != sizeof(char*)) ||
1728 (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
1729 (sizeof(int) < 4) ||
1730 (MALLOC_ALIGNMENT < (size_t)8U) ||
1731 ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
1732 ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
1733 ((gsize & (gsize-SIZE_T_ONE)) != 0) ||
1734 ((psize & (psize-SIZE_T_ONE)) != 0))
1736 mparams.granularity = gsize;
1737 mparams.page_size = psize;
1738 mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
1739 mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
1740 #if MORECORE_CONTIGUOUS
1741 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
1742 #else /* MORECORE_CONTIGUOUS */
1743 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
1744 #endif /* MORECORE_CONTIGUOUS */
1747 /* Set up lock for main malloc area */
1748 gm->mflags = mparams.default_mflags;
1749 (void)INITIAL_LOCK(&gm->mutex);
1752 pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);
1756 #ifndef DLM_MAGIC_CONSTANT
1759 unsigned char buf[sizeof(size_t)];
1760 /* Try to use /dev/urandom, else fall back on using time */
1761 if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
1762 read(fd, buf, sizeof(buf)) == sizeof(buf)) {
1763 magic = *((size_t *) buf);
1767 #endif /* USE_DEV_RANDOM */
1769 magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
1770 #elif defined(LACKS_TIME_H)
1771 magic = (size_t)&magic ^ (size_t)0x55555555U;
1773 magic = (size_t)(time(0) ^ (size_t)0x55555555U);
1775 magic |= (size_t)8U; /* ensure nonzero */
1776 magic &= ~(size_t)7U; /* improve chances of fault for bad values */
1778 magic = DLM_MAGIC_CONSTANT;
1780 /* Until memory modes commonly available, use volatile-write */
1781 (*(volatile size_t *)(&(mparams.magic))) = magic;
1785 RELEASE_MALLOC_GLOBAL_LOCK();
1789 /* support for mallopt */
1790 static int change_mparam(int param_number, int value) {
1792 ensure_initialization();
1793 val = (value == -1)? MAX_SIZE_T : (size_t)value;
1794 switch(param_number) {
1795 case M_TRIM_THRESHOLD:
1796 mparams.trim_threshold = val;
1799 if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
1800 mparams.granularity = val;
1805 case M_MMAP_THRESHOLD:
1806 mparams.mmap_threshold = val;
1814 /* ------------------------- Debugging Support --------------------------- */
1816 /* Check properties of any chunk, whether free, inuse, mmapped etc */
1817 static void do_check_any_chunk(mstate m, mchunkptr p) {
1818 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
1819 assert(ok_address(m, p));
1822 /* Check properties of top chunk */
1823 static void do_check_top_chunk(mstate m, mchunkptr p) {
1824 msegmentptr sp = segment_holding(m, (char*)p);
1825 size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */
1827 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
1828 assert(ok_address(m, p));
1829 assert(sz == m->topsize);
1831 assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
1833 assert(!pinuse(chunk_plus_offset(p, sz)));
1836 /* Check properties of (inuse) mmapped chunks */
1837 static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
1838 size_t sz = chunksize(p);
1839 size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
1840 assert(is_mmapped(p));
1841 assert(use_mmap(m));
1842 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
1843 assert(ok_address(m, p));
1844 assert(!is_small(sz));
1845 assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
1846 assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
1847 assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
1850 /* Check properties of inuse chunks */
1851 static void do_check_inuse_chunk(mstate m, mchunkptr p) {
1852 do_check_any_chunk(m, p);
1853 assert(is_inuse(p));
1854 assert(next_pinuse(p));
1855 /* If not pinuse and not mmapped, previous chunk has OK offset */
1856 assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
1858 do_check_mmapped_chunk(m, p);
1861 /* Check properties of free chunks */
1862 static void do_check_free_chunk(mstate m, mchunkptr p) {
1863 size_t sz = chunksize(p);
1864 mchunkptr next = chunk_plus_offset(p, sz);
1865 do_check_any_chunk(m, p);
1866 assert(!is_inuse(p));
1867 assert(!next_pinuse(p));
1868 assert (!is_mmapped(p));
1869 if (p != m->dv && p != m->top) {
1870 if (sz >= MIN_CHUNK_SIZE) {
1871 assert((sz & CHUNK_ALIGN_MASK) == 0);
1872 assert(is_aligned(chunk2mem(p)));
1873 assert(next->prev_foot == sz);
1875 assert (next == m->top || is_inuse(next));
1876 assert(p->fd->bk == p);
1877 assert(p->bk->fd == p);
1879 else /* markers are always of size SIZE_T_SIZE */
1880 assert(sz == SIZE_T_SIZE);
1884 /* Check properties of malloced chunks at the point they are malloced */
1885 static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
1887 mchunkptr p = mem2chunk(mem);
1888 size_t sz = p->head & ~INUSE_BITS;
1889 do_check_inuse_chunk(m, p);
1890 assert((sz & CHUNK_ALIGN_MASK) == 0);
1891 assert(sz >= MIN_CHUNK_SIZE);
1893 /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
1894 assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
1898 /* Check a tree and its subtrees. */
1899 static void do_check_tree(mstate m, tchunkptr t) {
1902 bindex_t tindex = t->index;
1903 size_t tsize = chunksize(t);
1905 compute_tree_index(tsize, idx);
1906 assert(tindex == idx);
1907 assert(tsize >= MIN_LARGE_SIZE);
1908 assert(tsize >= minsize_for_tree_index(idx));
1909 assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
1911 do { /* traverse through chain of same-sized nodes */
1912 do_check_any_chunk(m, ((mchunkptr)u));
1913 assert(u->index == tindex);
1914 assert(chunksize(u) == tsize);
1915 assert(!is_inuse(u));
1916 assert(!next_pinuse(u));
1917 assert(u->fd->bk == u);
1918 assert(u->bk->fd == u);
1919 if (u->parent == 0) {
1920 assert(u->child[0] == 0);
1921 assert(u->child[1] == 0);
1924 assert(head == 0); /* only one node on chain has parent */
1926 assert(u->parent != u);
1927 assert (u->parent->child[0] == u ||
1928 u->parent->child[1] == u ||
1929 *((tbinptr*)(u->parent)) == u);
1930 if (u->child[0] != 0) {
1931 assert(u->child[0]->parent == u);
1932 assert(u->child[0] != u);
1933 do_check_tree(m, u->child[0]);
1935 if (u->child[1] != 0) {
1936 assert(u->child[1]->parent == u);
1937 assert(u->child[1] != u);
1938 do_check_tree(m, u->child[1]);
1940 if (u->child[0] != 0 && u->child[1] != 0) {
1941 assert(chunksize(u->child[0]) < chunksize(u->child[1]));
1949 /* Check all the chunks in a treebin. */
1950 static void do_check_treebin(mstate m, bindex_t i) {
1951 tbinptr* tb = treebin_at(m, i);
1953 int empty = (m->treemap & (1U << i)) == 0;
1957 do_check_tree(m, t);
1960 /* Check all the chunks in a smallbin. */
1961 static void do_check_smallbin(mstate m, bindex_t i) {
1962 sbinptr b = smallbin_at(m, i);
1963 mchunkptr p = b->bk;
1964 unsigned int empty = (m->smallmap & (1U << i)) == 0;
1968 for (; p != b; p = p->bk) {
1969 size_t size = chunksize(p);
1971 /* each chunk claims to be free */
1972 do_check_free_chunk(m, p);
1973 /* chunk belongs in bin */
1974 assert(small_index(size) == i);
1975 assert(p->bk == b || chunksize(p->bk) == chunksize(p));
1976 /* chunk is followed by an inuse chunk */
1978 if (q->head != FENCEPOST_HEAD)
1979 do_check_inuse_chunk(m, q);
1984 /* Find x in a bin. Used in other check functions. */
1985 static int bin_find(mstate m, mchunkptr x) {
1986 size_t size = chunksize(x);
1987 if (is_small(size)) {
1988 bindex_t sidx = small_index(size);
1989 sbinptr b = smallbin_at(m, sidx);
1990 if (smallmap_is_marked(m, sidx)) {
1995 } while ((p = p->fd) != b);
2000 compute_tree_index(size, tidx);
2001 if (treemap_is_marked(m, tidx)) {
2002 tchunkptr t = *treebin_at(m, tidx);
2003 size_t sizebits = size << leftshift_for_tree_index(tidx);
2004 while (t != 0 && chunksize(t) != size) {
2005 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
2011 if (u == (tchunkptr)x)
2013 } while ((u = u->fd) != t);
2020 /* Traverse each chunk and check it; return total */
2021 static size_t traverse_and_check(mstate m) {
2023 if (is_initialized(m)) {
2024 msegmentptr s = &m->seg;
2025 sum += m->topsize + TOP_FOOT_SIZE;
2027 mchunkptr q = align_as_chunk(s->base);
2028 mchunkptr lastq = 0;
2030 while (segment_holds(s, q) &&
2031 q != m->top && q->head != FENCEPOST_HEAD) {
2032 sum += chunksize(q);
2034 assert(!bin_find(m, q));
2035 do_check_inuse_chunk(m, q);
2038 assert(q == m->dv || bin_find(m, q));
2039 assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */
2040 do_check_free_chunk(m, q);
2052 /* Check all properties of malloc_state. */
2053 static void do_check_malloc_state(mstate m) {
2057 for (i = 0; i < NSMALLBINS; ++i)
2058 do_check_smallbin(m, i);
2059 for (i = 0; i < NTREEBINS; ++i)
2060 do_check_treebin(m, i);
2062 if (m->dvsize != 0) { /* check dv chunk */
2063 do_check_any_chunk(m, m->dv);
2064 assert(m->dvsize == chunksize(m->dv));
2065 assert(m->dvsize >= MIN_CHUNK_SIZE);
2066 assert(bin_find(m, m->dv) == 0);
2069 if (m->top != 0) { /* check top chunk */
2070 do_check_top_chunk(m, m->top);
2071 /*assert(m->topsize == chunksize(m->top)); redundant */
2072 assert(m->topsize > 0);
2073 assert(bin_find(m, m->top) == 0);
2076 total = traverse_and_check(m);
2077 assert(total <= m->footprint);
2078 assert(m->footprint <= m->max_footprint);
2082 /* ----------------------------- statistics ------------------------------ */
2085 __clib_nosanitize_addr
2086 static struct dlmallinfo internal_mallinfo(mstate m) {
2087 struct dlmallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2088 ensure_initialization();
2089 if (!PREACTION(m)) {
2090 check_malloc_state(m);
2091 if (is_initialized(m)) {
2092 size_t nfree = SIZE_T_ONE; /* top always free */
2093 size_t mfree = m->topsize + TOP_FOOT_SIZE;
2095 msegmentptr s = &m->seg;
2097 mchunkptr q = align_as_chunk(s->base);
2098 while (segment_holds(s, q) &&
2099 q != m->top && q->head != FENCEPOST_HEAD) {
2100 size_t sz = chunksize(q);
2113 nm.hblkhd = m->footprint - sum;
2114 nm.usmblks = m->max_footprint;
2115 nm.uordblks = m->footprint - mfree;
2116 nm.fordblks = mfree;
2117 nm.keepcost = m->topsize;
2124 #endif /* !NO_MALLINFO */
2126 #if !NO_MALLOC_STATS
2127 static void internal_malloc_stats(mstate m) {
2128 ensure_initialization();
2129 if (!PREACTION(m)) {
2133 check_malloc_state(m);
2134 if (is_initialized(m)) {
2135 msegmentptr s = &m->seg;
2136 maxfp = m->max_footprint;
2138 used = fp - (m->topsize + TOP_FOOT_SIZE);
2141 mchunkptr q = align_as_chunk(s->base);
2142 while (segment_holds(s, q) &&
2143 q != m->top && q->head != FENCEPOST_HEAD) {
2145 used -= chunksize(q);
2151 POSTACTION(m); /* drop lock */
2152 fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
2153 fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp));
2154 fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used));
2157 #endif /* NO_MALLOC_STATS */
2159 /* ----------------------- Operations on smallbins ----------------------- */
2162 Various forms of linking and unlinking are defined as macros. Even
2163 the ones for trees, which are very long but have very short typical
2164 paths. This is ugly but reduces reliance on inlining support of compilers.
2168 /* Link a free chunk into a smallbin */
2169 #define insert_small_chunk(M, P, S) {\
2170 bindex_t I = small_index(S);\
2171 mchunkptr B = smallbin_at(M, I);\
2173 assert(S >= MIN_CHUNK_SIZE);\
2174 if (!smallmap_is_marked(M, I))\
2175 mark_smallmap(M, I);\
2176 else if (RTCHECK(ok_address(M, B->fd)))\
2179 CORRUPTION_ERROR_ACTION(M);\
2187 /* Unlink a chunk from a smallbin */
2188 #define unlink_small_chunk(M, P, S) {\
2189 mchunkptr F = P->fd;\
2190 mchunkptr B = P->bk;\
2191 bindex_t I = small_index(S);\
2194 assert(chunksize(P) == small_index2size(I));\
2195 if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \
2197 clear_smallmap(M, I);\
2199 else if (RTCHECK(B == smallbin_at(M,I) ||\
2200 (ok_address(M, B) && B->fd == P))) {\
2205 CORRUPTION_ERROR_ACTION(M);\
2209 CORRUPTION_ERROR_ACTION(M);\
2213 /* Unlink the first chunk from a smallbin */
2214 #define unlink_first_small_chunk(M, B, P, I) {\
2215 mchunkptr F = P->fd;\
2218 assert(chunksize(P) == small_index2size(I));\
2220 clear_smallmap(M, I);\
2222 else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\
2227 CORRUPTION_ERROR_ACTION(M);\
2231 /* Replace dv node, binning the old one */
2232 /* Used only when dvsize known to be small */
2233 #define replace_dv(M, P, S) {\
2234 size_t DVS = M->dvsize;\
2235 assert(is_small(DVS));\
2237 mchunkptr DV = M->dv;\
2238 insert_small_chunk(M, DV, DVS);\
2244 /* ------------------------- Operations on trees ------------------------- */
2246 /* Insert chunk into tree */
2247 #define insert_large_chunk(M, X, S) {\
2250 compute_tree_index(S, I);\
2251 H = treebin_at(M, I);\
2253 X->child[0] = X->child[1] = 0;\
2254 if (!treemap_is_marked(M, I)) {\
2255 mark_treemap(M, I);\
2257 X->parent = (tchunkptr)H;\
2262 size_t K = S << leftshift_for_tree_index(I);\
2264 if (chunksize(T) != S) {\
2265 tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
2269 else if (RTCHECK(ok_address(M, C))) {\
2276 CORRUPTION_ERROR_ACTION(M);\
2281 tchunkptr F = T->fd;\
2282 if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
2290 CORRUPTION_ERROR_ACTION(M);\
2301 1. If x is a chained node, unlink it from its same-sized fd/bk links
2302 and choose its bk node as its replacement.
2303 2. If x was the last node of its size, but not a leaf node, it must
2304 be replaced with a leaf node (not merely one with an open left or
2305 right), to make sure that lefts and rights of descendants
2306 correspond properly to bit masks. We use the rightmost descendant
2307 of x. We could use any other leaf, but this is easy to locate and
2308 tends to counteract removal of leftmosts elsewhere, and so keeps
2309 paths shorter than minimally guaranteed. This doesn't loop much
2310 because on average a node in a tree is near the bottom.
2311 3. If x is the base of a chain (i.e., has parent links) relink
2312 x's parent and children to x's replacement (or null if none).
2315 #define unlink_large_chunk(M, X) {\
2316 tchunkptr XP = X->parent;\
2319 tchunkptr F = X->fd;\
2321 if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\
2326 CORRUPTION_ERROR_ACTION(M);\
2331 if (((R = *(RP = &(X->child[1]))) != 0) ||\
2332 ((R = *(RP = &(X->child[0]))) != 0)) {\
2334 while ((*(CP = &(R->child[1])) != 0) ||\
2335 (*(CP = &(R->child[0])) != 0)) {\
2338 if (RTCHECK(ok_address(M, RP)))\
2341 CORRUPTION_ERROR_ACTION(M);\
2346 tbinptr* H = treebin_at(M, X->index);\
2348 if ((*H = R) == 0) \
2349 clear_treemap(M, X->index);\
2351 else if (RTCHECK(ok_address(M, XP))) {\
2352 if (XP->child[0] == X) \
2358 CORRUPTION_ERROR_ACTION(M);\
2360 if (RTCHECK(ok_address(M, R))) {\
2363 if ((C0 = X->child[0]) != 0) {\
2364 if (RTCHECK(ok_address(M, C0))) {\
2369 CORRUPTION_ERROR_ACTION(M);\
2371 if ((C1 = X->child[1]) != 0) {\
2372 if (RTCHECK(ok_address(M, C1))) {\
2377 CORRUPTION_ERROR_ACTION(M);\
2381 CORRUPTION_ERROR_ACTION(M);\
2386 /* Relays to large vs small bin operations */
2388 #define insert_chunk(M, P, S)\
2389 if (is_small(S)) insert_small_chunk(M, P, S)\
2390 else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
2392 #define unlink_chunk(M, P, S)\
2393 if (is_small(S)) unlink_small_chunk(M, P, S)\
2394 else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
2397 /* Relays to internal calls to malloc/free from realloc, memalign etc */
2400 #define internal_malloc(m, b) mspace_malloc(m, b)
2401 #define internal_free(m, mem) mspace_free(m,mem);
2402 #else /* ONLY_MSPACES */
2404 #define internal_malloc(m, b)\
2405 ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
2406 #define internal_free(m, mem)\
2407 if (m == gm) dlfree(mem); else mspace_free(m,mem);
2409 #define internal_malloc(m, b) dlmalloc(b)
2410 #define internal_free(m, mem) dlfree(mem)
2411 #endif /* MSPACES */
2412 #endif /* ONLY_MSPACES */
2414 /* ----------------------- Direct-mmapping chunks ----------------------- */
2417 Directly mmapped chunks are set up with an offset to the start of
2418 the mmapped region stored in the prev_foot field of the chunk. This
2419 allows reconstruction of the required argument to MUNMAP when freed,
2420 and also allows adjustment of the returned chunk to meet alignment
2421 requirements (especially in memalign).
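/* Illustrative sketch of the arithmetic described above, mirroring what
   dispose_chunk()/dlfree() do for mmapped chunks below; the example_ name
   is hypothetical and the block is not compiled. */
#if 0
static void example_unmap_direct_chunk(mstate m, mchunkptr p) {
  size_t offset = p->prev_foot;                     /* back to mmap base */
  size_t len = chunksize(p) + offset + MMAP_FOOT_PAD;
  if (CALL_MUNMAP((char*)p - offset, len) == 0)
    m->footprint -= len;
}
#endif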
2424 /* Malloc using mmap */
2425 static void* mmap_alloc(mstate m, size_t nb) {
2426 size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2427 if (m->footprint_limit != 0) {
2428 size_t fp = m->footprint + mmsize;
2429 if (fp <= m->footprint || fp > m->footprint_limit)
2432 if (mmsize > nb) { /* Check for wrap around 0 */
2433 char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));
2435 size_t offset = align_offset(chunk2mem(mm));
2436 size_t psize = mmsize - offset - MMAP_FOOT_PAD;
2437 mchunkptr p = (mchunkptr)(mm + offset);
2438 p->prev_foot = offset;
2440 mark_inuse_foot(m, p, psize);
2441 chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
2442 chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
2444 if (m->least_addr == 0 || mm < m->least_addr)
2446 if ((m->footprint += mmsize) > m->max_footprint)
2447 m->max_footprint = m->footprint;
2448 assert(is_aligned(chunk2mem(p)));
2449 check_mmapped_chunk(m, p);
2450 return chunk2mem(p);
2456 /* Realloc using mmap */
2457 static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
2458 size_t oldsize = chunksize(oldp);
2459 (void)flags; /* placate people compiling -Wunused */
2460 if (is_small(nb)) /* Can't shrink mmap regions below small size */
2462 /* Keep old chunk if big enough but not too big */
2463 if (oldsize >= nb + SIZE_T_SIZE &&
2464 (oldsize - nb) <= (mparams.granularity << 1))
2467 size_t offset = oldp->prev_foot;
2468 size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
2469 size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2470 char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
2471 oldmmsize, newmmsize, flags);
2473 mchunkptr newp = (mchunkptr)(cp + offset);
2474 size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
2476 mark_inuse_foot(m, newp, psize);
2477 chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
2478 chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
2480 if (cp < m->least_addr)
2482 if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
2483 m->max_footprint = m->footprint;
2484 check_mmapped_chunk(m, newp);
2492 /* -------------------------- mspace management -------------------------- */
2494 /* Initialize top chunk and its size */
2495 __clib_nosanitize_addr
2496 static void init_top(mstate m, mchunkptr p, size_t psize) {
2497 /* Ensure alignment */
2498 size_t offset = align_offset(chunk2mem(p));
2499 p = (mchunkptr)((char*)p + offset);
2504 p->head = psize | PINUSE_BIT;
2505 /* set size of fake trailing chunk holding overhead space only once */
2506 chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
2507 m->trim_check = mparams.trim_threshold; /* reset on each update */
2510 /* Initialize bins for a new mstate that is otherwise zeroed out */
2511 static void init_bins(mstate m) {
2512 /* Establish circular links for smallbins */
2514 for (i = 0; i < NSMALLBINS; ++i) {
2515 sbinptr bin = smallbin_at(m,i);
2516 bin->fd = bin->bk = bin;
2520 #if PROCEED_ON_ERROR
2522 /* default corruption action */
2523 static void reset_on_error(mstate m) {
2525 ++malloc_corruption_error_count;
2526 /* Reinitialize fields to forget about all memory */
2527 m->smallmap = m->treemap = 0;
2528 m->dvsize = m->topsize = 0;
2533 for (i = 0; i < NTREEBINS; ++i)
2534 *treebin_at(m, i) = 0;
2537 #endif /* PROCEED_ON_ERROR */
2539 /* Allocate chunk and prepend remainder with chunk in successor base. */
2540 __clib_nosanitize_addr
2541 static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
2543 mchunkptr p = align_as_chunk(newbase);
2544 mchunkptr oldfirst = align_as_chunk(oldbase);
2545 size_t psize = (char*)oldfirst - (char*)p;
2546 mchunkptr q = chunk_plus_offset(p, nb);
2547 size_t qsize = psize - nb;
2548 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
2550 assert((char*)oldfirst > (char*)q);
2551 assert(pinuse(oldfirst));
2552 assert(qsize >= MIN_CHUNK_SIZE);
2554 /* consolidate remainder with first chunk of old base */
2555 if (oldfirst == m->top) {
2556 size_t tsize = m->topsize += qsize;
2558 q->head = tsize | PINUSE_BIT;
2559 check_top_chunk(m, q);
2561 else if (oldfirst == m->dv) {
2562 size_t dsize = m->dvsize += qsize;
2564 set_size_and_pinuse_of_free_chunk(q, dsize);
2567 if (!is_inuse(oldfirst)) {
2568 size_t nsize = chunksize(oldfirst);
2569 unlink_chunk(m, oldfirst, nsize);
2570 oldfirst = chunk_plus_offset(oldfirst, nsize);
2573 set_free_with_pinuse(q, qsize, oldfirst);
2574 insert_chunk(m, q, qsize);
2575 check_free_chunk(m, q);
2578 check_malloced_chunk(m, chunk2mem(p), nb);
2579 return chunk2mem(p);
2582 /* Add a segment to hold a new noncontiguous region */
2583 __clib_nosanitize_addr
2584 static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
2585 /* Determine locations and sizes of segment, fenceposts, old top */
2586 char* old_top = (char*)m->top;
2587 msegmentptr oldsp = segment_holding(m, old_top);
2588 char* old_end = oldsp->base + oldsp->size;
2589 size_t ssize = pad_request(sizeof(struct malloc_segment));
2590 char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2591 size_t offset = align_offset(chunk2mem(rawsp));
2592 char* asp = rawsp + offset;
2593 char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
2594 mchunkptr sp = (mchunkptr)csp;
2595 msegmentptr ss = (msegmentptr)(chunk2mem(sp));
2596 mchunkptr tnext = chunk_plus_offset(sp, ssize);
2597 mchunkptr p = tnext;
2600 /* reset top to new space */
2601 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
2603 /* Set up segment record */
2604 assert(is_aligned(ss));
2605 set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
2606 *ss = m->seg; /* Push current record */
2607 m->seg.base = tbase;
2608 m->seg.size = tsize;
2609 m->seg.sflags = mmapped;
2612 /* Insert trailing fenceposts */
2614 mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
2615 p->head = FENCEPOST_HEAD;
2617 if ((char*)(&(nextp->head)) < old_end)
2622 assert(nfences >= 2);
2624 /* Insert the rest of old top into a bin as an ordinary free chunk */
2625 if (csp != old_top) {
2626 mchunkptr q = (mchunkptr)old_top;
2627 size_t psize = csp - old_top;
2628 mchunkptr tn = chunk_plus_offset(q, psize);
2629 set_free_with_pinuse(q, psize, tn);
2630 insert_chunk(m, q, psize);
2633 check_top_chunk(m, m->top);
2636 /* -------------------------- System allocation -------------------------- */
2638 /* Get memory from system using MORECORE or MMAP */
2639 __clib_nosanitize_addr
2640 static void* sys_alloc(mstate m, size_t nb) {
2641 char* tbase = CMFAIL;
2643 flag_t mmap_flag = 0;
2644 size_t asize; /* allocation size */
2646 ensure_initialization();
2648 if (use_noexpand(m))
2651 /* Directly map large chunks, but only if already initialized */
2652 if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
2653 void* mem = mmap_alloc(m, nb);
2658 asize = granularity_align(nb + SYS_ALLOC_PADDING);
2660 return 0; /* wraparound */
2661 if (m->footprint_limit != 0) {
2662 size_t fp = m->footprint + asize;
2663 if (fp <= m->footprint || fp > m->footprint_limit)
2668 Try getting memory in any of three ways (in most-preferred to
2669 least-preferred order):
2670 1. A call to MORECORE that can normally contiguously extend memory.
2671 (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
2672 main space is mmapped or a previous contiguous call failed)
2673 2. A call to MMAP new space (disabled if not HAVE_MMAP).
2674 Note that under the default settings, if MORECORE is unable to
2675 fulfill a request, and HAVE_MMAP is true, then mmap is
2676 used as a noncontiguous system allocator. This is a useful backup
2677 strategy for systems with holes in address spaces -- in this case
2678 sbrk cannot contiguously expand the heap, but mmap may be able to
2680 3. A call to MORECORE that cannot usually contiguously extend memory.
2681 (disabled if not HAVE_MORECORE)
2683 In all cases, we need to request enough bytes from system to ensure
2684 we can malloc nb bytes upon success, so pad with enough space for
2685 top_foot, plus alignment-pad to make sure we don't lose bytes if
2686 not on boundary, and round this up to a granularity unit.
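/* Worked example (hypothetical numbers, assuming a 64 KiB granularity and a
   SYS_ALLOC_PADDING of at most a few hundred bytes): for nb = 100000, the
   padded request is rounded up by granularity_align() to 131072 bytes, i.e.
   two granularity units, so the surplus lands in the top chunk. */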
2689 if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
2691 size_t ssize = asize; /* sbrk call size */
2692 msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
2693 ACQUIRE_MALLOC_GLOBAL_LOCK();
2695 if (ss == 0) { /* First time through or recovery */
2696 char* base = (char*)CALL_MORECORE(0);
2697 if (base != CMFAIL) {
2699 /* Adjust to end on a page boundary */
2700 if (!is_page_aligned(base))
2701 ssize += (page_align((size_t)base) - (size_t)base);
2702 fp = m->footprint + ssize; /* recheck limits */
2703 if (ssize > nb && ssize < HALF_MAX_SIZE_T &&
2704 (m->footprint_limit == 0 ||
2705 (fp > m->footprint && fp <= m->footprint_limit)) &&
2706 (br = (char*)(CALL_MORECORE(ssize))) == base) {
2713 /* Subtract out existing available top space from MORECORE request. */
2714 ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
2715 /* Use mem here only if it did continuously extend old space */
2716 if (ssize < HALF_MAX_SIZE_T &&
2717 (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) {
2723 if (tbase == CMFAIL) { /* Cope with partial failure */
2724 if (br != CMFAIL) { /* Try to use/extend the space we did get */
2725 if (ssize < HALF_MAX_SIZE_T &&
2726 ssize < nb + SYS_ALLOC_PADDING) {
2727 size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize);
2728 if (esize < HALF_MAX_SIZE_T) {
2729 char* end = (char*)CALL_MORECORE(esize);
2732 else { /* Can't use; try to release */
2733 (void) CALL_MORECORE(-ssize);
2739 if (br != CMFAIL) { /* Use the space we did get */
2744 disable_contiguous(m); /* Don't try contiguous path in the future */
2747 RELEASE_MALLOC_GLOBAL_LOCK();
2750 if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
2751 char* mp = (char*)(CALL_MMAP(asize));
2755 mmap_flag = USE_MMAP_BIT;
2759 if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
2760 if (asize < HALF_MAX_SIZE_T) {
2763 ACQUIRE_MALLOC_GLOBAL_LOCK();
2764 br = (char*)(CALL_MORECORE(asize));
2765 end = (char*)(CALL_MORECORE(0));
2766 RELEASE_MALLOC_GLOBAL_LOCK();
2767 if (br != CMFAIL && end != CMFAIL && br < end) {
2768 size_t ssize = end - br;
2769 if (ssize > nb + TOP_FOOT_SIZE) {
2777 if (tbase != CMFAIL) {
2779 if ((m->footprint += tsize) > m->max_footprint)
2780 m->max_footprint = m->footprint;
2782 if (!is_initialized(m)) { /* first-time initialization */
2783 if (m->least_addr == 0 || tbase < m->least_addr)
2784 m->least_addr = tbase;
2785 m->seg.base = tbase;
2786 m->seg.size = tsize;
2787 m->seg.sflags = mmap_flag;
2788 m->magic = mparams.magic;
2789 m->release_checks = MAX_RELEASE_CHECK_RATE;
2793 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
2797 /* Offset top by embedded malloc_state */
2798 mchunkptr mn = next_chunk(mem2chunk(m));
2799 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
2804 /* Try to merge with an existing segment */
2805 msegmentptr sp = &m->seg;
2806 /* Only consider most recent segment if traversal suppressed */
2807 while (sp != 0 && tbase != sp->base + sp->size)
2808 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
2810 !is_extern_segment(sp) &&
2811 (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
2812 segment_holds(sp, m->top)) { /* append */
2814 init_top(m, m->top, m->topsize + tsize);
2817 if (tbase < m->least_addr)
2818 m->least_addr = tbase;
2820 while (sp != 0 && sp->base != tbase + tsize)
2821 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
2823 !is_extern_segment(sp) &&
2824 (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
2825 char* oldbase = sp->base;
2828 return prepend_alloc(m, tbase, oldbase, nb);
2831 add_segment(m, tbase, tsize, mmap_flag);
2835 if (nb < m->topsize) { /* Allocate from new or extended top space */
2836 size_t rsize = m->topsize -= nb;
2837 mchunkptr p = m->top;
2838 mchunkptr r = m->top = chunk_plus_offset(p, nb);
2839 r->head = rsize | PINUSE_BIT;
2840 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
2841 check_top_chunk(m, m->top);
2842 check_malloced_chunk(m, chunk2mem(p), nb);
2843 return chunk2mem(p);
2847 MALLOC_FAILURE_ACTION;
2851 /* ----------------------- system deallocation -------------------------- */
2853 /* Unmap and unlink any mmapped segments that don't contain used chunks */
2854 __clib_nosanitize_addr
2855 static size_t release_unused_segments(mstate m) {
2856 size_t released = 0;
2858 msegmentptr pred = &m->seg;
2859 msegmentptr sp = pred->next;
2861 char* base = sp->base;
2862 size_t size = sp->size;
2863 msegmentptr next = sp->next;
2865 if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
2866 mchunkptr p = align_as_chunk(base);
2867 size_t psize = chunksize(p);
2868 /* Can unmap if first chunk holds entire segment and not pinned */
2869 if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
2870 tchunkptr tp = (tchunkptr)p;
2871 assert(segment_holds(sp, (char*)sp));
2877 unlink_large_chunk(m, tp);
2879 if (CALL_MUNMAP(base, size) == 0) {
2881 m->footprint -= size;
2882 /* unlink obsoleted record */
2886 else { /* back out if cannot unmap */
2887 insert_large_chunk(m, tp, psize);
2891 if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
2896 /* Reset check counter */
2897 m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)?
2898 (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE);
2902 __clib_nosanitize_addr
2903 static int sys_trim(mstate m, size_t pad) {
2904 size_t released = 0;
2905 ensure_initialization();
2906 if (pad < MAX_REQUEST && is_initialized(m)) {
2907 pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
2909 if (m->topsize > pad) {
2910 /* Shrink top space in granularity-size units, keeping at least one */
2911 size_t unit = mparams.granularity;
2912 size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
2914 msegmentptr sp = segment_holding(m, (char*)m->top);
2916 if (!is_extern_segment(sp)) {
2917 if (is_mmapped_segment(sp)) {
2919 sp->size >= extra &&
2920 !has_segment_link(m, sp)) { /* can't shrink if pinned */
2921 size_t newsize = sp->size - extra;
2922 (void)newsize; /* placate people compiling -Wunused-variable */
2923 /* Prefer mremap, fall back to munmap */
2924 if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
2925 (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
2930 else if (HAVE_MORECORE) {
2931 if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
2932 extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
2933 ACQUIRE_MALLOC_GLOBAL_LOCK();
2935 /* Make sure end of memory is where we last set it. */
2936 char* old_br = (char*)(CALL_MORECORE(0));
2937 if (old_br == sp->base + sp->size) {
2938 char* rel_br = (char*)(CALL_MORECORE(-extra));
2939 char* new_br = (char*)(CALL_MORECORE(0));
2940 if (rel_br != CMFAIL && new_br < old_br)
2941 released = old_br - new_br;
2944 RELEASE_MALLOC_GLOBAL_LOCK();
2948 if (released != 0) {
2949 sp->size -= released;
2950 m->footprint -= released;
2951 init_top(m, m->top, m->topsize - released);
2952 check_top_chunk(m, m->top);
2956 /* Unmap any unused mmapped segments */
2958 released += release_unused_segments(m);
2960 /* On failure, disable autotrim to avoid repeated failed future calls */
2961 if (released == 0 && m->topsize > m->trim_check)
2962 m->trim_check = MAX_SIZE_T;
2965 return (released != 0)? 1 : 0;
2968 /* Consolidate and bin a chunk. Differs from exported versions
2969 of free mainly in that the chunk need not be marked as inuse.
2971 __clib_nosanitize_addr
2972 static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
2973 mchunkptr next = chunk_plus_offset(p, psize);
2976 size_t prevsize = p->prev_foot;
2977 if (is_mmapped(p)) {
2978 psize += prevsize + MMAP_FOOT_PAD;
2979 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
2980 m->footprint -= psize;
2983 prev = chunk_minus_offset(p, prevsize);
2986 if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */
2988 unlink_chunk(m, p, prevsize);
2990 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
2992 set_free_with_pinuse(p, psize, next);
2997 CORRUPTION_ERROR_ACTION(m);
3001 if (RTCHECK(ok_address(m, next))) {
3002 if (!cinuse(next)) { /* consolidate forward */
3003 if (next == m->top) {
3004 size_t tsize = m->topsize += psize;
3006 p->head = tsize | PINUSE_BIT;
3013 else if (next == m->dv) {
3014 size_t dsize = m->dvsize += psize;
3016 set_size_and_pinuse_of_free_chunk(p, dsize);
3020 size_t nsize = chunksize(next);
3022 unlink_chunk(m, next, nsize);
3023 set_size_and_pinuse_of_free_chunk(p, psize);
3031 set_free_with_pinuse(p, psize, next);
3033 insert_chunk(m, p, psize);
3036 CORRUPTION_ERROR_ACTION(m);
3040 /* ---------------------------- malloc --------------------------- */
3042 /* allocate a large request from the best fitting chunk in a treebin */
3043 __clib_nosanitize_addr
3044 static void* tmalloc_large(mstate m, size_t nb) {
3046 size_t rsize = -nb; /* Unsigned negation */
3049 compute_tree_index(nb, idx);
3050 if ((t = *treebin_at(m, idx)) != 0) {
3051 /* Traverse tree for this bin looking for node with size == nb */
3052 size_t sizebits = nb << leftshift_for_tree_index(idx);
3053 tchunkptr rst = 0; /* The deepest untaken right subtree */
3056 size_t trem = chunksize(t) - nb;
3059 if ((rsize = trem) == 0)
3063 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
3064 if (rt != 0 && rt != t)
3067 t = rst; /* set t to least subtree holding sizes > nb */
3073 if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
3074 binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
3075 if (leftbits != 0) {
3077 binmap_t leastbit = least_bit(leftbits);
3078 compute_bit2idx(leastbit, i);
3079 t = *treebin_at(m, i);
3083 while (t != 0) { /* find smallest of tree or subtree */
3084 size_t trem = chunksize(t) - nb;
3089 t = leftmost_child(t);
3092 /* If dv is a better fit, return 0 so malloc will use it */
3093 if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
3094 if (RTCHECK(ok_address(m, v))) { /* split */
3095 mchunkptr r = chunk_plus_offset(v, nb);
3096 assert(chunksize(v) == rsize + nb);
3097 if (RTCHECK(ok_next(v, r))) {
3098 unlink_large_chunk(m, v);
3099 if (rsize < MIN_CHUNK_SIZE)
3100 set_inuse_and_pinuse(m, v, (rsize + nb));
3102 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3103 set_size_and_pinuse_of_free_chunk(r, rsize);
3104 insert_chunk(m, r, rsize);
3106 return chunk2mem(v);
3109 CORRUPTION_ERROR_ACTION(m);
3114 /* allocate a small request from the best fitting chunk in a treebin */
3115 __clib_nosanitize_addr
3116 static void* tmalloc_small(mstate m, size_t nb) {
3120 binmap_t leastbit = least_bit(m->treemap);
3121 compute_bit2idx(leastbit, i);
3122 v = t = *treebin_at(m, i);
3123 rsize = chunksize(t) - nb;
3125 while ((t = leftmost_child(t)) != 0) {
3126 size_t trem = chunksize(t) - nb;
3133 if (RTCHECK(ok_address(m, v))) {
3134 mchunkptr r = chunk_plus_offset(v, nb);
3135 assert(chunksize(v) == rsize + nb);
3136 if (RTCHECK(ok_next(v, r))) {
3137 unlink_large_chunk(m, v);
3138 if (rsize < MIN_CHUNK_SIZE)
3139 set_inuse_and_pinuse(m, v, (rsize + nb));
3141 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3142 set_size_and_pinuse_of_free_chunk(r, rsize);
3143 replace_dv(m, r, rsize);
3145 return chunk2mem(v);
3149 CORRUPTION_ERROR_ACTION(m);
3155 void* dlmalloc(size_t bytes) {
3158 If a small request (< 256 bytes minus per-chunk overhead):
3159 1. If one exists, use a remainderless chunk in associated smallbin.
3160 (Remainderless means that there are too few excess bytes to
3161 represent as a chunk.)
3162 2. If it is big enough, use the dv chunk, which is normally the
3163 chunk adjacent to the one used for the most recent small request.
3164 3. If one exists, split the smallest available chunk in a bin,
3165 saving remainder in dv.
3166 4. If it is big enough, use the top chunk.
3167 5. If available, get memory from system and use it
3168 Otherwise, for a large request:
3169 1. Find the smallest available binned chunk that fits, and use it
3170 if it is better fitting than dv chunk, splitting if necessary.
3171 2. If better fitting than any binned chunk, use the dv chunk.
3172 3. If it is big enough, use the top chunk.
3173 4. If request size >= mmap threshold, try to directly mmap this chunk.
3174 5. If available, get memory from system and use it
3176 The ugly gotos here ensure that postaction occurs along all paths.
3180 ensure_initialization(); /* initialize in sys_alloc if not using locks */
3183 if (!PREACTION(gm)) {
3186 if (bytes <= MAX_SMALL_REQUEST) {
3189 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
3190 idx = small_index(nb);
3191 smallbits = gm->smallmap >> idx;
3193 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
3195 idx += ~smallbits & 1; /* Uses next bin if idx empty */
3196 b = smallbin_at(gm, idx);
3198 assert(chunksize(p) == small_index2size(idx));
3199 unlink_first_small_chunk(gm, b, p, idx);
3200 set_inuse_and_pinuse(gm, p, small_index2size(idx));
3202 check_malloced_chunk(gm, mem, nb);
3206 else if (nb > gm->dvsize) {
3207 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
3211 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
3212 binmap_t leastbit = least_bit(leftbits);
3213 compute_bit2idx(leastbit, i);
3214 b = smallbin_at(gm, i);
3216 assert(chunksize(p) == small_index2size(i));
3217 unlink_first_small_chunk(gm, b, p, i);
3218 rsize = small_index2size(i) - nb;
3219 /* Fit here cannot be remainderless if 4-byte sizes */
3220 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
3221 set_inuse_and_pinuse(gm, p, small_index2size(i));
3223 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
3224 r = chunk_plus_offset(p, nb);
3225 set_size_and_pinuse_of_free_chunk(r, rsize);
3226 replace_dv(gm, r, rsize);
3229 check_malloced_chunk(gm, mem, nb);
3233 else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
3234 check_malloced_chunk(gm, mem, nb);
3239 else if (bytes >= MAX_REQUEST)
3240 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
3242 nb = pad_request(bytes);
3243 if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
3244 check_malloced_chunk(gm, mem, nb);
3249 if (nb <= gm->dvsize) {
3250 size_t rsize = gm->dvsize - nb;
3251 mchunkptr p = gm->dv;
3252 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
3253 mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
3255 set_size_and_pinuse_of_free_chunk(r, rsize);
3256 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
3258 else { /* exhaust dv */
3259 size_t dvs = gm->dvsize;
3262 set_inuse_and_pinuse(gm, p, dvs);
3265 check_malloced_chunk(gm, mem, nb);
3269 else if (nb < gm->topsize) { /* Split top */
3270 size_t rsize = gm->topsize -= nb;
3271 mchunkptr p = gm->top;
3272 mchunkptr r = gm->top = chunk_plus_offset(p, nb);
3273 r->head = rsize | PINUSE_BIT;
3274 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
3276 check_top_chunk(gm, gm->top);
3277 check_malloced_chunk(gm, mem, nb);
3281 mem = sys_alloc(gm, nb);
3291 /* ---------------------------- free --------------------------- */
3293 void dlfree(void* mem) {
3295 Consolidate freed chunks with preceding or succeeding bordering
3296 free chunks, if they exist, and then place in a bin. Intermixed
3297 with special cases for top, dv, mmapped chunks, and usage errors.
3301 mchunkptr p = mem2chunk(mem);
3303 mstate fm = get_mstate_for(p);
3304 if (!ok_magic(fm)) {
3305 USAGE_ERROR_ACTION(fm, p);
3310 #endif /* FOOTERS */
3311 if (!PREACTION(fm)) {
3312 check_inuse_chunk(fm, p);
3313 if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
3314 size_t psize = chunksize(p);
3315 mchunkptr next = chunk_plus_offset(p, psize);
3317 size_t prevsize = p->prev_foot;
3318 if (is_mmapped(p)) {
3319 psize += prevsize + MMAP_FOOT_PAD;
3320 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
3321 fm->footprint -= psize;
3325 mchunkptr prev = chunk_minus_offset(p, prevsize);
3328 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
3330 unlink_chunk(fm, p, prevsize);
3332 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
3334 set_free_with_pinuse(p, psize, next);
3343 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
3344 if (!cinuse(next)) { /* consolidate forward */
3345 if (next == fm->top) {
3346 size_t tsize = fm->topsize += psize;
3348 p->head = tsize | PINUSE_BIT;
3353 if (should_trim(fm, tsize))
3357 else if (next == fm->dv) {
3358 size_t dsize = fm->dvsize += psize;
3360 set_size_and_pinuse_of_free_chunk(p, dsize);
3364 size_t nsize = chunksize(next);
3366 unlink_chunk(fm, next, nsize);
3367 set_size_and_pinuse_of_free_chunk(p, psize);
3375 set_free_with_pinuse(p, psize, next);
3377 if (is_small(psize)) {
3378 insert_small_chunk(fm, p, psize);
3379 check_free_chunk(fm, p);
3382 tchunkptr tp = (tchunkptr)p;
3383 insert_large_chunk(fm, tp, psize);
3384 check_free_chunk(fm, p);
3385 if (--fm->release_checks == 0)
3386 release_unused_segments(fm);
3392 USAGE_ERROR_ACTION(fm, p);
3399 #endif /* FOOTERS */
3402 void* dlcalloc(size_t n_elements, size_t elem_size) {
3405 if (n_elements != 0) {
3406 req = n_elements * elem_size;
3407 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
3408 (req / n_elements != elem_size))
3409 req = MAX_SIZE_T; /* force downstream failure on overflow */
3411 mem = dlmalloc(req);
3412 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
3413 memset(mem, 0, req);
3417 #endif /* !ONLY_MSPACES */
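/* Minimal usage sketch of the exported entry points defined above; request
   sizes are arbitrary and the example_ name is hypothetical. Not compiled. */
#if 0
static void example_basic_usage(void) {
  void* a = dlmalloc(100);       /* small request: smallbin/dv/top path */
  void* b = dlcalloc(10, 32);    /* zeroed; memset skipped for fresh mmap */
  void* c = dlmalloc(1 << 20);   /* large request: treebin or direct mmap */
  a = dlrealloc(a, 1000);        /* may extend in place or move and copy */
  dlfree(a);
  dlfree(b);
  dlfree(c);
}
#endif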
3419 /* ------------ Internal support for realloc, memalign, etc -------------- */
3421 /* Try to realloc; only in-place unless can_move true */
3422 static __clib_nosanitize_addr mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
3425 size_t oldsize = chunksize(p);
3426 mchunkptr next = chunk_plus_offset(p, oldsize);
3427 if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
3428 ok_next(p, next) && ok_pinuse(next))) {
3429 if (is_mmapped(p)) {
3430 newp = mmap_resize(m, p, nb, can_move);
3432 else if (oldsize >= nb) { /* already big enough */
3433 size_t rsize = oldsize - nb;
3434 if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */
3435 mchunkptr r = chunk_plus_offset(p, nb);
3436 set_inuse(m, p, nb);
3437 set_inuse(m, r, rsize);
3438 dispose_chunk(m, r, rsize);
3442 else if (next == m->top) { /* extend into top */
3443 if (oldsize + m->topsize > nb) {
3444 size_t newsize = oldsize + m->topsize;
3445 size_t newtopsize = newsize - nb;
3446 mchunkptr newtop = chunk_plus_offset(p, nb);
3447 set_inuse(m, p, nb);
3448 newtop->head = newtopsize |PINUSE_BIT;
3450 m->topsize = newtopsize;
3454 else if (next == m->dv) { /* extend into dv */
3455 size_t dvs = m->dvsize;
3456 if (oldsize + dvs >= nb) {
3457 size_t dsize = oldsize + dvs - nb;
3458 if (dsize >= MIN_CHUNK_SIZE) {
3459 mchunkptr r = chunk_plus_offset(p, nb);
3460 mchunkptr n = chunk_plus_offset(r, dsize);
3461 set_inuse(m, p, nb);
3462 set_size_and_pinuse_of_free_chunk(r, dsize);
3467 else { /* exhaust dv */
3468 size_t newsize = oldsize + dvs;
3469 set_inuse(m, p, newsize);
3476 else if (!cinuse(next)) { /* extend into next free chunk */
3477 size_t nextsize = chunksize(next);
3478 if (oldsize + nextsize >= nb) {
3479 size_t rsize = oldsize + nextsize - nb;
3480 unlink_chunk(m, next, nextsize);
3481 if (rsize < MIN_CHUNK_SIZE) {
3482 size_t newsize = oldsize + nextsize;
3483 set_inuse(m, p, newsize);
3486 mchunkptr r = chunk_plus_offset(p, nb);
3487 set_inuse(m, p, nb);
3488 set_inuse(m, r, rsize);
3489 dispose_chunk(m, r, rsize);
3496 USAGE_ERROR_ACTION(m, chunk2mem(p));
3501 __clib_nosanitize_addr
3502 static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
3504 if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
3505 alignment = MIN_CHUNK_SIZE;
3506 if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
3507 size_t a = MALLOC_ALIGNMENT << 1;
3508 while (a < alignment) a <<= 1;
3511 if (bytes >= MAX_REQUEST - alignment) {
3512 if (m != 0) { /* Test isn't needed but avoids compiler warning */
3513 MALLOC_FAILURE_ACTION;
3517 size_t nb = request2size(bytes);
3518 size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
3519 mem = internal_malloc(m, req);
3521 mchunkptr p = mem2chunk(mem);
3524 if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */
3526 Find an aligned spot inside chunk. Since we need to give
3527 back leading space in a chunk of at least MIN_CHUNK_SIZE, if
3528 the first calculation places us at a spot with less than
3529 MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
3530 We've allocated enough total room so that this is always
3533 char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment -
3536 char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
3538 mchunkptr newp = (mchunkptr)pos;
3539 size_t leadsize = pos - (char*)(p);
3540 size_t newsize = chunksize(p) - leadsize;
3542 if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
3543 newp->prev_foot = p->prev_foot + leadsize;
3544 newp->head = newsize;
3546 else { /* Otherwise, give back leader, use the rest */
3547 set_inuse(m, newp, newsize);
3548 set_inuse(m, p, leadsize);
3549 dispose_chunk(m, p, leadsize);
3554 /* Give back spare room at the end */
3555 if (!is_mmapped(p)) {
3556 size_t size = chunksize(p);
3557 if (size > nb + MIN_CHUNK_SIZE) {
3558 size_t remainder_size = size - nb;
3559 mchunkptr remainder = chunk_plus_offset(p, nb);
3560 set_inuse(m, p, nb);
3561 set_inuse(m, remainder, remainder_size);
3562 dispose_chunk(m, remainder, remainder_size);
3567 assert (chunksize(p) >= nb);
3568 assert(((size_t)mem & (alignment - 1)) == 0);
3569 check_inuse_chunk(m, p);
3577 Common support for independent_X routines, handling
3578 all of the combinations that can result.
3580 bit 0 set if all elements are same size (using sizes[0])
3581 bit 1 set if elements should be zeroed
3583 static void** ialloc(mstate m,
3589 size_t element_size; /* chunksize of each element, if all same */
3590 size_t contents_size; /* total size of elements */
3591 size_t array_size; /* request size of pointer array */
3592 void* mem; /* malloced aggregate space */
3593 mchunkptr p; /* corresponding chunk */
3594 size_t remainder_size; /* remaining bytes while splitting */
3595 void** marray; /* either "chunks" or malloced ptr array */
3596 mchunkptr array_chunk; /* chunk for malloced ptr array */
3597 flag_t was_enabled; /* to disable mmap */
3601 ensure_initialization();
3602 /* compute array length, if needed */
3604 if (n_elements == 0)
3605 return chunks; /* nothing to do */
3610 /* if empty req, must still return chunk representing empty array */
3611 if (n_elements == 0)
3612 return (void**)internal_malloc(m, 0);
3614 array_size = request2size(n_elements * (sizeof(void*)));
3617 /* compute total element size */
3618 if (opts & 0x1) { /* all-same-size */
3619 element_size = request2size(*sizes);
3620 contents_size = n_elements * element_size;
3622 else { /* add up all the sizes */
3625 for (i = 0; i != n_elements; ++i)
3626 contents_size += request2size(sizes[i]);
3629 size = contents_size + array_size;
3632 Allocate the aggregate chunk. First disable direct-mmapping so
3633 malloc won't use it, since we would not be able to later
3634 free/realloc space internal to a segregated mmap region.
3636 was_enabled = use_mmap(m);
3638 mem = internal_malloc(m, size - CHUNK_OVERHEAD);
3644 if (PREACTION(m)) return 0;
3646 remainder_size = chunksize(p);
3648 assert(!is_mmapped(p));
3650 if (opts & 0x2) { /* optionally clear the elements */
3651 memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
3654 /* If not provided, allocate the pointer array as final part of chunk */
3656 size_t array_chunk_size;
3657 array_chunk = chunk_plus_offset(p, contents_size);
3658 array_chunk_size = remainder_size - contents_size;
3659 marray = (void**) (chunk2mem(array_chunk));
3660 set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
3661 remainder_size = contents_size;
3664 /* split out elements */
3665 for (i = 0; ; ++i) {
3666 marray[i] = chunk2mem(p);
3667 if (i != n_elements-1) {
3668 if (element_size != 0)
3669 size = element_size;
3671 size = request2size(sizes[i]);
3672 remainder_size -= size;
3673 set_size_and_pinuse_of_inuse_chunk(m, p, size);
3674 p = chunk_plus_offset(p, size);
3676 else { /* the final element absorbs any overallocation slop */
3677 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
3683 if (marray != chunks) {
3684 /* final element must have exactly exhausted chunk */
3685 if (element_size != 0) {
3686 assert(remainder_size == element_size);
3689 assert(remainder_size == request2size(sizes[i]));
3691 check_inuse_chunk(m, mem2chunk(marray));
3693 for (i = 0; i != n_elements; ++i)
3694 check_inuse_chunk(m, mem2chunk(marray[i]));
3702 /* Try to free all pointers in the given array.
3703 Note: this could be made faster by delaying consolidation,
3704 at the price of disabling some user integrity checks. We
3705 still optimize some consolidations by combining adjacent
3706 chunks before freeing, which will occur often if allocated
3707 with ialloc or the array is sorted.
3709 static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) {
3711 if (!PREACTION(m)) {
3713 void** fence = &(array[nelem]);
3714 for (a = array; a != fence; ++a) {
3717 mchunkptr p = mem2chunk(mem);
3718 size_t psize = chunksize(p);
3720 if (get_mstate_for(p) != m) {
3725 check_inuse_chunk(m, p);
3727 if (RTCHECK(ok_address(m, p) && ok_inuse(p))) {
3728 void ** b = a + 1; /* try to merge with next chunk */
3729 mchunkptr next = next_chunk(p);
3730 if (b != fence && *b == chunk2mem(next)) {
3731 size_t newsize = chunksize(next) + psize;
3732 set_inuse(m, p, newsize);
3736 dispose_chunk(m, p, psize);
3739 CORRUPTION_ERROR_ACTION(m);
3744 if (should_trim(m, m->topsize))
3752 #if MALLOC_INSPECT_ALL
3753 static void internal_inspect_all(mstate m,
3754 void(*handler)(void *start,
3757 void* callback_arg),
3759 if (is_initialized(m)) {
3760 mchunkptr top = m->top;
3762 for (s = &m->seg; s != 0; s = s->next) {
3763 mchunkptr q = align_as_chunk(s->base);
3764 while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) {
3765 mchunkptr next = next_chunk(q);
3766 size_t sz = chunksize(q);
3770 used = sz - CHUNK_OVERHEAD; /* must not be mmapped */
3771 start = chunk2mem(q);
3775 if (is_small(sz)) { /* offset by possible bookkeeping */
3776 start = (void*)((char*)q + sizeof(struct malloc_chunk));
3779 start = (void*)((char*)q + sizeof(struct malloc_tree_chunk));
3782 if (start < (void*)next) /* skip if all space is bookkeeping */
3783 handler(start, next, used, arg);
3791 #endif /* MALLOC_INSPECT_ALL */
3793 /* ------------------ Exported realloc, memalign, etc -------------------- */
3797 void* dlrealloc(void* oldmem, size_t bytes) {
3800 mem = dlmalloc(bytes);
3802 else if (bytes >= MAX_REQUEST) {
3803 MALLOC_FAILURE_ACTION;
3805 #ifdef REALLOC_ZERO_BYTES_FREES
3806 else if (bytes == 0) {
3809 #endif /* REALLOC_ZERO_BYTES_FREES */
3811 size_t nb = request2size(bytes);
3812 mchunkptr oldp = mem2chunk(oldmem);
3816 mstate m = get_mstate_for(oldp);
3818 USAGE_ERROR_ACTION(m, oldmem);
3821 #endif /* FOOTERS */
3822 if (!PREACTION(m)) {
3823 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
3826 check_inuse_chunk(m, newp);
3827 mem = chunk2mem(newp);
3830 mem = internal_malloc(m, bytes);
3832 size_t oc = chunksize(oldp) - overhead_for(oldp);
3833 memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
3834 internal_free(m, oldmem);
3842 void* dlrealloc_in_place(void* oldmem, size_t bytes) {
3845 if (bytes >= MAX_REQUEST) {
3846 MALLOC_FAILURE_ACTION;
3849 size_t nb = request2size(bytes);
3850 mchunkptr oldp = mem2chunk(oldmem);
3854 mstate m = get_mstate_for(oldp);
3856 USAGE_ERROR_ACTION(m, oldmem);
3859 #endif /* FOOTERS */
3860 if (!PREACTION(m)) {
3861 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
3864 check_inuse_chunk(m, newp);
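/* Usage sketch: dlrealloc_in_place() returns the original pointer on success
   and 0 when the chunk cannot be resized without moving; the example_ name
   is hypothetical and the block is not compiled. */
#if 0
static void example_grow_in_place(void* buf) {
  if (dlrealloc_in_place(buf, 4096) == 0) {
    /* could not extend in place; caller may fall back to dlrealloc() */
  }
}
#endif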
3873 void* dlmemalign(size_t alignment, size_t bytes) {
3874 if (alignment <= MALLOC_ALIGNMENT) {
3875 return dlmalloc(bytes);
3877 return internal_memalign(gm, alignment, bytes);
3880 int dlposix_memalign(void** pp, size_t alignment, size_t bytes) {
3882 if (alignment == MALLOC_ALIGNMENT)
3883 mem = dlmalloc(bytes);
3885 size_t d = alignment / sizeof(void*);
3886 size_t r = alignment % sizeof(void*);
3887 if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0)
3889 else if (bytes <= MAX_REQUEST - alignment) {
3890 if (alignment < MIN_CHUNK_SIZE)
3891 alignment = MIN_CHUNK_SIZE;
3892 mem = internal_memalign(gm, alignment, bytes);
3903 void* dlvalloc(size_t bytes) {
3905 ensure_initialization();
3906 pagesz = mparams.page_size;
3907 return dlmemalign(pagesz, bytes);
3910 void* dlpvalloc(size_t bytes) {
3912 ensure_initialization();
3913 pagesz = mparams.page_size;
3914 return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
3917 void** dlindependent_calloc(size_t n_elements, size_t elem_size,
3919 size_t sz = elem_size; /* serves as 1-element array */
3920 return ialloc(gm, n_elements, &sz, 3, chunks);
3923 void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
3925 return ialloc(gm, n_elements, sizes, 0, chunks);
3928 size_t dlbulk_free(void* array[], size_t nelem) {
3929 return internal_bulk_free(gm, array, nelem);
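/* Usage sketch (arbitrary sizes): dlindependent_comalloc() carves one
   aggregate allocation into separately freeable pieces, and dlbulk_free()
   releases them while merging adjacent chunks as noted above; the example_
   name is hypothetical and the block is not compiled. */
#if 0
static void example_comalloc_then_bulk_free(void) {
  size_t sizes[3] = { 64, 256, 1024 };
  void* chunks[3];
  if (dlindependent_comalloc(3, sizes, chunks) != 0) {
    /* ... use chunks[0..2] as three independent allocations ... */
    dlbulk_free(chunks, 3);
  }
}
#endif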
3932 #if MALLOC_INSPECT_ALL
3933 void dlmalloc_inspect_all(void(*handler)(void *start,
3936 void* callback_arg),
3938 ensure_initialization();
3939 if (!PREACTION(gm)) {
3940 internal_inspect_all(gm, handler, arg);
3944 #endif /* MALLOC_INSPECT_ALL */
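/* Usage sketch, assuming the elided handler parameters are the region end
   and its used-byte count, matching the handler(start, next, used, arg)
   call in internal_inspect_all() above; example_ names are hypothetical
   and the block is not compiled. */
#if 0
static void example_inspect_handler(void* start, void* end,
                                    size_t used, void* arg) {
  size_t* total_used = (size_t*)arg;
  (void)start; (void)end;
  *total_used += used;               /* 0 for free regions */
}
static void example_inspect(void) {
  size_t total = 0;
  dlmalloc_inspect_all(example_inspect_handler, &total);
}
#endif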
3946 int dlmalloc_trim(size_t pad) {
3948 ensure_initialization();
3949 if (!PREACTION(gm)) {
3950 result = sys_trim(gm, pad);
3956 size_t dlmalloc_footprint(void) {
3957 return gm->footprint;
3960 size_t dlmalloc_max_footprint(void) {
3961 return gm->max_footprint;
3964 size_t dlmalloc_footprint_limit(void) {
3965 size_t maf = gm->footprint_limit;
3966 return maf == 0 ? MAX_SIZE_T : maf;
3969 size_t dlmalloc_set_footprint_limit(size_t bytes) {
3970 size_t result; /* invert sense of 0 */
3972 result = granularity_align(1); /* Use minimal size */
3973 if (bytes == MAX_SIZE_T)
3974 result = 0; /* disable */
3976 result = granularity_align(bytes);
3977 return gm->footprint_limit = result;
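/* Usage sketch illustrating the inverted sense of 0/MAX_SIZE_T described
   above; the 64 MiB figure is arbitrary and the example_ name is
   hypothetical. Not compiled. */
#if 0
static void example_footprint_limit(void) {
  size_t old = dlmalloc_footprint_limit();     /* MAX_SIZE_T if unlimited */
  dlmalloc_set_footprint_limit(64UL << 20);    /* cap near 64 MiB (rounded) */
  dlmalloc_set_footprint_limit(old);           /* restore previous setting */
}
#endif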
3981 struct dlmallinfo dlmallinfo(void) {
3982 return internal_mallinfo(gm);
3984 #endif /* NO_MALLINFO */
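/* Usage sketch: read the mallinfo-style counters filled in by
   internal_mallinfo() above; the example_ name is hypothetical and the
   block is not compiled. */
#if 0
static void example_mallinfo(void) {
  struct dlmallinfo mi = dlmallinfo();
  size_t in_use = mi.uordblks;       /* total allocated space */
  size_t free_bytes = mi.fordblks;   /* total free space */
  (void)in_use; (void)free_bytes;
}
#endif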
3986 #if !NO_MALLOC_STATS
3987 void dlmalloc_stats() {
3988 internal_malloc_stats(gm);
3990 #endif /* NO_MALLOC_STATS */
3992 int dlmallopt(int param_number, int value) {
3993 return change_mparam(param_number, value);
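/* Usage sketch for the mallopt-style hook; note from change_mparam() above
   that a value of -1 maps to MAX_SIZE_T. The example_ name is hypothetical
   and the block is not compiled. */
#if 0
static void example_mallopt(void) {
  dlmallopt(M_TRIM_THRESHOLD, 1 << 20);   /* trim when top exceeds ~1 MiB */
  dlmallopt(M_MMAP_THRESHOLD, -1);        /* effectively never mmap directly */
}
#endif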
3996 size_t dlmalloc_usable_size(void* mem) {
3998 mchunkptr p = mem2chunk(mem);
4000 return chunksize(p) - overhead_for(p);
4005 #endif /* !ONLY_MSPACES */
4007 /* ----------------------------- user mspaces ---------------------------- */
4011 static mstate init_user_mstate(char* tbase, size_t tsize) {
4012 size_t msize = pad_request(sizeof(struct malloc_state));
4014 mchunkptr msp = align_as_chunk(tbase);
4015 mstate m = (mstate)(chunk2mem(msp));
4016 memset(m, 0, msize);
4017 (void)INITIAL_LOCK(&m->mutex);
4018 msp->head = (msize|INUSE_BITS);
4019 m->seg.base = m->least_addr = tbase;
4020 m->seg.size = m->footprint = m->max_footprint = tsize;
4021 m->magic = mparams.magic;
4022 m->release_checks = MAX_RELEASE_CHECK_RATE;
4023 m->mflags = mparams.default_mflags;
4026 disable_contiguous(m);
4028 mn = next_chunk(mem2chunk(m));
4029 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
4030 check_top_chunk(m, m->top);
4034 mspace create_mspace(size_t capacity, int locked) {
4037 ensure_initialization();
4038 msize = pad_request(sizeof(struct malloc_state));
4039 if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
4040 size_t rs = ((capacity == 0)? mparams.granularity :
4041 (capacity + TOP_FOOT_SIZE + msize));
4042 size_t tsize = granularity_align(rs);
4043 char* tbase = (char*)(CALL_MMAP(tsize));
4044 if (tbase != CMFAIL) {
4045 m = init_user_mstate(tbase, tsize);
4046 m->seg.sflags = USE_MMAP_BIT;
4047 set_lock(m, locked);
4053 mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
4056 ensure_initialization();
4057 msize = pad_request(sizeof(struct malloc_state));
4058 if (capacity > msize + TOP_FOOT_SIZE &&
4059 capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
4060 m = init_user_mstate((char*)base, capacity);
4061 m->seg.sflags = EXTERN_BIT;
4062 set_lock(m, locked);
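/*
  Illustrative sketch (not compiled; the function name is hypothetical) of
  the mspace lifecycle built from the routines in this section: create a
  private heap with create_mspace, allocate and release from it with
  mspace_malloc / mspace_free, then unmap everything at once with
  destroy_mspace.
*/
#if 0
static void mspace_lifecycle_example (void)
{
  /* capacity 0 => default granularity; locked != 0 => lock the mspace */
  mspace msp = create_mspace (0, 1);
  if (msp != 0)
    {
      void *p = mspace_malloc (msp, 128);
      if (p != 0)
        mspace_free (msp, p);
      (void) destroy_mspace (msp);   /* releases all owned segments */
    }
}
#endif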
4067 int mspace_track_large_chunks(mspace msp, int enable) {
4069 mstate ms = (mstate)msp;
4070 if (!PREACTION(ms)) {
4071 if (!use_mmap(ms)) {
4084 __clib_nosanitize_addr
4085 size_t destroy_mspace(mspace msp) {
4087 mstate ms = (mstate)msp;
4089 msegmentptr sp = &ms->seg;
4090 (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */
4092 char* base = sp->base;
4093 size_t size = sp->size;
4094 flag_t flag = sp->sflags;
4095 (void)base; /* placate people compiling -Wunused-variable */
4097 if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
4098 CALL_MUNMAP(base, size) == 0)
4103 USAGE_ERROR_ACTION(ms,ms);
4108 void mspace_get_address_and_size (mspace msp, char **addrp, size_t *sizep)
4114 this_seg = &ms->seg;
4116 *addrp = this_seg->base;
4117 *sizep = this_seg->size;
4120 __clib_nosanitize_addr
4121 int mspace_is_heap_object (mspace msp, void *p)
4129 this_seg = &ms->seg;
4134 base = this_seg->base;
4135 if (pp >= base && pp < (base + this_seg->size))
4137 this_seg = this_seg->next;
4140 if (pp > ms->least_addr && pp <= ms->least_addr + ms->footprint)
4146 __clib_nosanitize_addr
4147 void *mspace_least_addr (mspace msp)
4149 mstate ms = (mstate) msp;
4150 return (void *) ms->least_addr;
4153 void mspace_disable_expand (mspace msp)
4155 mstate ms = (mstate)msp;
4157 disable_expand (ms);
4160 __clib_nosanitize_addr
4161 int mspace_enable_disable_trace (mspace msp, int enable)
4163 mstate ms = (mstate)msp;
4164 int was_enabled = 0;
4174 return (was_enabled);
4177 __clib_nosanitize_addr
4178 int mspace_is_traced (mspace msp)
4180 mstate ms = (mstate)msp;
4187 __clib_nosanitize_addr
4188 void* mspace_get_aligned (mspace msp,
4189 unsigned long n_user_data_bytes,
4190 unsigned long align,
4191 unsigned long align_offset) {
4193 unsigned long searchp;
4194 unsigned *wwp; /* "where's Waldo" pointer */
4195 mstate ms = (mstate)msp;
4198 * Allocate space for the "Where's Waldo?" pointer
4199    * at the base of the dlmalloc object
4201 n_user_data_bytes += sizeof(unsigned);
4202 align = align < MALLOC_ALIGNMENT ? MALLOC_ALIGNMENT : align;
4205 * Alignment requests greater than 4K must be at offset zero,
4206    * and must be freed using mspace_put_no_offset - or never freed -
4207 * since the "Where's Waldo?" pointer would waste too much space.
4209 * Waldo is the address of the chunk of memory returned by mspace_malloc,
4210 * which we need later to call mspace_free...
4212 if (align > 4<<10 || align_offset == ~0UL) {
4213 n_user_data_bytes -= sizeof(unsigned);
4214 assert(align_offset == 0);
4215 rv = internal_memalign(ms, (size_t)align, n_user_data_bytes);
4217 /* Trace the allocation */
4218 if (rv && use_trace(ms)) {
4219 mchunkptr p = mem2chunk(rv);
4220 size_t psize = chunksize(p);
4221 mheap_get_trace ((unsigned long)rv, psize);
4226 align = clib_max (align, MALLOC_ALIGNMENT);
4227 align = max_pow2 (align);
4229 /* Correct align offset to be smaller than alignment. */
4230 align_offset &= (align - 1);
4232 n_user_data_bytes += align;
4233 rv = mspace_malloc (msp, n_user_data_bytes);
4238 /* Honor the alignment request */
4239 searchp = (unsigned long)(rv + sizeof (unsigned));
4241 #if 0 /* this is the idea... */
4242 while ((searchp + align_offset) % align)
4247 unsigned long where_now, delta;
4249 where_now = (searchp + align_offset) % align;
4250 delta = align - where_now;
4255 wwp = (unsigned *)(searchp - sizeof(unsigned));
4256 *wwp = (searchp - (((unsigned long) rv) + sizeof (*wwp)));
4257 assert (*wwp < align);
4259 if (use_trace(ms)) {
4260 mchunkptr p = mem2chunk(rv);
4261 size_t psize = chunksize(p);
4262 mheap_get_trace (searchp, psize);
4264 return (void *) searchp;
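/*
  Illustrative sketch (not compiled; the function name is hypothetical) of
  how mspace_get_aligned pairs with mspace_put: the unsigned word just
  below the returned pointer records how far the pointer was advanced past
  the underlying dlmalloc object, and mspace_put uses that delta to recover
  and free the object.  The mspace is assumed to come from create_mspace.
*/
#if 0
static void get_aligned_example (mspace msp)
{
  /* 128 user bytes, 64-byte alignment, no alignment offset */
  void *p = mspace_get_aligned (msp, 128, 64, 0);
  if (p != 0)
    {
      assert (((unsigned long) p % 64) == 0);
      mspace_put (msp, p);   /* reads the hidden delta word below p */
    }
}
#endif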
4267 __clib_nosanitize_addr
4268 void mspace_put (mspace msp, void *p_arg)
4270 char *object_header;
4272 mstate ms = (mstate)msp;
4274 /* Find the object header delta */
4275 wwp = (unsigned *)p_arg;
4278 /* Recover the dlmalloc object pointer */
4279 object_header = (char *)wwp;
4280 object_header -= *wwp;
4282 /* Tracing (if enabled) */
4285 mchunkptr p = mem2chunk(object_header);
4286 size_t psize = chunksize(p);
4288 mheap_put_trace ((unsigned long)p_arg, psize);
4291 #if CLIB_DEBUG > 0 && !defined(CLIB_SANITIZE_ADDR)
4292 /* Poison the object */
4294 size_t psize = mspace_usable_size (object_header);
4295 memset (object_header, 0x13, psize);
4299 /* And free it... */
4300 mspace_free (msp, object_header);
4303 void mspace_put_no_offset (mspace msp, void *p_arg)
4305 mstate ms = (mstate)msp;
4309 mchunkptr p = mem2chunk(p_arg);
4310 size_t psize = chunksize(p);
4312 mheap_put_trace ((unsigned long)p_arg, psize);
4314 mspace_free (msp, p_arg);
4317 __clib_nosanitize_addr
4318 size_t mspace_usable_size_with_delta (const void *p)
4321 char *object_header;
4324 /* Find the object header delta */
4325 wwp = (unsigned *)p;
4328 /* Recover the dlmalloc object pointer */
4329 object_header = (char *)wwp;
4330 object_header -= *wwp;
4332 usable_size = mspace_usable_size (object_header);
4333 /* account for the offset and the size of the offset... */
4334 usable_size -= (*wwp + sizeof (*wwp));
4339 mspace versions of routines are near-clones of the global
4340 versions. This is not so nice but better than the alternatives.
4343 __clib_nosanitize_addr
4344 void* mspace_malloc(mspace msp, size_t bytes) {
4345 mstate ms = (mstate)msp;
4346 if (!ok_magic(ms)) {
4347 USAGE_ERROR_ACTION(ms,ms);
4350 if (!PREACTION(ms)) {
4353 if (bytes <= MAX_SMALL_REQUEST) {
4356 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
4357 idx = small_index(nb);
4358 smallbits = ms->smallmap >> idx;
4360 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4362 idx += ~smallbits & 1; /* Uses next bin if idx empty */
4363 b = smallbin_at(ms, idx);
4365 assert(chunksize(p) == small_index2size(idx));
4366 unlink_first_small_chunk(ms, b, p, idx);
4367 set_inuse_and_pinuse(ms, p, small_index2size(idx));
4369 check_malloced_chunk(ms, mem, nb);
4373 else if (nb > ms->dvsize) {
4374 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4378 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
4379 binmap_t leastbit = least_bit(leftbits);
4380 compute_bit2idx(leastbit, i);
4381 b = smallbin_at(ms, i);
4383 assert(chunksize(p) == small_index2size(i));
4384 unlink_first_small_chunk(ms, b, p, i);
4385 rsize = small_index2size(i) - nb;
4386 /* Fit here cannot be remainderless if 4byte sizes */
4387 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4388 set_inuse_and_pinuse(ms, p, small_index2size(i));
4390 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4391 r = chunk_plus_offset(p, nb);
4392 set_size_and_pinuse_of_free_chunk(r, rsize);
4393 replace_dv(ms, r, rsize);
4396 check_malloced_chunk(ms, mem, nb);
4400 else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
4401 check_malloced_chunk(ms, mem, nb);
4406 else if (bytes >= MAX_REQUEST)
4407 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4409 nb = pad_request(bytes);
4410 if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
4411 check_malloced_chunk(ms, mem, nb);
4416 if (nb <= ms->dvsize) {
4417 size_t rsize = ms->dvsize - nb;
4418 mchunkptr p = ms->dv;
4419 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4420 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
4422 set_size_and_pinuse_of_free_chunk(r, rsize);
4423 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4425 else { /* exhaust dv */
4426 size_t dvs = ms->dvsize;
4429 set_inuse_and_pinuse(ms, p, dvs);
4432 check_malloced_chunk(ms, mem, nb);
4436 else if (nb < ms->topsize) { /* Split top */
4437 size_t rsize = ms->topsize -= nb;
4438 mchunkptr p = ms->top;
4439 mchunkptr r = ms->top = chunk_plus_offset(p, nb);
4440 r->head = rsize | PINUSE_BIT;
4441 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4443 check_top_chunk(ms, ms->top);
4444 check_malloced_chunk(ms, mem, nb);
4448 mem = sys_alloc(ms, nb);
4458 __clib_nosanitize_addr
4459 void mspace_free(mspace msp, void* mem) {
4461 mchunkptr p = mem2chunk(mem);
4463 mstate fm = get_mstate_for(p);
4464 (void)msp; /* placate people compiling -Wunused */
4466 mstate fm = (mstate)msp;
4467 #endif /* FOOTERS */
4468 if (!ok_magic(fm)) {
4469 USAGE_ERROR_ACTION(fm, p);
4472 if (!PREACTION(fm)) {
4473 check_inuse_chunk(fm, p);
4474 if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
4475 size_t psize = chunksize(p);
4476 mchunkptr next = chunk_plus_offset(p, psize);
4478 size_t prevsize = p->prev_foot;
4479 if (is_mmapped(p)) {
4480 psize += prevsize + MMAP_FOOT_PAD;
4481 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
4482 fm->footprint -= psize;
4486 mchunkptr prev = chunk_minus_offset(p, prevsize);
4489 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
4491 unlink_chunk(fm, p, prevsize);
4493 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
4495 set_free_with_pinuse(p, psize, next);
4504 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4505 if (!cinuse(next)) { /* consolidate forward */
4506 if (next == fm->top) {
4507 size_t tsize = fm->topsize += psize;
4509 p->head = tsize | PINUSE_BIT;
4514 if (should_trim(fm, tsize))
4518 else if (next == fm->dv) {
4519 size_t dsize = fm->dvsize += psize;
4521 set_size_and_pinuse_of_free_chunk(p, dsize);
4525 size_t nsize = chunksize(next);
4527 unlink_chunk(fm, next, nsize);
4528 set_size_and_pinuse_of_free_chunk(p, psize);
4536 set_free_with_pinuse(p, psize, next);
4538 if (is_small(psize)) {
4539 insert_small_chunk(fm, p, psize);
4540 check_free_chunk(fm, p);
4543 tchunkptr tp = (tchunkptr)p;
4544 insert_large_chunk(fm, tp, psize);
4545 check_free_chunk(fm, p);
4546 if (--fm->release_checks == 0)
4547 release_unused_segments(fm);
4553 USAGE_ERROR_ACTION(fm, p);
4560 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
4563 mstate ms = (mstate)msp;
4564 if (!ok_magic(ms)) {
4565 USAGE_ERROR_ACTION(ms,ms);
4568 if (n_elements != 0) {
4569 req = n_elements * elem_size;
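    /*
      Explanatory note: when either factor is wider than 16 bits, the
      multiplication is verified by division below; if both factors fit in
      16 bits, their product fits in any size_t of at least 32 bits, so the
      division can be skipped.
    */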
4570 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
4571 (req / n_elements != elem_size))
4572 req = MAX_SIZE_T; /* force downstream failure on overflow */
4574 mem = internal_malloc(ms, req);
4575 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4576 memset(mem, 0, req);
4580 void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
4583 mem = mspace_malloc(msp, bytes);
4585 else if (bytes >= MAX_REQUEST) {
4586 MALLOC_FAILURE_ACTION;
4588 #ifdef REALLOC_ZERO_BYTES_FREES
4589 else if (bytes == 0) {
4590 mspace_free(msp, oldmem);
4592 #endif /* REALLOC_ZERO_BYTES_FREES */
4594 size_t nb = request2size(bytes);
4595 mchunkptr oldp = mem2chunk(oldmem);
4597 mstate m = (mstate)msp;
4599 mstate m = get_mstate_for(oldp);
4601 USAGE_ERROR_ACTION(m, oldmem);
4604 #endif /* FOOTERS */
4605 if (!PREACTION(m)) {
4606 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
4609 check_inuse_chunk(m, newp);
4610 mem = chunk2mem(newp);
4613 mem = mspace_malloc(m, bytes);
4615 size_t oc = chunksize(oldp) - overhead_for(oldp);
4616 memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
4617 mspace_free(m, oldmem);
4625 __clib_nosanitize_addr
4626 void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {
4629 if (bytes >= MAX_REQUEST) {
4630 MALLOC_FAILURE_ACTION;
4633 size_t nb = request2size(bytes);
4634 mchunkptr oldp = mem2chunk(oldmem);
4636 mstate m = (mstate)msp;
4638 mstate m = get_mstate_for(oldp);
4639 (void)msp; /* placate people compiling -Wunused */
4641 USAGE_ERROR_ACTION(m, oldmem);
4644 #endif /* FOOTERS */
4645 if (!PREACTION(m)) {
4646 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
4649 check_inuse_chunk(m, newp);
4658 __clib_nosanitize_addr
4659 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
4660 mstate ms = (mstate)msp;
4661 if (!ok_magic(ms)) {
4662 USAGE_ERROR_ACTION(ms,ms);
4665 if (alignment <= MALLOC_ALIGNMENT)
4666 return mspace_malloc(msp, bytes);
4667 return internal_memalign(ms, alignment, bytes);
4670 void** mspace_independent_calloc(mspace msp, size_t n_elements,
4671 size_t elem_size, void* chunks[]) {
4672 size_t sz = elem_size; /* serves as 1-element array */
4673 mstate ms = (mstate)msp;
4674 if (!ok_magic(ms)) {
4675 USAGE_ERROR_ACTION(ms,ms);
4678 return ialloc(ms, n_elements, &sz, 3, chunks);
4681 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
4682 size_t sizes[], void* chunks[]) {
4683 mstate ms = (mstate)msp;
4684 if (!ok_magic(ms)) {
4685 USAGE_ERROR_ACTION(ms,ms);
4688 return ialloc(ms, n_elements, sizes, 0, chunks);
4691 size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) {
4692 return internal_bulk_free((mstate)msp, array, nelem);
4695 #if MALLOC_INSPECT_ALL
4696 void mspace_inspect_all(mspace msp,
4697 void(*handler)(void *start,
4700 void* callback_arg),
4702 mstate ms = (mstate)msp;
4704 if (!PREACTION(ms)) {
4705 internal_inspect_all(ms, handler, arg);
4710 USAGE_ERROR_ACTION(ms,ms);
4713 #endif /* MALLOC_INSPECT_ALL */
4715 int mspace_trim(mspace msp, size_t pad) {
4717 mstate ms = (mstate)msp;
4719 if (!PREACTION(ms)) {
4720 result = sys_trim(ms, pad);
4725 USAGE_ERROR_ACTION(ms,ms);
4730 #if !NO_MALLOC_STATS
4731 void mspace_malloc_stats(mspace msp) {
4732 mstate ms = (mstate)msp;
4734 internal_malloc_stats(ms);
4737 USAGE_ERROR_ACTION(ms,ms);
4740 #endif /* NO_MALLOC_STATS */
4742 size_t mspace_footprint(mspace msp) {
4744 mstate ms = (mstate)msp;
4746 result = ms->footprint;
4749 USAGE_ERROR_ACTION(ms,ms);
4754 size_t mspace_max_footprint(mspace msp) {
4756 mstate ms = (mstate)msp;
4758 result = ms->max_footprint;
4761 USAGE_ERROR_ACTION(ms,ms);
4766 size_t mspace_footprint_limit(mspace msp) {
4768 mstate ms = (mstate)msp;
4770 size_t maf = ms->footprint_limit;
4771 result = (maf == 0) ? MAX_SIZE_T : maf;
4774 USAGE_ERROR_ACTION(ms,ms);
4779 size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {
4781 mstate ms = (mstate)msp;
4784 result = granularity_align(1); /* Use minimal size */
4785 if (bytes == MAX_SIZE_T)
4786 result = 0; /* disable */
4788 result = granularity_align(bytes);
4789 ms->footprint_limit = result;
4792 USAGE_ERROR_ACTION(ms,ms);
4798 __clib_nosanitize_addr
4799 struct dlmallinfo mspace_mallinfo(mspace msp) {
4800 mstate ms = (mstate)msp;
4801 if (!ok_magic(ms)) {
4802 USAGE_ERROR_ACTION(ms,ms);
4804 return internal_mallinfo(ms);
4806 #endif /* NO_MALLINFO */
4808 __clib_nosanitize_addr
4809 size_t mspace_usable_size(const void* mem) {
4811 mchunkptr p = mem2chunk(mem);
4813 return chunksize(p) - overhead_for(p);
4818 int mspace_mallopt(int param_number, int value) {
4819 return change_mparam(param_number, value);
4822 #endif /* MSPACES */
4825 /* -------------------- Alternative MORECORE functions ------------------- */
4828 Guidelines for creating a custom version of MORECORE:
4830 * For best performance, MORECORE should allocate in multiples of pagesize.
4831 * MORECORE may allocate more memory than requested. (Or even less,
4832 but this will usually result in a malloc failure.)
4833 * MORECORE must not allocate memory when given argument zero, but
4834    instead return one past the end address of memory from previous nonzero call.
4836 * For best performance, consecutive calls to MORECORE with positive
4837 arguments should return increasing addresses, indicating that
4838 space has been contiguously extended.
4839 * Even though consecutive calls to MORECORE need not return contiguous
4840 addresses, it must be OK for malloc'ed chunks to span multiple
4841 regions in those cases where they do happen to be contiguous.
4842 * MORECORE need not handle negative arguments -- it may instead
4843 just return MFAIL when given negative arguments.
4844 Negative arguments are always multiples of pagesize. MORECORE
4845 must not misinterpret negative args as large positive unsigned
4846 args. You can suppress all such calls from even occurring by defining
4847    MORECORE_CANNOT_TRIM.
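  A minimal sketch that follows these rules, using one fixed static arena
  (illustrative only: the arena size and names are hypothetical, and
  MORECORE_CANNOT_TRIM is assumed so negative arguments never occur):

    #define MORECORE arena_morecore
    #define MORECORE_CANNOT_TRIM

    static char arena[1 << 20];        // 1 MB backing store
    static size_t arena_used;

    void *arena_morecore(ptrdiff_t size)
    {
      char *cur = arena + arena_used;
      if (size == 0)                   // query: one past current end
        return cur;
      if (size < 0 || (size_t) size > sizeof(arena) - arena_used)
        return (void *) MFAIL;         // cannot extend
      arena_used += (size_t) size;
      return cur;                      // old end; extension is contiguous
    }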
4849 As an example alternative MORECORE, here is a custom allocator
4850 kindly contributed for pre-OSX macOS. It uses virtually but not
4851 necessarily physically contiguous non-paged memory (locked in,
4852 present and won't get swapped out). You can use it by uncommenting
4853    this section, adding some #includes, and setting up the appropriate defines above:
4856 #define MORECORE osMoreCore
4858 There is also a shutdown routine that should somehow be called for
4859 cleanup upon program exit.
4861 #define MAX_POOL_ENTRIES 100
4862 #define MINIMUM_MORECORE_SIZE (64 * 1024U)
4863 static int next_os_pool;
4864 void *our_os_pools[MAX_POOL_ENTRIES];
4866 void *osMoreCore(int size)
4869 static void *sbrk_top = 0;
4873 if (size < MINIMUM_MORECORE_SIZE)
4874 size = MINIMUM_MORECORE_SIZE;
4875 if (CurrentExecutionLevel() == kTaskLevel)
4876 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
4879 return (void *) MFAIL;
4881 // save ptrs so they can be freed during cleanup
4882 our_os_pools[next_os_pool] = ptr;
4884 ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
4885 sbrk_top = (char *) ptr + size;
4890 // we don't currently support shrink behavior
4891 return (void *) MFAIL;
4899 // cleanup any allocated memory pools
4900 // called as last thing before shutting down driver
4902 void osCleanupMem(void)
4906 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
4909 PoolDeallocate(*ptr);
4917 /* -----------------------------------------------------------------------
4919 v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea
4920 * fix bad comparison in dlposix_memalign
4921 * don't reuse adjusted asize in sys_alloc
4922 * add LOCK_AT_FORK -- thanks to Kirill Artamonov for the suggestion
4923 * reduce compiler warnings -- thanks to all who reported/suggested these
4925 v2.8.5 Sun May 22 10:26:02 2011 Doug Lea (dl at gee)
4926 * Always perform unlink checks unless INSECURE
4927 * Add posix_memalign.
4928 * Improve realloc to expand in more cases; expose realloc_in_place.
4929 Thanks to Peter Buhr for the suggestion.
4930 * Add footprint_limit, inspect_all, bulk_free. Thanks
4931 to Barry Hayes and others for the suggestions.
4932 * Internal refactorings to avoid calls while holding locks
4933 * Use non-reentrant locks by default. Thanks to Roland McGrath
4935 * Small fixes to mspace_destroy, reset_on_error.
4936 * Various configuration extensions/changes. Thanks
4937 to all who contributed these.
4939 V2.8.4a Thu Apr 28 14:39:43 2011 (dl at gee.cs.oswego.edu)
4940 * Update Creative Commons URL
4942 V2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee)
4943 * Use zeros instead of prev foot for is_mmapped
4944 * Add mspace_track_large_chunks; thanks to Jean Brouwers
4945 * Fix set_inuse in internal_realloc; thanks to Jean Brouwers
4946 * Fix insufficient sys_alloc padding when using 16byte alignment
4947 * Fix bad error check in mspace_footprint
4948 * Adaptations for ptmalloc; thanks to Wolfram Gloger.
4949 * Reentrant spin locks; thanks to Earl Chew and others
4950 * Win32 improvements; thanks to Niall Douglas and Earl Chew
4951 * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options
4952 * Extension hook in malloc_state
4953 * Various small adjustments to reduce warnings on some compilers
4954 * Various configuration extensions/changes for more platforms. Thanks
4955 to all who contributed these.
4957 V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee)
4958 * Add max_footprint functions
4959 * Ensure all appropriate literals are size_t
4960 * Fix conditional compilation problem for some #define settings
4961 * Avoid concatenating segments with the one provided
4962 in create_mspace_with_base
4963 * Rename some variables to avoid compiler shadowing warnings
4964 * Use explicit lock initialization.
4965 * Better handling of sbrk interference.
4966 * Simplify and fix segment insertion, trimming and mspace_destroy
4967 * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
4968 * Thanks especially to Dennis Flanagan for help on these.
4970 V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee)
4971 * Fix memalign brace error.
4973 V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee)
4974 * Fix improper #endif nesting in C++
4975 * Add explicit casts needed for C++
4977 V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee)
4978 * Use trees for large bins
4980 * Use segments to unify sbrk-based and mmap-based system allocation,
4981 removing need for emulation on most platforms without sbrk.
4982 * Default safety checks
4983 * Optional footer checks. Thanks to William Robertson for the idea.
4984 * Internal code refactoring
4985 * Incorporate suggestions and platform-specific changes.
4986 Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
4987 Aaron Bachmann, Emery Berger, and others.
4988 * Speed up non-fastbin processing enough to remove fastbins.
4989 * Remove useless cfree() to avoid conflicts with other apps.
4990 * Remove internal memcpy, memset. Compilers handle builtins better.
4991 * Remove some options that no one ever used and rename others.
4993 V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
4994 * Fix malloc_state bitmap array misdeclaration
4996 V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
4997 * Allow tuning of FIRST_SORTED_BIN_SIZE
4998 * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
4999 * Better detection and support for non-contiguousness of MORECORE.
5000 Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
5001      * Bypass most of malloc if no frees. Thanks to Emery Berger.
5002      * Fix freeing of old top non-contiguous chunk in sysmalloc.
5003 * Raised default trim and map thresholds to 256K.
5004 * Fix mmap-related #defines. Thanks to Lubos Lunak.
5005 * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
5006 * Branch-free bin calculation
5007 * Default trim and mmap thresholds now 256K.
5009 V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
5010 * Introduce independent_comalloc and independent_calloc.
5011 Thanks to Michael Pachos for motivation and help.
5012 * Make optional .h file available
5013 * Allow > 2GB requests on 32bit systems.
5014 * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
5015 Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
5017 * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
5019 * memalign: check alignment arg
5020 * realloc: don't try to shift chunks backwards, since this
5021 leads to more fragmentation in some programs and doesn't
5022 seem to help in any others.
5023 * Collect all cases in malloc requiring system memory into sysmalloc
5024 * Use mmap as backup to sbrk
5025 * Place all internal state in malloc_state
5026 * Introduce fastbins (although similar to 2.5.1)
5027 * Many minor tunings and cosmetic improvements
5028 * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
5029 * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
5030 Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
5031 * Include errno.h to support default failure action.
5033 V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
5034 * return null for negative arguments
5035 * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
5036 * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
5037 (e.g. WIN32 platforms)
5038 * Cleanup header file inclusion for WIN32 platforms
5039 * Cleanup code to avoid Microsoft Visual C++ compiler complaints
5040 * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
5041 memory allocation routines
5042 * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
5043 * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
5044 usage of 'assert' in non-WIN32 code
5045 * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
5047 * Always call 'fREe()' rather than 'free()'
5049 V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
5050 * Fixed ordering problem with boundary-stamping
5052 V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
5053 * Added pvalloc, as recommended by H.J. Liu
5054 * Added 64bit pointer support mainly from Wolfram Gloger
5055 * Added anonymously donated WIN32 sbrk emulation
5056 * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
5057 * malloc_extend_top: fix mask error that caused wastage after
5059 * Add linux mremap support code from HJ Liu
5061 V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
5062 * Integrated most documentation with the code.
5063 * Add support for mmap, with help from
5064 Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5065 * Use last_remainder in more cases.
5066 * Pack bins using idea from colin@nyx10.cs.du.edu
5067 * Use ordered bins instead of best-fit threshold
5068 * Eliminate block-local decls to simplify tracing and debugging.
5069 * Support another case of realloc via move into top
5070 * Fix error occurring when initial sbrk_base not word-aligned.
5071 * Rely on page size for units instead of SBRK_UNIT to
5072 avoid surprises about sbrk alignment conventions.
5073 * Add mallinfo, mallopt. Thanks to Raymond Nijssen
5074 (raymond@es.ele.tue.nl) for the suggestion.
5075 * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
5076 * More precautions for cases where other routines call sbrk,
5077 courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5078 * Added macros etc., allowing use in linux libc from
5079 H.J. Lu (hjl@gnu.ai.mit.edu)
5080 * Inverted this history list
5082 V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
5083 * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
5084 * Removed all preallocation code since under current scheme
5085 the work required to undo bad preallocations exceeds
5086 the work saved in good cases for most test programs.
5087 * No longer use return list or unconsolidated bins since
5088 no scheme using them consistently outperforms those that don't
5089 given above changes.
5090 * Use best fit for very large chunks to prevent some worst-cases.
5091 * Added some support for debugging
5093 V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
5094 * Removed footers when chunks are in use. Thanks to
5095 Paul Wilson (wilson@cs.texas.edu) for the suggestion.
5097 V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
5098 * Added malloc_trim, with help from Wolfram Gloger
5099 (wmglo@Dent.MED.Uni-Muenchen.DE).
5101 V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
5103 V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
5104 * realloc: try to expand in both directions
5105 * malloc: swap order of clean-bin strategy;
5106 * realloc: only conditionally expand backwards
5107 * Try not to scavenge used bins
5108 * Use bin counts as a guide to preallocation
5109 * Occasionally bin return list chunks in first scan
5110 * Add a few optimizations from colin@nyx10.cs.du.edu
5112 V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
5113 * faster bin computation & slightly different binning
5114 * merged all consolidations to one part of malloc proper
5115 (eliminating old malloc_find_space & malloc_clean_bin)
5116 * Scan 2 returns chunks (not just 1)
5117 * Propagate failure in realloc if malloc returns 0
5118 * Add stuff to allow compilation on non-ANSI compilers
5119 from kpv@research.att.com
5121 V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
5122 * removed potential for odd address access in prev_chunk
5123 * removed dependency on getpagesize.h
5124 * misc cosmetics and a bit more internal documentation
5125 * anticosmetics: mangled names in macros to evade debugger strangeness
5126 * tested on sparc, hp-700, dec-mips, rs6000
5127 with gcc & native cc (hp, dec only) allowing
5128 Detlefs & Zorn comparison study (in SIGPLAN Notices.)
5130 Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
5131 * Based loosely on libg++-1.2X malloc. (It retains some of the overall
5132 structure of old version, but most details differ.)