2 This is a version (aka dlmalloc) of malloc/free/realloc written by
3 Doug Lea and released to the public domain, as explained at
4 http://creativecommons.org/publicdomain/zero/1.0/ Send questions,
5 comments, complaints, performance data, etc to dl@cs.oswego.edu
8 #include <vppinfra/dlmalloc.h>
10 /*------------------------------ internal #includes ---------------------- */
13 #pragma warning( disable : 4146 ) /* no "unsigned" warnings */
16 #include <stdio.h> /* for printing in malloc_stats */
17 #endif /* NO_MALLOC_STATS */
19 #include <errno.h> /* for MALLOC_FAILURE_ACTION */
20 #endif /* LACKS_ERRNO_H */
22 #if DLM_ABORT_ON_ASSERT_FAILURE
24 #define assert(x) if(!(x)) DLM_ABORT
25 #else /* DLM_ABORT_ON_ASSERT_FAILURE */
27 #endif /* DLM_ABORT_ON_ASSERT_FAILURE */
34 #if !defined(WIN32) && !defined(LACKS_TIME_H)
35 #include <time.h> /* for magic initialization */
37 #ifndef LACKS_STDLIB_H
38 #include <stdlib.h> /* for abort() */
39 #endif /* LACKS_STDLIB_H */
40 #ifndef LACKS_STRING_H
41 #include <string.h> /* for memset etc */
42 #endif /* LACKS_STRING_H */
44 #ifndef LACKS_STRINGS_H
45 #include <strings.h> /* for ffs */
46 #endif /* LACKS_STRINGS_H */
47 #endif /* USE_BUILTIN_FFS */
49 #ifndef LACKS_SYS_MMAN_H
50 /* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
51 #if (defined(linux) && !defined(__USE_GNU))
53 #include <sys/mman.h> /* for mmap */
56 #include <sys/mman.h> /* for mmap */
58 #endif /* LACKS_SYS_MMAN_H */
61 #endif /* LACKS_FCNTL_H */
62 #endif /* HAVE_MMAP */
63 #ifndef LACKS_UNISTD_H
64 #include <unistd.h> /* for sbrk, sysconf */
65 #else /* LACKS_UNISTD_H */
66 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
67 extern void* sbrk(ptrdiff_t);
68 #endif /* FreeBSD etc */
69 #endif /* LACKS_UNISTD_H */
71 /* Declarations for locking */
74 #if defined (__SVR4) && defined (__sun) /* solaris */
76 #elif !defined(LACKS_SCHED_H)
78 #endif /* solaris or LACKS_SCHED_H */
79 #if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
81 #endif /* USE_RECURSIVE_LOCKS ... */
82 #elif defined(_MSC_VER)
84 /* These are already defined on AMD64 builds */
87 #endif /* __cplusplus */
88 LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
89 LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
92 #endif /* __cplusplus */
94 #pragma intrinsic (_InterlockedCompareExchange)
95 #pragma intrinsic (_InterlockedExchange)
96 #define interlockedcompareexchange _InterlockedCompareExchange
97 #define interlockedexchange _InterlockedExchange
98 #elif defined(WIN32) && defined(__GNUC__)
99 #define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
100 #define interlockedexchange __sync_lock_test_and_set
102 #else /* USE_LOCKS */
103 #endif /* USE_LOCKS */
106 #define LOCK_AT_FORK 0
109 /* Declarations for bit scanning on win32 */
110 #if defined(_MSC_VER) && _MSC_VER>=1300
111 #ifndef BitScanForward /* Try to avoid pulling in WinNT.h */
114 #endif /* __cplusplus */
115 unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
116 unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
119 #endif /* __cplusplus */
121 #define BitScanForward _BitScanForward
122 #define BitScanReverse _BitScanReverse
123 #pragma intrinsic(_BitScanForward)
124 #pragma intrinsic(_BitScanReverse)
125 #endif /* BitScanForward */
126 #endif /* defined(_MSC_VER) && _MSC_VER>=1300 */
129 #ifndef malloc_getpagesize
130 # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
131 # ifndef _SC_PAGE_SIZE
132 # define _SC_PAGE_SIZE _SC_PAGESIZE
135 # ifdef _SC_PAGE_SIZE
136 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
138 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
139 extern size_t getpagesize();
140 # define malloc_getpagesize getpagesize()
142 # ifdef WIN32 /* use supplied emulation of getpagesize */
143 # define malloc_getpagesize getpagesize()
145 # ifndef LACKS_SYS_PARAM_H
146 # include <sys/param.h>
148 # ifdef EXEC_PAGESIZE
149 # define malloc_getpagesize EXEC_PAGESIZE
153 # define malloc_getpagesize NBPG
155 # define malloc_getpagesize (NBPG * CLSIZE)
159 # define malloc_getpagesize NBPC
162 # define malloc_getpagesize PAGESIZE
163 # else /* just guess */
164 # define malloc_getpagesize ((size_t)4096U)
175 /* ------------------- size_t and alignment properties -------------------- */
177 /* The byte and bit size of a size_t */
178 #define SIZE_T_SIZE (sizeof(size_t))
179 #define SIZE_T_BITSIZE (sizeof(size_t) << 3)
181 /* Some constants coerced to size_t */
182 /* Annoying but necessary to avoid errors on some platforms */
183 #define SIZE_T_ZERO ((size_t)0)
184 #define SIZE_T_ONE ((size_t)1)
185 #define SIZE_T_TWO ((size_t)2)
186 #define SIZE_T_FOUR ((size_t)4)
187 #define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
188 #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
189 #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
190 #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
192 /* The bit mask value corresponding to MALLOC_ALIGNMENT */
193 #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
195 /* True if address a has acceptable alignment */
196 #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
198 /* the number of bytes to offset an address to align it */
199 #define align_offset(A)\
200 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
201 ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
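/*
  For example, assuming the default MALLOC_ALIGNMENT of 2*sizeof(void*)
  (16 bytes on a typical 64-bit system, so CHUNK_ALIGN_MASK is 15):

    align_offset((void*)0x1000) == 0      already aligned
    align_offset((void*)0x1004) == 12     0x1004 + 12 == 0x1010
*/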
203 /* -------------------------- MMAP preliminaries ------------------------- */
206 If HAVE_MORECORE or HAVE_MMAP is false, we just define calls and
207 checks to fail so the compiler optimizer can delete code rather than
208 using so many "#if"s.
212 /* MORECORE and MMAP must return MFAIL on failure */
213 #define MFAIL ((void*)(MAX_SIZE_T))
214 #define CMFAIL ((char*)(MFAIL)) /* defined for convenience */
219 #define MUNMAP_DEFAULT(a, s) munmap((a), (s))
220 #define MMAP_PROT (PROT_READ|PROT_WRITE)
221 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
222 #define MAP_ANONYMOUS MAP_ANON
223 #endif /* MAP_ANON */
225 #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
226 #define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
227 #else /* MAP_ANONYMOUS */
229 Nearly all versions of mmap support MAP_ANONYMOUS, so the following
230 is unlikely to be needed, but is supplied just in case.
232 #define MMAP_FLAGS (MAP_PRIVATE)
233 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
234 #define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
235 (dev_zero_fd = open("/dev/zero", O_RDWR), \
236 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
237 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
238 #endif /* MAP_ANONYMOUS */
240 #define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
244 /* Win32 MMAP via VirtualAlloc */
245 static FORCEINLINE void* win32mmap(size_t size) {
246 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
247 return (ptr != 0)? ptr: MFAIL;
250 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
251 static FORCEINLINE void* win32direct_mmap(size_t size) {
252 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
254 return (ptr != 0)? ptr: MFAIL;
257 /* This function supports releasing coalesced segments */
258 static FORCEINLINE int win32munmap(void* ptr, size_t size) {
259 MEMORY_BASIC_INFORMATION minfo;
260 char* cptr = (char*)ptr;
262 if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
264 if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
265 minfo.State != MEM_COMMIT || minfo.RegionSize > size)
267 if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
269 cptr += minfo.RegionSize;
270 size -= minfo.RegionSize;
275 #define MMAP_DEFAULT(s) win32mmap(s)
276 #define MUNMAP_DEFAULT(a, s) win32munmap((a), (s))
277 #define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)
279 #endif /* HAVE_MMAP */
283 #define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
285 #endif /* HAVE_MREMAP */
288 * Define CALL_MORECORE
292 #define CALL_MORECORE(S) MORECORE(S)
294 #define CALL_MORECORE(S) MORECORE_DEFAULT(S)
295 #endif /* MORECORE */
296 #else /* HAVE_MORECORE */
297 #define CALL_MORECORE(S) MFAIL
298 #endif /* HAVE_MORECORE */
301 * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
304 #define USE_MMAP_BIT (SIZE_T_ONE)
307 #define CALL_MMAP(s) MMAP(s)
309 #define CALL_MMAP(s) MMAP_DEFAULT(s)
312 #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
314 #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s))
317 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
318 #else /* DIRECT_MMAP */
319 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
320 #endif /* DIRECT_MMAP */
321 #else /* HAVE_MMAP */
322 #define USE_MMAP_BIT (SIZE_T_ZERO)
324 #define MMAP(s) MFAIL
325 #define MUNMAP(a, s) (-1)
326 #define DIRECT_MMAP(s) MFAIL
327 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
328 #define CALL_MMAP(s) MMAP(s)
329 #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
330 #endif /* HAVE_MMAP */
335 #if HAVE_MMAP && HAVE_MREMAP
337 #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
339 #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
341 #else /* HAVE_MMAP && HAVE_MREMAP */
342 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
343 #endif /* HAVE_MMAP && HAVE_MREMAP */
345 /* mstate bit set if contiguous morecore disabled or failed */
346 #define USE_NONCONTIGUOUS_BIT (4U)
348 /* mstate bit set if no expansion allowed */
349 #define USE_NOEXPAND_BIT (8U)
351 /* trace allocations if set */
352 #define USE_TRACE_BIT (16U)
354 /* segment bit set in create_mspace_with_base */
355 #define EXTERN_BIT (8U)
358 /* --------------------------- Lock preliminaries ------------------------ */
361 When locks are defined, there is one global lock, plus
364 The global lock ensures that mparams.magic and other unique
365 mparams values are initialized only once. It also protects
366 sequences of calls to MORECORE. In many cases sys_alloc requires
367 two calls that should not be interleaved with calls by other
368 threads. This does not protect against direct calls to MORECORE
369 by other threads not using this lock, so there is still code to
370 cope as best we can with interference.
372 Per-mspace locks surround calls to malloc, free, etc.
373 By default, locks are simple non-reentrant mutexes.
375 Because lock-protected regions generally have bounded times, it is
376 OK to use the supplied simple spinlocks. Spinlocks are likely to
377 improve performance for lightly contended applications, but worsen
378 performance under heavy contention.
380 If USE_LOCKS is > 1, the definitions of lock routines here are
381 bypassed, in which case you will need to define the type MLOCK_T,
382 and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK
383 and TRY_LOCK. You must also declare a
384 static MLOCK_T malloc_global_mutex = { initialization values };.
389 #define USE_LOCK_BIT (0U)
390 #define INITIAL_LOCK(l) (0)
391 #define DESTROY_LOCK(l) (0)
392 #define ACQUIRE_MALLOC_GLOBAL_LOCK()
393 #define RELEASE_MALLOC_GLOBAL_LOCK()
397 /* ----------------------- User-defined locks ------------------------ */
398 /* Define your own lock implementation here */
399 /* #define INITIAL_LOCK(lk) ... */
400 /* #define DESTROY_LOCK(lk) ... */
401 /* #define ACQUIRE_LOCK(lk) ... */
402 /* #define RELEASE_LOCK(lk) ... */
403 /* #define TRY_LOCK(lk) ... */
404 /* static MLOCK_T malloc_global_mutex = ... */
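/*
  A minimal sketch of such a plug-in built on pthread_mutex_t (the
  built-in pthreads-based variant further below does the same, with
  optional recursive-lock support):

    #define MLOCK_T            pthread_mutex_t
    #define INITIAL_LOCK(lk)   pthread_mutex_init(lk, NULL)
    #define DESTROY_LOCK(lk)   pthread_mutex_destroy(lk)
    #define ACQUIRE_LOCK(lk)   pthread_mutex_lock(lk)
    #define RELEASE_LOCK(lk)   pthread_mutex_unlock(lk)
    #define TRY_LOCK(lk)       (!pthread_mutex_trylock(lk))
    static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
*/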
408 /* First, define CAS_LOCK and CLEAR_LOCK on ints */
409 /* Note CAS_LOCK defined to return 0 on success */
411 #if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
412 #define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1)
413 #define CLEAR_LOCK(sl) __sync_lock_release(sl)
415 #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
416 /* Custom spin locks for older gcc on x86 */
417 static FORCEINLINE int x86_cas_lock(int *sl) {
421 __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
423 : "r" (val), "m" (*(sl)), "0"(cmp)
428 static FORCEINLINE void x86_clear_lock(int* sl) {
432 __asm__ __volatile__ ("lock; xchgl %0, %1"
434 : "m" (*(sl)), "0"(prev)
438 #define CAS_LOCK(sl) x86_cas_lock(sl)
439 #define CLEAR_LOCK(sl) x86_clear_lock(sl)
441 #else /* Win32 MSC */
442 #define CAS_LOCK(sl) interlockedexchange(sl, (LONG)1)
443 #define CLEAR_LOCK(sl) interlockedexchange (sl, (LONG)0)
445 #endif /* ... gcc spin locks ... */
447 /* How to yield for a spin lock */
448 #define SPINS_PER_YIELD 63
449 #if defined(_MSC_VER)
450 #define SLEEP_EX_DURATION 50 /* delay for yield/sleep */
451 #define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE)
452 #elif defined (__SVR4) && defined (__sun) /* solaris */
453 #define SPIN_LOCK_YIELD thr_yield();
454 #elif !defined(LACKS_SCHED_H)
455 #define SPIN_LOCK_YIELD sched_yield();
457 #define SPIN_LOCK_YIELD
458 #endif /* ... yield ... */
460 #if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
461 /* Plain spin locks use single word (embedded in malloc_states) */
462 static int spin_acquire_lock(int *sl) {
464 while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
465 if ((++spins & SPINS_PER_YIELD) == 0) {
473 #define TRY_LOCK(sl) !CAS_LOCK(sl)
474 #define RELEASE_LOCK(sl) CLEAR_LOCK(sl)
475 #define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)
476 #define INITIAL_LOCK(sl) (*sl = 0)
477 #define DESTROY_LOCK(sl) (0)
478 static MLOCK_T malloc_global_mutex = 0;
480 #else /* USE_RECURSIVE_LOCKS */
481 /* types for lock owners */
483 #define THREAD_ID_T DWORD
484 #define CURRENT_THREAD GetCurrentThreadId()
485 #define EQ_OWNER(X,Y) ((X) == (Y))
488 Note: the following assume that pthread_t is a type that can be
489 initialized to (casted) zero. If this is not the case, you will need to
490 somehow redefine these or not use spin locks.
492 #define THREAD_ID_T pthread_t
493 #define CURRENT_THREAD pthread_self()
494 #define EQ_OWNER(X,Y) pthread_equal(X, Y)
497 struct malloc_recursive_lock {
500 THREAD_ID_T threadid;
503 #define MLOCK_T struct malloc_recursive_lock
504 static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};
506 static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) {
513 static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
514 THREAD_ID_T mythreadid = CURRENT_THREAD;
517 if (*((volatile int *)(&lk->sl)) == 0) {
518 if (!CAS_LOCK(&lk->sl)) {
519 lk->threadid = mythreadid;
524 else if (EQ_OWNER(lk->threadid, mythreadid)) {
528 if ((++spins & SPINS_PER_YIELD) == 0) {
534 static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) {
535 THREAD_ID_T mythreadid = CURRENT_THREAD;
536 if (*((volatile int *)(&lk->sl)) == 0) {
537 if (!CAS_LOCK(&lk->sl)) {
538 lk->threadid = mythreadid;
543 else if (EQ_OWNER(lk->threadid, mythreadid)) {
550 #define RELEASE_LOCK(lk) recursive_release_lock(lk)
551 #define TRY_LOCK(lk) recursive_try_lock(lk)
552 #define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk)
553 #define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
554 #define DESTROY_LOCK(lk) (0)
555 #endif /* USE_RECURSIVE_LOCKS */
557 #elif defined(WIN32) /* Win32 critical sections */
558 #define MLOCK_T CRITICAL_SECTION
559 #define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0)
560 #define RELEASE_LOCK(lk) LeaveCriticalSection(lk)
561 #define TRY_LOCK(lk) TryEnterCriticalSection(lk)
562 #define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))
563 #define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0)
564 #define NEED_GLOBAL_LOCK_INIT
566 static MLOCK_T malloc_global_mutex;
567 static volatile LONG malloc_global_mutex_status;
569 /* Use spin loop to initialize global lock */
570 static void init_malloc_global_mutex() {
572 long stat = malloc_global_mutex_status;
575 /* transition to < 0 while initializing, then to > 0 */
577 interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) {
578 InitializeCriticalSection(&malloc_global_mutex);
579 interlockedexchange(&malloc_global_mutex_status, (LONG)1);
586 #else /* pthreads-based locks */
587 #define MLOCK_T pthread_mutex_t
588 #define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk)
589 #define RELEASE_LOCK(lk) pthread_mutex_unlock(lk)
590 #define TRY_LOCK(lk) (!pthread_mutex_trylock(lk))
591 #define INITIAL_LOCK(lk) pthread_init_lock(lk)
592 #define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)
594 #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
595 /* Cope with old-style linux recursive lock initialization by adding */
596 /* skipped internal declaration from pthread.h */
597 extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
599 #define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
600 #define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
601 #endif /* USE_RECURSIVE_LOCKS ... */
603 static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
605 static int pthread_init_lock (MLOCK_T *lk) {
606 pthread_mutexattr_t attr;
607 if (pthread_mutexattr_init(&attr)) return 1;
608 #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
609 if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
611 if (pthread_mutex_init(lk, &attr)) return 1;
612 if (pthread_mutexattr_destroy(&attr)) return 1;
616 #endif /* ... lock types ... */
618 /* Common code for all lock types */
619 #define USE_LOCK_BIT (2U)
621 #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
622 #define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
625 #ifndef RELEASE_MALLOC_GLOBAL_LOCK
626 #define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);
629 #endif /* USE_LOCKS */
631 /* ----------------------- Chunk representations ------------------------ */
634 (The following includes lightly edited explanations by Colin Plumb.)
636 The malloc_chunk declaration below is misleading (but accurate and
637 necessary). It declares a "view" into memory allowing access to
638 necessary fields at known offsets from a given base.
640 Chunks of memory are maintained using a `boundary tag' method as
641 originally described by Knuth. (See the paper by Paul Wilson
642 ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
643 techniques.) Sizes of free chunks are stored both in the front of
644 each chunk and at the end. This makes consolidating fragmented
645 chunks into bigger chunks fast. The head fields also hold bits
646 representing whether chunks are free or in use.
648 Here are some pictures to make it clearer. They are "exploded" to
649 show that the state of a chunk can be thought of as extending from
650 the high 31 bits of the head field of its header through the
651 prev_foot and PINUSE_BIT bit of the following chunk header.
653 A chunk that's in use looks like:
655 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
656 | Size of previous chunk (if P = 0) |
657 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
658 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
659 | Size of this chunk 1| +-+
660 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
666 +- size - sizeof(size_t) available payload bytes -+
670 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
671 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
672 | Size of next chunk (may or may not be in use) | +-+
673 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
675 And if it's free, it looks like this:
678 | User payload (must be in use, or we would have merged!) |
679 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
680 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
681 | Size of this chunk 0| +-+
682 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
684 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
686 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
688 +- size - sizeof(struct chunk) unused bytes -+
690 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
691 | Size of this chunk |
692 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
693 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
694 | Size of next chunk (must be in use, or we would have merged)| +-+
695 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
699 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
702 Note that since we always merge adjacent free chunks, the chunks
703 adjacent to a free chunk must be in use.
705 Given a pointer to a chunk (which can be derived trivially from the
706 payload pointer) we can, in O(1) time, find out whether the adjacent
707 chunks are free, and if so, unlink them from the lists that they
708 are on and merge them with the current chunk.
710 Chunks always begin on even word boundaries, so the mem portion
711 (which is returned to the user) is also on an even word boundary, and
712 thus at least double-word aligned.
714 The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
715 chunk size (which is always a multiple of two words), is an in-use
716 bit for the *previous* chunk. If that bit is *clear*, then the
717 word before the current chunk size contains the previous chunk
718 size, and can be used to find the front of the previous chunk.
719 The very first chunk allocated always has this bit set, preventing
720 access to non-existent (or non-owned) memory. If pinuse is set for
721 any given chunk, then you CANNOT determine the size of the
722 previous chunk, and might even get a memory addressing fault when
725 The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
726 the chunk size redundantly records whether the current chunk is
727 inuse (unless the chunk is mmapped). This redundancy enables usage
728 checks within free and realloc, and reduces indirection when freeing
729 and consolidating chunks.
731 Each freshly allocated chunk must have both cinuse and pinuse set.
732 That is, each allocated chunk borders either a previously allocated
733 and still in-use chunk, or the base of its memory arena. This is
734 ensured by making all allocations from the `lowest' part of any
735 found chunk. Further, no free chunk physically borders another one,
736 so each free chunk is known to be preceded and followed by either
737 inuse chunks or the ends of memory.
739 Note that the `foot' of the current chunk is actually represented
740 as the prev_foot of the NEXT chunk. This makes it easier to
741 deal with alignments etc but can be very confusing when trying
742 to extend or adapt this code.
744 The exceptions to all this are
746 1. The special chunk `top' is the top-most available chunk (i.e.,
747 the one bordering the end of available memory). It is treated
748 specially. Top is never included in any bin, is used only if
749 no other chunk is available, and is released back to the
750 system if it is very large (see M_TRIM_THRESHOLD). In effect,
751 the top chunk is treated as larger (and thus less well
752 fitting) than any other available chunk. The top chunk
753 doesn't update its trailing size field since there is no next
754 contiguous chunk that would have to index off it. However,
755 space is still allocated for it (TOP_FOOT_SIZE) to enable
756 separation or merging when space is extended.
758 3. Chunks allocated via mmap have both cinuse and pinuse bits
759 cleared in their head fields. Because they are allocated
760 one-by-one, each must carry its own prev_foot field, which is
761 also used to hold the offset this chunk has within its mmapped
762 region, which is needed to preserve alignment. Each mmapped
763 chunk is trailed by the first two fields of a fake next-chunk
764 for sake of usage checks.
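
  For example, given a payload pointer mem returned to the user, the
  macros defined below recover the enclosing chunk and its physical
  neighbors (illustrative only; prev_chunk is valid only when the
  pinuse bit of p is clear):

    mchunkptr p    = mem2chunk(mem);     two size_t words before mem
    size_t    sz   = chunksize(p);       head with its flag bits masked off
    mchunkptr next = next_chunk(p);      (char*)p + sz
    mchunkptr prev = prev_chunk(p);      (char*)p - p->prev_foot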
768 struct malloc_chunk {
769 size_t prev_foot; /* Size of previous chunk (if free). */
770 size_t head; /* Size and inuse bits. */
771 struct malloc_chunk* fd; /* double links -- used only if free. */
772 struct malloc_chunk* bk;
775 typedef struct malloc_chunk mchunk;
776 typedef struct malloc_chunk* mchunkptr;
777 typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
778 typedef unsigned int bindex_t; /* Described below */
779 typedef unsigned int binmap_t; /* Described below */
780 typedef unsigned int flag_t; /* The type of various bit flag sets */
782 /* ------------------- Chunks sizes and alignments ----------------------- */
784 #define MCHUNK_SIZE (sizeof(mchunk))
787 #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
789 #define CHUNK_OVERHEAD (SIZE_T_SIZE)
792 /* MMapped chunks need a second word of overhead ... */
793 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
794 /* ... and additional padding for fake next-chunk at foot */
795 #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
797 /* The smallest size we can malloc is an aligned minimal chunk */
798 #define MIN_CHUNK_SIZE\
799 ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
801 /* conversion from malloc headers to user pointers, and back */
802 #define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
803 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
804 /* chunk associated with aligned address A */
805 #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
807 /* Bounds on request (not chunk) sizes. */
808 #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
809 #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
811 /* pad request bytes into a usable size */
812 #define pad_request(req) \
813 (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
815 /* pad request, checking for minimum (but not maximum) */
816 #define request2size(req) \
817 (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
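/*
  For example, on a typical 64-bit build without FOOTERS (so that
  CHUNK_OVERHEAD is one size_t and MALLOC_ALIGNMENT is 16):

    MIN_CHUNK_SIZE    == 32
    request2size(1)   == 32      below MIN_REQUEST, rounded up to the minimum
    request2size(100) == 112     (100 + 8 + 15) & ~15
*/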
820 /* ------------------ Operations on head and foot fields ----------------- */
823 The head field of a chunk is or'ed with PINUSE_BIT when the previous
824 adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is
825 in use, unless mmapped, in which case both bits are cleared.
827 FLAG4_BIT is not used by this malloc, but might be useful in extensions.
830 #define PINUSE_BIT (SIZE_T_ONE)
831 #define CINUSE_BIT (SIZE_T_TWO)
832 #define FLAG4_BIT (SIZE_T_FOUR)
833 #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
834 #define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
836 /* Head value for fenceposts */
837 #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
839 /* extraction of fields from head words */
840 #define cinuse(p) ((p)->head & CINUSE_BIT)
841 #define pinuse(p) ((p)->head & PINUSE_BIT)
842 #define flag4inuse(p) ((p)->head & FLAG4_BIT)
843 #define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
844 #define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)
846 #define chunksize(p) ((p)->head & ~(FLAG_BITS))
848 #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
849 #define set_flag4(p) ((p)->head |= FLAG4_BIT)
850 #define clear_flag4(p) ((p)->head &= ~FLAG4_BIT)
852 /* Treat space at ptr +/- offset as a chunk */
853 #define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
854 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
856 /* Ptr to next or previous physical malloc_chunk. */
857 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
858 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
860 /* extract next chunk's pinuse bit */
861 #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
863 /* Get/set size at footer */
864 #define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
865 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
867 /* Set size, pinuse bit, and foot */
868 #define set_size_and_pinuse_of_free_chunk(p, s)\
869 ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
871 /* Set size, pinuse bit, foot, and clear next pinuse */
872 #define set_free_with_pinuse(p, s, n)\
873 (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
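/*
  For example (illustrative sketch), when a chunk p of size s becomes free
  while its physical successor n stays in use, the bookkeeping performed by
  set_free_with_pinuse(p, s, n) amounts to:

    clear_pinuse(n);             n records that its predecessor is now free
    p->head = s | PINUSE_BIT;    p's own predecessor must be in use, so pinuse stays set
    set_foot(p, s);              i.e. n->prev_foot = s, the boundary tag
*/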
875 /* Get the internal overhead associated with chunk p */
876 #define overhead_for(p)\
877 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
879 /* Return true if malloced space is not necessarily cleared */
881 #define calloc_must_clear(p) (!is_mmapped(p))
882 #else /* MMAP_CLEARS */
883 #define calloc_must_clear(p) (1)
884 #endif /* MMAP_CLEARS */
886 /* ---------------------- Overlaid data structures ----------------------- */
889 When chunks are not in use, they are treated as nodes of either
892 "Small" chunks are stored in circular doubly-linked lists, and look
895 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
896 | Size of previous chunk |
897 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
898 `head:' | Size of chunk, in bytes |P|
899 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
900 | Forward pointer to next chunk in list |
901 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
902 | Back pointer to previous chunk in list |
903 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
904 | Unused space (may be 0 bytes long) .
907 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
908 `foot:' | Size of chunk, in bytes |
909 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
911 Larger chunks are kept in a form of bitwise digital trees (aka
912 tries) keyed on chunksizes. Because malloc_tree_chunks are only for
913 free chunks greater than 256 bytes, their size doesn't impose any
914 constraints on user chunk sizes. Each node looks like:
916 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
917 | Size of previous chunk |
918 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
919 `head:' | Size of chunk, in bytes |P|
920 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
921 | Forward pointer to next chunk of same size |
922 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
923 | Back pointer to previous chunk of same size |
924 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
925 | Pointer to left child (child[0]) |
926 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
927 | Pointer to right child (child[1]) |
928 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
929 | Pointer to parent |
930 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
931 | bin index of this chunk |
932 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
935 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
936 `foot:' | Size of chunk, in bytes |
937 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
939 Each tree holding treenodes is a tree of unique chunk sizes. Chunks
940 of the same size are arranged in a circularly-linked list, with only
941 the oldest chunk (the next to be used, in our FIFO ordering)
942 actually in the tree. (Tree members are distinguished by a non-null
943 parent pointer.)  If a chunk with the same size as an existing node
944 is inserted, it is linked off the existing node using pointers that
945 work in the same way as fd/bk pointers of small chunks.
947 Each tree contains a power of 2 sized range of chunk sizes (the
948 smallest is 0x100 <= x < 0x180), which is divided in half at each
949 tree level, with the chunks in the smaller half of the range (0x100
950 <= x < 0x140 for the top node) in the left subtree and the larger
951 half (0x140 <= x < 0x180) in the right subtree. This is, of course,
952 done by inspecting individual bits.
954 Using these rules, each node's left subtree contains all smaller
955 sizes than its right subtree. However, the node at the root of each
956 subtree has no particular ordering relationship to either. (The
957 dividing line between the subtree sizes is based on trie relation.)
958 If we remove the last chunk of a given size from the interior of the
959 tree, we need to replace it with a leaf node. The tree ordering
960 rules permit a node to be replaced by any leaf below it.
962 The smallest chunk in a tree (a common operation in a best-fit
963 allocator) can be found by walking a path to the leftmost leaf in
964 the tree. Unlike a usual binary tree, where we follow left child
965 pointers until we reach a null, here we follow the right child
966 pointer any time the left one is null, until we reach a leaf with
967 both child pointers null. The smallest chunk in the tree will be
968 somewhere along that path.
970 The worst case number of steps to add, find, or remove a node is
971 bounded by the number of bits differentiating chunks within
972 bins. Under current bin calculations, this ranges from 6 up to 21
973 (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
974 is of course much better.
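
  For example, with TREEBIN_SHIFT == 8 the first few treebins cover
  (per minsize_for_tree_index below):

    bin 0: 0x100 <= size < 0x180      bin 1: 0x180 <= size < 0x200
    bin 2: 0x200 <= size < 0x300      bin 3: 0x300 <= size < 0x400

  Within bin 0 the root level splits on bit 6: sizes 0x100-0x13f go to
  the left subtree, 0x140-0x17f to the right, and each level below
  splits on the next lower bit.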
977 struct malloc_tree_chunk {
978 /* The first four fields must be compatible with malloc_chunk */
981 struct malloc_tree_chunk* fd;
982 struct malloc_tree_chunk* bk;
984 struct malloc_tree_chunk* child[2];
985 struct malloc_tree_chunk* parent;
989 typedef struct malloc_tree_chunk tchunk;
990 typedef struct malloc_tree_chunk* tchunkptr;
991 typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
993 /* A little helper macro for trees */
994 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
996 /* ----------------------------- Segments -------------------------------- */
999 Each malloc space may include non-contiguous segments, held in a
1000 list headed by an embedded malloc_segment record representing the
1001 top-most space. Segments also include flags holding properties of
1002 the space. Large chunks that are directly allocated by mmap are not
1003 included in this list. They are instead independently created and
1004 destroyed without otherwise keeping track of them.
1006 Segment management mainly comes into play for spaces allocated by
1007 MMAP. Any call to MMAP might or might not return memory that is
1008 adjacent to an existing segment. MORECORE normally contiguously
1009 extends the current space, so this space is almost always adjacent,
1010 which is simpler and faster to deal with. (This is why MORECORE is
1011 used preferentially to MMAP when both are available -- see
1012 sys_alloc.) When allocating using MMAP, we don't use any of the
1013 hinting mechanisms (inconsistently) supported in various
1014 implementations of unix mmap, or distinguish reserving from
1015 committing memory. Instead, we just ask for space, and exploit
1016 contiguity when we get it. It is probably possible to do
1017 better than this on some systems, but no general scheme seems
1018 to be significantly better.
1020 Management entails a simpler variant of the consolidation scheme
1021 used for chunks to reduce fragmentation -- new adjacent memory is
1022 normally prepended or appended to an existing segment. However,
1023 there are limitations compared to chunk consolidation that mostly
1024 reflect the fact that segment processing is relatively infrequent
1025 (occurring only when getting memory from system) and that we
1026 don't expect to have huge numbers of segments:
1028 * Segments are not indexed, so traversal requires linear scans. (It
1029 would be possible to index these, but is not worth the extra
1030 overhead and complexity for most programs on most platforms.)
1031 * New segments are only appended to old ones when holding top-most
1032 memory; if they cannot be prepended to others, they are held in
1035 Except for the top-most segment of an mstate, each segment record
1036 is kept at the tail of its segment. Segments are added by pushing
1037 segment records onto the list headed by &mstate.seg for the
1040 Segment flags control allocation/merge/deallocation policies:
1041 * If EXTERN_BIT set, then we did not allocate this segment,
1042 and so should not try to deallocate or merge with others.
1043 (This currently holds only for the initial segment passed
1044 into create_mspace_with_base.)
1045 * If USE_MMAP_BIT set, the segment may be merged with
1046 other surrounding mmapped segments and trimmed/de-allocated
1048 * If neither bit is set, then the segment was obtained using
1049 MORECORE so can be merged with surrounding MORECORE'd segments
1050 and deallocated/trimmed using MORECORE with negative arguments.
1053 struct malloc_segment {
1054 char* base; /* base address */
1055 size_t size; /* allocated size */
1056 struct malloc_segment* next; /* ptr to next segment */
1057 flag_t sflags; /* mmap and extern flag */
1060 #define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
1061 #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
1063 typedef struct malloc_segment msegment;
1064 typedef struct malloc_segment* msegmentptr;
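/*
  For example, the address space currently held in segments of an mstate m
  can be totaled by an illustrative walk of this list (the list is headed
  by a record embedded in the mstate itself, as segment_holding below
  shows):

    size_t total = 0;
    msegmentptr s;
    for (s = &m->seg; s != 0; s = s->next)
      total += s->size;
*/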
1066 /* ---------------------------- malloc_state ----------------------------- */
1069 A malloc_state holds all of the bookkeeping for a space.
1070 The main fields are:
1073 The topmost chunk of the currently active segment. Its size is
1074 cached in topsize. The actual size of topmost space is
1075 topsize+TOP_FOOT_SIZE, which includes space reserved for adding
1076 fenceposts and segment records if necessary when getting more
1077 space from the system. The size at which to autotrim top is
1078 cached from mparams in trim_check, except that it is disabled if
1081 Designated victim (dv)
1082 This is the preferred chunk for servicing small requests that
1083 don't have exact fits. It is normally the chunk split off most
1084 recently to service another small request. Its size is cached in
1085 dvsize. The link fields of this chunk are not maintained since it
1086 is not kept in a bin.
1089 An array of bin headers for free chunks. These bins hold chunks
1090 with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
1091 chunks of all the same size, spaced 8 bytes apart. To simplify
1092 use in double-linked lists, each bin header acts as a malloc_chunk
1093 pointing to the real first node, if it exists (else pointing to
1094 itself). This avoids special-casing for headers. But to avoid
1095 waste, we allocate only the fd/bk pointers of bins, and then use
1096 repositioning tricks to treat these as the fields of a chunk.
1099 Treebins are pointers to the roots of trees holding a range of
1100 sizes. There are 2 equally spaced treebins for each power of two
1101 from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
1105 There is one bit map for small bins ("smallmap") and one for
1106 treebins ("treemap"). Each bin sets its bit when non-empty, and
1107 clears the bit when empty. Bit operations are then used to avoid
1108 bin-by-bin searching -- nearly all "search" is done without ever
1109 looking at bins that won't be selected. The bit maps
1110 conservatively use 32 bits per map word, even on a 64-bit system.
1111 For a good description of some of the bit-based techniques used
1112 here, see Henry S. Warren Jr's book "Hacker's Delight" (and
1113 supplement at http://hackersdelight.org/). Many of these are
1114 intended to reduce the branchiness of paths through malloc etc, as
1115 well as to reduce the number of memory locations read or written.
1118 A list of segments headed by an embedded malloc_segment record
1119 representing the initial space.
1121 Address check support
1122 The least_addr field is the least address ever obtained from
1123 MORECORE or MMAP. Attempted frees and reallocs of any address less
1124 than this are trapped (unless INSECURE is defined).
1127 A cross-check field that should always hold the same value as mparams.magic.
1129 Max allowed footprint
1130 The maximum allowed bytes to allocate from system (zero means no limit)
1133 Bits recording whether to use MMAP, locks, or contiguous MORECORE
1136 Each space keeps track of current and maximum system memory
1137 obtained via MORECORE or MMAP.
1140 Fields holding the amount of unused topmost memory that should trigger
1141 trimming, and a counter to force periodic scanning to release unused
1142 non-topmost segments.
1145 If USE_LOCKS is defined, the "mutex" lock is acquired and released
1146 around every public call using this mspace.
1149 A void* pointer and a size_t field that can be used to help implement
1150 extensions to this malloc.
1153 /* Bin types, widths and sizes */
1154 #define NSMALLBINS (32U)
1155 #define NTREEBINS (32U)
1156 #define SMALLBIN_SHIFT (3U)
1157 #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
1158 #define TREEBIN_SHIFT (8U)
1159 #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
1160 #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
1161 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
1163 struct malloc_state {
1172 size_t release_checks;
1174 mchunkptr smallbins[(NSMALLBINS+1)*2];
1175 tbinptr treebins[NTREEBINS];
1177 size_t max_footprint;
1178 size_t footprint_limit; /* zero means no limit */
1181 MLOCK_T mutex; /* locate lock among fields that rarely change */
1182 #endif /* USE_LOCKS */
1184 void* extp; /* Unused but available for extensions */
1188 typedef struct malloc_state* mstate;
1190 /* ------------- Global malloc_state and malloc_params ------------------- */
1193 malloc_params holds global properties, including those that can be
1194 dynamically set using mallopt. There is a single instance, mparams,
1195 initialized in init_mparams. Note that the non-zeroness of "magic"
1196 also serves as an initialization flag.
1199 struct malloc_params {
1203 size_t mmap_threshold;
1204 size_t trim_threshold;
1205 flag_t default_mflags;
1208 static struct malloc_params mparams;
1210 /* Ensure mparams initialized */
1211 #define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
1215 /* The global malloc_state used for all non-"mspace" calls */
1216 static struct malloc_state _gm_;
1218 #define is_global(M) ((M) == &_gm_)
1220 #endif /* !ONLY_MSPACES */
1222 #define is_initialized(M) ((M)->top != 0)
1224 /* -------------------------- system alloc setup ------------------------- */
1226 /* Operations on mflags */
1228 #define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
1229 #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
1231 #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
1233 #define disable_lock(M)
1236 #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
1237 #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
1239 #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
1241 #define disable_mmap(M)
1244 #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
1245 #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
1246 #define use_noexpand(M) ((M)->mflags & USE_NOEXPAND_BIT)
1247 #define disable_expand(M) ((M)->mflags |= USE_NOEXPAND_BIT)
1248 #define use_trace(M) ((M)->mflags & USE_TRACE_BIT)
1249 #define enable_trace(M) ((M)->mflags |= USE_TRACE_BIT)
1250 #define disable_trace(M) ((M)->mflags &= ~USE_TRACE_BIT)
1252 #define set_lock(M,L)\
1253 ((M)->mflags = (L)?\
1254 ((M)->mflags | USE_LOCK_BIT) :\
1255 ((M)->mflags & ~USE_LOCK_BIT))
1257 /* page-align a size */
1258 #define page_align(S)\
1259 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
1261 /* granularity-align a size */
1262 #define granularity_align(S)\
1263 (((S) + (mparams.granularity - SIZE_T_ONE))\
1264 & ~(mparams.granularity - SIZE_T_ONE))
1267 /* For mmap, use granularity alignment on windows, else page-align */
1269 #define mmap_align(S) granularity_align(S)
1271 #define mmap_align(S) page_align(S)
1274 /* For sys_alloc, enough padding to ensure the request can be malloc'ed on success */
1275 #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
1277 #define is_page_aligned(S)\
1278 (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
1279 #define is_granularity_aligned(S)\
1280 (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
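/*
  For example, with a 4096-byte page size and a 64KiB granularity:

    page_align(1)             == 4096
    page_align(8192)          == 8192
    granularity_align(100000) == 131072     two 64KiB units
    is_page_aligned(0x3000)   is true
*/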
1282 /* True if segment S holds address A */
1283 #define segment_holds(S, A)\
1284 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
1286 /* Return segment holding given address */
1287 static msegmentptr segment_holding(mstate m, char* addr) {
1288 msegmentptr sp = &m->seg;
1290 if (addr >= sp->base && addr < sp->base + sp->size)
1292 if ((sp = sp->next) == 0)
1297 /* Return true if segment contains a segment link */
1298 static int has_segment_link(mstate m, msegmentptr ss) {
1299 msegmentptr sp = &m->seg;
1301 if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
1303 if ((sp = sp->next) == 0)
1308 #ifndef MORECORE_CANNOT_TRIM
1309 #define should_trim(M,s) ((s) > (M)->trim_check)
1310 #else /* MORECORE_CANNOT_TRIM */
1311 #define should_trim(M,s) (0)
1312 #endif /* MORECORE_CANNOT_TRIM */
1315 TOP_FOOT_SIZE is padding at the end of a segment, including space
1316 that may be needed to place segment records and fenceposts when new
1317 noncontiguous segments are added.
1319 #define TOP_FOOT_SIZE\
1320 (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
1323 /* ------------------------------- Hooks -------------------------------- */
1326 PREACTION should be defined to return 0 on success, and nonzero on
1327 failure. If you are not using locking, you can redefine these to do
1332 #define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
1333 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
1334 #else /* USE_LOCKS */
1337 #define PREACTION(M) (0)
1338 #endif /* PREACTION */
1341 #define POSTACTION(M)
1342 #endif /* POSTACTION */
1344 #endif /* USE_LOCKS */
1347 CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
1348 USAGE_ERROR_ACTION is triggered on detected bad frees and
1349 reallocs. The argument p is an address that might have triggered the
1350 fault. It is ignored by the two predefined actions, but might be
1351 useful in custom actions that try to help diagnose errors.
1354 #if PROCEED_ON_ERROR
1356 /* A count of the number of corruption errors causing resets */
1357 int malloc_corruption_error_count;
1359 /* default corruption action */
1360 static void reset_on_error(mstate m);
1362 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
1363 #define USAGE_ERROR_ACTION(m, p)
1365 #else /* PROCEED_ON_ERROR */
1367 #ifndef CORRUPTION_ERROR_ACTION
1368 #define CORRUPTION_ERROR_ACTION(m) DLM_ABORT
1369 #endif /* CORRUPTION_ERROR_ACTION */
1371 #ifndef USAGE_ERROR_ACTION
1372 #define USAGE_ERROR_ACTION(m,p) DLM_ABORT
1373 #endif /* USAGE_ERROR_ACTION */
1375 #endif /* PROCEED_ON_ERROR */
1378 /* -------------------------- Debugging setup ---------------------------- */
1382 #define check_free_chunk(M,P)
1383 #define check_inuse_chunk(M,P)
1384 #define check_malloced_chunk(M,P,N)
1385 #define check_mmapped_chunk(M,P)
1386 #define check_malloc_state(M)
1387 #define check_top_chunk(M,P)
1390 #define check_free_chunk(M,P) do_check_free_chunk(M,P)
1391 #define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
1392 #define check_top_chunk(M,P) do_check_top_chunk(M,P)
1393 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
1394 #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
1395 #define check_malloc_state(M) do_check_malloc_state(M)
1397 static void do_check_any_chunk(mstate m, mchunkptr p);
1398 static void do_check_top_chunk(mstate m, mchunkptr p);
1399 static void do_check_mmapped_chunk(mstate m, mchunkptr p);
1400 static void do_check_inuse_chunk(mstate m, mchunkptr p);
1401 static void do_check_free_chunk(mstate m, mchunkptr p);
1402 static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
1403 static void do_check_tree(mstate m, tchunkptr t);
1404 static void do_check_treebin(mstate m, bindex_t i);
1405 static void do_check_smallbin(mstate m, bindex_t i);
1406 static void do_check_malloc_state(mstate m);
1407 static int bin_find(mstate m, mchunkptr x);
1408 static size_t traverse_and_check(mstate m);
1411 /* ---------------------------- Indexing Bins ---------------------------- */
1413 #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
1414 #define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT)
1415 #define small_index2size(i) ((i) << SMALLBIN_SHIFT)
1416 #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
1418 /* addressing by index. See above about smallbin repositioning */
1419 #define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
1420 #define treebin_at(M,i) (&((M)->treebins[i]))
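/*
  For example, with SMALLBIN_WIDTH == 8, a free 64-byte chunk has
  small_index(64) == 8 and lives in the circular list headed by
  smallbin_at(M, 8).  That header is just &M->smallbins[16] cast to a
  chunk pointer: its fd/bk fields land on the next two array slots,
  which is the repositioning trick mentioned above.  An illustrative
  lookup in the global state:

    mchunkptr b = smallbin_at(gm, small_index(64));   bin header
    mchunkptr p = b->fd;            first chunk, or b itself if the bin is empty
*/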
1422 /* assign tree index for size S to variable I. Use x86 asm if possible */
1423 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
1424 #define compute_tree_index(S, I)\
1426 unsigned int X = S >> TREEBIN_SHIFT;\
1429 else if (X > 0xFFFF)\
1432 unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \
1433 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1437 #elif defined (__INTEL_COMPILER)
1438 #define compute_tree_index(S, I)\
1440 size_t X = S >> TREEBIN_SHIFT;\
1443 else if (X > 0xFFFF)\
1446 unsigned int K = _bit_scan_reverse (X); \
1447 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1451 #elif defined(_MSC_VER) && _MSC_VER>=1300
1452 #define compute_tree_index(S, I)\
1454 size_t X = S >> TREEBIN_SHIFT;\
1457 else if (X > 0xFFFF)\
1461 _BitScanReverse((DWORD *) &K, (DWORD) X);\
1462 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1467 #define compute_tree_index(S, I)\
1469 size_t X = S >> TREEBIN_SHIFT;\
1472 else if (X > 0xFFFF)\
1475 unsigned int Y = (unsigned int)X;\
1476 unsigned int N = ((Y - 0x100) >> 16) & 8;\
1477 unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
1479 N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
1480 K = 14 - N + ((Y <<= K) >> 15);\
1481 I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
1486 /* Bit representing maximum resolved size in a treebin at i */
1487 #define bit_for_tree_index(i) \
1488 (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
1490 /* Shift placing maximum resolved bit in a treebin at i as sign bit */
1491 #define leftshift_for_tree_index(i) \
1492 ((i == NTREEBINS-1)? 0 : \
1493 ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
1495 /* The size of the smallest chunk held in bin with index i */
1496 #define minsize_for_tree_index(i) \
1497 ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
1498 (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
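/*
  Worked example of compute_tree_index: for S == 0x180, X = S >> 8 == 1,
  the highest set bit of X is bit 0 (K == 0), so
  I = (0 << 1) + ((0x180 >> 7) & 1) == 1, matching
  minsize_for_tree_index(1) == 0x180.  For S == 0x300, X == 3, K == 1,
  I = 2 + ((0x300 >> 8) & 1) == 3 and minsize_for_tree_index(3) == 0x300.
*/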
1501 /* ------------------------ Operations on bin maps ----------------------- */
1503 /* bit corresponding to given index */
1504 #define idx2bit(i) ((binmap_t)(1) << (i))
1506 /* Mark/Clear bits with given index */
1507 #define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
1508 #define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
1509 #define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
1511 #define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
1512 #define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
1513 #define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
1515 /* isolate the least set bit of a bitmap */
1516 #define least_bit(x) ((x) & -(x))
1518 /* mask with all bits to left of least bit of x on */
1519 #define left_bits(x) ((x<<1) | -(x<<1))
1521 /* mask with all bits to left of or equal to least bit of x on */
1522 #define same_or_left_bits(x) ((x) | -(x))
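/*
  For example, with x == 0x68 (binary 0110 1000, bits 3, 5 and 6 set):

    least_bit(x)         == 0x08                just the lowest set bit
    left_bits(x)         has bits 4..31 set     everything strictly left of it
    same_or_left_bits(x) has bits 3..31 set     that bit and everything left

  These masks are used to locate the first non-empty bin at or above a
  computed bin index without scanning bin by bin.
*/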
1524 /* index corresponding to given bit. Use x86 asm if possible */
1526 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
1527 #define compute_bit2idx(X, I)\
1530 J = __builtin_ctz(X); \
1534 #elif defined (__INTEL_COMPILER)
1535 #define compute_bit2idx(X, I)\
1538 J = _bit_scan_forward (X); \
1542 #elif defined(_MSC_VER) && _MSC_VER>=1300
1543 #define compute_bit2idx(X, I)\
1546 _BitScanForward((DWORD *) &J, X);\
1550 #elif USE_BUILTIN_FFS
1551 #define compute_bit2idx(X, I) I = ffs(X)-1
1554 #define compute_bit2idx(X, I)\
1556 unsigned int Y = X - 1;\
1557 unsigned int K = Y >> (16-4) & 16;\
1558 unsigned int N = K; Y >>= K;\
1559 N += K = Y >> (8-3) & 8; Y >>= K;\
1560 N += K = Y >> (4-2) & 4; Y >>= K;\
1561 N += K = Y >> (2-1) & 2; Y >>= K;\
1562 N += K = Y >> (1-0) & 1; Y >>= K;\
1563 I = (bindex_t)(N + Y);\
1568 /* ----------------------- Runtime Check Support ------------------------- */
1571 For security, the main invariant is that malloc/free/etc never
1572 writes to a static address other than malloc_state, unless static
1573 malloc_state itself has been corrupted, which cannot occur via
1574 malloc (because of these checks). In essence this means that we
1575 believe all pointers, sizes, maps etc held in malloc_state, but
1576 check all of those linked or offsetted from other embedded data
1577 structures. These checks are interspersed with main code in a way
1578 that tends to minimize their run-time cost.
1580 When FOOTERS is defined, in addition to range checking, we also
1581 verify footer fields of inuse chunks, which can be used to guarantee
1582 that the mstate controlling malloc/free is intact. This is a
1583 streamlined version of the approach described by William Robertson
1584 et al in "Run-time Detection of Heap-based Overflows" LISA'03
1585 http://www.usenix.org/events/lisa03/tech/robertson.html The footer
1586 of an inuse chunk holds the xor of its mstate and a random seed,
1587 that is checked upon calls to free() and realloc(). This is
1588 (probabilistically) unguessable from outside the program, but can be
1589 computed by any code successfully malloc'ing any chunk, so does not
1590 itself provide protection against code that has already broken
1591 security through some other means. Unlike Robertson et al, we
1592 always dynamically check addresses of all offset chunks (previous,
1593 next, etc). This turns out to be cheaper than relying on hashes.
1597 /* Check if address a is at least as high as any from MORECORE or MMAP */
1598 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
1599 /* Check if address of next chunk n is higher than base chunk p */
1600 #define ok_next(p, n) ((char*)(p) < (char*)(n))
1601 /* Check if p has inuse status */
1602 #define ok_inuse(p) is_inuse(p)
1603 /* Check if p has its pinuse bit on */
1604 #define ok_pinuse(p) pinuse(p)
1606 #else /* !INSECURE */
1607 #define ok_address(M, a) (1)
1608 #define ok_next(b, n) (1)
1609 #define ok_inuse(p) (1)
1610 #define ok_pinuse(p) (1)
1611 #endif /* !INSECURE */
1613 #if (FOOTERS && !INSECURE)
1614 /* Check if (alleged) mstate m has expected magic field */
1616 ok_magic (const mstate m)
1618 return (m->magic == mparams.magic);
1620 #else /* (FOOTERS && !INSECURE) */
1621 #define ok_magic(M) (1)
1622 #endif /* (FOOTERS && !INSECURE) */
1624 /* In gcc, use __builtin_expect to minimize impact of checks */
1626 #if defined(__GNUC__) && __GNUC__ >= 3
1627 #define RTCHECK(e) __builtin_expect(e, 1)
1629 #define RTCHECK(e) (e)
1631 #else /* !INSECURE */
1632 #define RTCHECK(e) (1)
1633 #endif /* !INSECURE */
1635 /* macros to set up inuse chunks with or without footers */
1639 #define mark_inuse_foot(M,p,s)
1641 /* Macros for setting head/foot of non-mmapped chunks */
1643 /* Set cinuse bit and pinuse bit of next chunk */
1644 #define set_inuse(M,p,s)\
1645 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
1646 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
1648 /* Set cinuse and pinuse of this chunk and pinuse of next chunk */
1649 #define set_inuse_and_pinuse(M,p,s)\
1650 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1651 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
1653 /* Set size, cinuse and pinuse bit of this chunk */
1654 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
1655 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
1659 /* Set foot of inuse chunk to be xor of mstate and seed */
1660 #define mark_inuse_foot(M,p,s)\
1661 (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
1663 #define get_mstate_for(p)\
1664 ((mstate)(((mchunkptr)((char*)(p) +\
1665 (chunksize(p))))->prev_foot ^ mparams.magic))
1667 #define set_inuse(M,p,s)\
1668 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
1669 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
1670 mark_inuse_foot(M,p,s))
1672 #define set_inuse_and_pinuse(M,p,s)\
1673 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1674 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
1675 mark_inuse_foot(M,p,s))
1677 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
1678 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1679 mark_inuse_foot(M, p, s))
1681 #endif /* !FOOTERS */
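/*
  When FOOTERS is defined, a deallocation routine can recover and
  cross-check the mstate that produced a chunk from its footer, roughly
  as in this illustrative sketch:

    mchunkptr p  = mem2chunk(mem);
    mstate    fm = get_mstate_for(p);     p's foot xor'ed with mparams.magic
    if (!ok_magic(fm)) {
      USAGE_ERROR_ACTION(fm, p);          chunk was not produced by this malloc
      return;
    }
*/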
1683 /* ---------------------------- setting mparams -------------------------- */
1686 static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); }
1687 static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
1688 static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); }
1689 #endif /* LOCK_AT_FORK */
1691 /* Initialize mparams */
1692 static int init_mparams(void) {
1693 #ifdef NEED_GLOBAL_LOCK_INIT
1694 if (malloc_global_mutex_status <= 0)
1695 init_malloc_global_mutex();
#endif /* NEED_GLOBAL_LOCK_INIT */

1698 ACQUIRE_MALLOC_GLOBAL_LOCK();
1699 if (mparams.magic == 0) {
1705 psize = malloc_getpagesize;
1706 gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);
1709 SYSTEM_INFO system_info;
1710 GetSystemInfo(&system_info);
1711 psize = system_info.dwPageSize;
1712 gsize = ((DEFAULT_GRANULARITY != 0)?
1713 DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
1717 /* Sanity-check configuration:
1718 size_t must be unsigned and as wide as pointer type.
1719 ints must be at least 4 bytes.
1720 alignment must be at least 8.
1721 Alignment, min chunk size, and page size must all be powers of 2.
1723 if ((sizeof(size_t) != sizeof(char*)) ||
1724 (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
1725 (sizeof(int) < 4) ||
1726 (MALLOC_ALIGNMENT < (size_t)8U) ||
1727 ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
1728 ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
1729 ((gsize & (gsize-SIZE_T_ONE)) != 0) ||
1730 ((psize & (psize-SIZE_T_ONE)) != 0))
1732 mparams.granularity = gsize;
1733 mparams.page_size = psize;
1734 mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
1735 mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
1736 #if MORECORE_CONTIGUOUS
1737 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
1738 #else /* MORECORE_CONTIGUOUS */
1739 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
1740 #endif /* MORECORE_CONTIGUOUS */
1743 /* Set up lock for main malloc area */
1744 gm->mflags = mparams.default_mflags;
1745 (void)INITIAL_LOCK(&gm->mutex);
1748 pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);
1752 #ifndef DLM_MAGIC_CONSTANT
#if USE_DEV_RANDOM
      int fd;
1755 unsigned char buf[sizeof(size_t)];
1756 /* Try to use /dev/urandom, else fall back on using time */
1757 if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
1758 read(fd, buf, sizeof(buf)) == sizeof(buf)) {
1759 magic = *((size_t *) buf);
        close(fd);
      }
      else
1763 #endif /* USE_DEV_RANDOM */
#ifdef WIN32
1765 magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
1766 #elif defined(LACKS_TIME_H)
1767 magic = (size_t)&magic ^ (size_t)0x55555555U;
#else
1769 magic = (size_t)(time(0) ^ (size_t)0x55555555U);
#endif
1771 magic |= (size_t)8U; /* ensure nonzero */
1772 magic &= ~(size_t)7U; /* improve chances of fault for bad values */
#else /* DLM_MAGIC_CONSTANT */
1774 magic = DLM_MAGIC_CONSTANT;
#endif /* DLM_MAGIC_CONSTANT */
1776 /* Until memory modes commonly available, use volatile-write */
1777 (*(volatile size_t *)(&(mparams.magic))) = magic;
1781 RELEASE_MALLOC_GLOBAL_LOCK();
1785 /* support for mallopt */
1786 static int change_mparam(int param_number, int value) {
1788 ensure_initialization();
1789 val = (value == -1)? MAX_SIZE_T : (size_t)value;
1790 switch(param_number) {
1791 case M_TRIM_THRESHOLD:
1792 mparams.trim_threshold = val;
1795 if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
1796 mparams.granularity = val;
1801 case M_MMAP_THRESHOLD:
1802 mparams.mmap_threshold = val;
1810 /* ------------------------- Debugging Support --------------------------- */
1812 /* Check properties of any chunk, whether free, inuse, mmapped etc */
1813 static void do_check_any_chunk(mstate m, mchunkptr p) {
1814 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
1815 assert(ok_address(m, p));
1818 /* Check properties of top chunk */
1819 static void do_check_top_chunk(mstate m, mchunkptr p) {
1820 msegmentptr sp = segment_holding(m, (char*)p);
1821 size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */
1823 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
1824 assert(ok_address(m, p));
1825 assert(sz == m->topsize);
1827 assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
1829 assert(!pinuse(chunk_plus_offset(p, sz)));
1832 /* Check properties of (inuse) mmapped chunks */
1833 static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
1834 size_t sz = chunksize(p);
1835 size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
1836 assert(is_mmapped(p));
1837 assert(use_mmap(m));
1838 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
1839 assert(ok_address(m, p));
1840 assert(!is_small(sz));
1841 assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
1842 assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
1843 assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
1846 /* Check properties of inuse chunks */
1847 static void do_check_inuse_chunk(mstate m, mchunkptr p) {
1848 do_check_any_chunk(m, p);
1849 assert(is_inuse(p));
1850 assert(next_pinuse(p));
1851 /* If not pinuse and not mmapped, previous chunk has OK offset */
1852 assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
1854 do_check_mmapped_chunk(m, p);
1857 /* Check properties of free chunks */
1858 static void do_check_free_chunk(mstate m, mchunkptr p) {
1859 size_t sz = chunksize(p);
1860 mchunkptr next = chunk_plus_offset(p, sz);
1861 do_check_any_chunk(m, p);
1862 assert(!is_inuse(p));
1863 assert(!next_pinuse(p));
1864 assert (!is_mmapped(p));
1865 if (p != m->dv && p != m->top) {
1866 if (sz >= MIN_CHUNK_SIZE) {
1867 assert((sz & CHUNK_ALIGN_MASK) == 0);
1868 assert(is_aligned(chunk2mem(p)));
1869 assert(next->prev_foot == sz);
1871 assert (next == m->top || is_inuse(next));
1872 assert(p->fd->bk == p);
1873 assert(p->bk->fd == p);
1875 else /* markers are always of size SIZE_T_SIZE */
1876 assert(sz == SIZE_T_SIZE);
1880 /* Check properties of malloced chunks at the point they are malloced */
1881 static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
1883 mchunkptr p = mem2chunk(mem);
1884 size_t sz = p->head & ~INUSE_BITS;
1885 do_check_inuse_chunk(m, p);
1886 assert((sz & CHUNK_ALIGN_MASK) == 0);
1887 assert(sz >= MIN_CHUNK_SIZE);
1889 /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
1890 assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
1894 /* Check a tree and its subtrees. */
1895 static void do_check_tree(mstate m, tchunkptr t) {
1898 bindex_t tindex = t->index;
1899 size_t tsize = chunksize(t);
1901 compute_tree_index(tsize, idx);
1902 assert(tindex == idx);
1903 assert(tsize >= MIN_LARGE_SIZE);
1904 assert(tsize >= minsize_for_tree_index(idx));
1905 assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
1907 do { /* traverse through chain of same-sized nodes */
1908 do_check_any_chunk(m, ((mchunkptr)u));
1909 assert(u->index == tindex);
1910 assert(chunksize(u) == tsize);
1911 assert(!is_inuse(u));
1912 assert(!next_pinuse(u));
1913 assert(u->fd->bk == u);
1914 assert(u->bk->fd == u);
1915 if (u->parent == 0) {
1916 assert(u->child[0] == 0);
1917 assert(u->child[1] == 0);
1920 assert(head == 0); /* only one node on chain has parent */
1922 assert(u->parent != u);
1923 assert (u->parent->child[0] == u ||
1924 u->parent->child[1] == u ||
1925 *((tbinptr*)(u->parent)) == u);
1926 if (u->child[0] != 0) {
1927 assert(u->child[0]->parent == u);
1928 assert(u->child[0] != u);
1929 do_check_tree(m, u->child[0]);
1931 if (u->child[1] != 0) {
1932 assert(u->child[1]->parent == u);
1933 assert(u->child[1] != u);
1934 do_check_tree(m, u->child[1]);
1936 if (u->child[0] != 0 && u->child[1] != 0) {
1937 assert(chunksize(u->child[0]) < chunksize(u->child[1]));
1945 /* Check all the chunks in a treebin. */
1946 static void do_check_treebin(mstate m, bindex_t i) {
1947 tbinptr* tb = treebin_at(m, i);
1949 int empty = (m->treemap & (1U << i)) == 0;
1953 do_check_tree(m, t);
1956 /* Check all the chunks in a smallbin. */
1957 static void do_check_smallbin(mstate m, bindex_t i) {
1958 sbinptr b = smallbin_at(m, i);
1959 mchunkptr p = b->bk;
1960 unsigned int empty = (m->smallmap & (1U << i)) == 0;
1964 for (; p != b; p = p->bk) {
1965 size_t size = chunksize(p);
1967 /* each chunk claims to be free */
1968 do_check_free_chunk(m, p);
1969 /* chunk belongs in bin */
1970 assert(small_index(size) == i);
1971 assert(p->bk == b || chunksize(p->bk) == chunksize(p));
1972 /* chunk is followed by an inuse chunk */
1974 if (q->head != FENCEPOST_HEAD)
1975 do_check_inuse_chunk(m, q);
1980 /* Find x in a bin. Used in other check functions. */
1981 static int bin_find(mstate m, mchunkptr x) {
1982 size_t size = chunksize(x);
1983 if (is_small(size)) {
1984 bindex_t sidx = small_index(size);
1985 sbinptr b = smallbin_at(m, sidx);
1986 if (smallmap_is_marked(m, sidx)) {
1991 } while ((p = p->fd) != b);
1996 compute_tree_index(size, tidx);
1997 if (treemap_is_marked(m, tidx)) {
1998 tchunkptr t = *treebin_at(m, tidx);
1999 size_t sizebits = size << leftshift_for_tree_index(tidx);
2000 while (t != 0 && chunksize(t) != size) {
2001 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
2007 if (u == (tchunkptr)x)
2009 } while ((u = u->fd) != t);
2016 /* Traverse each chunk and check it; return total */
2017 static size_t traverse_and_check(mstate m) {
2019 if (is_initialized(m)) {
2020 msegmentptr s = &m->seg;
2021 sum += m->topsize + TOP_FOOT_SIZE;
2023 mchunkptr q = align_as_chunk(s->base);
2024 mchunkptr lastq = 0;
2026 while (segment_holds(s, q) &&
2027 q != m->top && q->head != FENCEPOST_HEAD) {
2028 sum += chunksize(q);
2030 assert(!bin_find(m, q));
2031 do_check_inuse_chunk(m, q);
2034 assert(q == m->dv || bin_find(m, q));
2035 assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */
2036 do_check_free_chunk(m, q);
2048 /* Check all properties of malloc_state. */
2049 static void do_check_malloc_state(mstate m) {
2053 for (i = 0; i < NSMALLBINS; ++i)
2054 do_check_smallbin(m, i);
2055 for (i = 0; i < NTREEBINS; ++i)
2056 do_check_treebin(m, i);
2058 if (m->dvsize != 0) { /* check dv chunk */
2059 do_check_any_chunk(m, m->dv);
2060 assert(m->dvsize == chunksize(m->dv));
2061 assert(m->dvsize >= MIN_CHUNK_SIZE);
2062 assert(bin_find(m, m->dv) == 0);
2065 if (m->top != 0) { /* check top chunk */
2066 do_check_top_chunk(m, m->top);
2067 /*assert(m->topsize == chunksize(m->top)); redundant */
2068 assert(m->topsize > 0);
2069 assert(bin_find(m, m->top) == 0);
2072 total = traverse_and_check(m);
2073 assert(total <= m->footprint);
2074 assert(m->footprint <= m->max_footprint);
2078 /* ----------------------------- statistics ------------------------------ */
2081 static struct dlmallinfo internal_mallinfo(mstate m) {
2082 struct dlmallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2083 ensure_initialization();
2084 if (!PREACTION(m)) {
2085 check_malloc_state(m);
2086 if (is_initialized(m)) {
2087 size_t nfree = SIZE_T_ONE; /* top always free */
2088 size_t mfree = m->topsize + TOP_FOOT_SIZE;
2090 msegmentptr s = &m->seg;
2092 mchunkptr q = align_as_chunk(s->base);
2093 while (segment_holds(s, q) &&
2094 q != m->top && q->head != FENCEPOST_HEAD) {
2095 size_t sz = chunksize(q);
2108 nm.hblkhd = m->footprint - sum;
2109 nm.usmblks = m->max_footprint;
2110 nm.uordblks = m->footprint - mfree;
2111 nm.fordblks = mfree;
2112 nm.keepcost = m->topsize;
2119 #endif /* !NO_MALLINFO */
2121 #if !NO_MALLOC_STATS
2122 static void internal_malloc_stats(mstate m) {
2123 ensure_initialization();
2124 if (!PREACTION(m)) {
2128 check_malloc_state(m);
2129 if (is_initialized(m)) {
2130 msegmentptr s = &m->seg;
2131 maxfp = m->max_footprint;
2133 used = fp - (m->topsize + TOP_FOOT_SIZE);
2136 mchunkptr q = align_as_chunk(s->base);
2137 while (segment_holds(s, q) &&
2138 q != m->top && q->head != FENCEPOST_HEAD) {
2140 used -= chunksize(q);
2146 POSTACTION(m); /* drop lock */
2147 fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
2148 fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp));
2149 fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used));
2152 #endif /* NO_MALLOC_STATS */
2154 /* ----------------------- Operations on smallbins ----------------------- */
/*
2157 Various forms of linking and unlinking are defined as macros. Even
2158 the ones for trees, which are very long but have very short typical
2159 paths. This is ugly but reduces reliance on inlining support of
  compilers.
*/
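/*
  Each smallbin is a circular doubly-linked list headed by a sentinel
  "bin" chunk whose fd/bk point back to itself when the bin is empty
  (see init_bins below).  A simplified, self-contained sketch of what
  the insert/unlink macros below do, using a toy node type:
*/
#if 0
struct toy_node { struct toy_node *fd, *bk; };

/* link N between the sentinel B and B->fd, as insert_small_chunk does */
static void toy_insert (struct toy_node *B, struct toy_node *N)
{
  struct toy_node *F = B->fd;
  N->fd = F;
  N->bk = B;
  F->bk = N;
  B->fd = N;
}

/* unlink N, preserving the invariant N->fd->bk == N && N->bk->fd == N */
static void toy_unlink (struct toy_node *N)
{
  N->fd->bk = N->bk;
  N->bk->fd = N->fd;
}
#endif /* 0 */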
2163 /* Link a free chunk into a smallbin */
2164 #define insert_small_chunk(M, P, S) {\
2165 bindex_t I = small_index(S);\
2166 mchunkptr B = smallbin_at(M, I);\
2168 assert(S >= MIN_CHUNK_SIZE);\
2169 if (!smallmap_is_marked(M, I))\
2170 mark_smallmap(M, I);\
2171 else if (RTCHECK(ok_address(M, B->fd)))\
2174 CORRUPTION_ERROR_ACTION(M);\
2182 /* Unlink a chunk from a smallbin */
2183 #define unlink_small_chunk(M, P, S) {\
2184 mchunkptr F = P->fd;\
2185 mchunkptr B = P->bk;\
2186 bindex_t I = small_index(S);\
2189 assert(chunksize(P) == small_index2size(I));\
2190 if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \
2192 clear_smallmap(M, I);\
2194 else if (RTCHECK(B == smallbin_at(M,I) ||\
2195 (ok_address(M, B) && B->fd == P))) {\
2200 CORRUPTION_ERROR_ACTION(M);\
2204 CORRUPTION_ERROR_ACTION(M);\
2208 /* Unlink the first chunk from a smallbin */
2209 #define unlink_first_small_chunk(M, B, P, I) {\
2210 mchunkptr F = P->fd;\
2213 assert(chunksize(P) == small_index2size(I));\
2215 clear_smallmap(M, I);\
2217 else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\
2222 CORRUPTION_ERROR_ACTION(M);\
2226 /* Replace dv node, binning the old one */
2227 /* Used only when dvsize known to be small */
2228 #define replace_dv(M, P, S) {\
2229 size_t DVS = M->dvsize;\
2230 assert(is_small(DVS));\
2232 mchunkptr DV = M->dv;\
2233 insert_small_chunk(M, DV, DVS);\
2239 /* ------------------------- Operations on trees ------------------------- */
2241 /* Insert chunk into tree */
2242 #define insert_large_chunk(M, X, S) {\
2245 compute_tree_index(S, I);\
2246 H = treebin_at(M, I);\
2248 X->child[0] = X->child[1] = 0;\
2249 if (!treemap_is_marked(M, I)) {\
2250 mark_treemap(M, I);\
2252 X->parent = (tchunkptr)H;\
2257 size_t K = S << leftshift_for_tree_index(I);\
2259 if (chunksize(T) != S) {\
2260 tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
2264 else if (RTCHECK(ok_address(M, C))) {\
2271 CORRUPTION_ERROR_ACTION(M);\
2276 tchunkptr F = T->fd;\
2277 if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
2285 CORRUPTION_ERROR_ACTION(M);\
/*
  Unlink steps:

2296 1. If x is a chained node, unlink it from its same-sized fd/bk links
2297 and choose its bk node as its replacement.
2298 2. If x was the last node of its size, but not a leaf node, it must
2299 be replaced with a leaf node (not merely one with an open left or
2300 right), to make sure that lefts and rights of descendents
2301 correspond properly to bit masks. We use the rightmost descendent
2302 of x. We could use any other leaf, but this is easy to locate and
2303 tends to counteract removal of leftmosts elsewhere, and so keeps
2304 paths shorter than minimally guaranteed. This doesn't loop much
2305 because on average a node in a tree is near the bottom.
2306 3. If x is the base of a chain (i.e., has parent links) relink
2307 x's parent and children to x's replacement (or null if none).
*/
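/*
  A condensed sketch of step 2 above: when x was the only chunk of its
  size and is not a leaf, its replacement is its rightmost descendant
  (child[1] preferred over child[0] at every level).  Illustrative only;
  the macro below performs the same walk while also remembering the
  child-slot addresses so the replacement can be detached:
*/
#if 0
  tchunkptr r = (x->child[1] != 0) ? x->child[1] : x->child[0];
  if (r != 0) {
    while (r->child[1] != 0 || r->child[0] != 0)
      r = (r->child[1] != 0) ? r->child[1] : r->child[0];
    /* r is now a leaf and takes x's place in the tree */
  }
#endif /* 0 */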
2310 #define unlink_large_chunk(M, X) {\
2311 tchunkptr XP = X->parent;\
2314 tchunkptr F = X->fd;\
2316 if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\
2321 CORRUPTION_ERROR_ACTION(M);\
2326 if (((R = *(RP = &(X->child[1]))) != 0) ||\
2327 ((R = *(RP = &(X->child[0]))) != 0)) {\
2329 while ((*(CP = &(R->child[1])) != 0) ||\
2330 (*(CP = &(R->child[0])) != 0)) {\
2333 if (RTCHECK(ok_address(M, RP)))\
2336 CORRUPTION_ERROR_ACTION(M);\
2341 tbinptr* H = treebin_at(M, X->index);\
2343 if ((*H = R) == 0) \
2344 clear_treemap(M, X->index);\
2346 else if (RTCHECK(ok_address(M, XP))) {\
2347 if (XP->child[0] == X) \
2353 CORRUPTION_ERROR_ACTION(M);\
2355 if (RTCHECK(ok_address(M, R))) {\
2358 if ((C0 = X->child[0]) != 0) {\
2359 if (RTCHECK(ok_address(M, C0))) {\
2364 CORRUPTION_ERROR_ACTION(M);\
2366 if ((C1 = X->child[1]) != 0) {\
2367 if (RTCHECK(ok_address(M, C1))) {\
2372 CORRUPTION_ERROR_ACTION(M);\
2376 CORRUPTION_ERROR_ACTION(M);\
2381 /* Relays to large vs small bin operations */
2383 #define insert_chunk(M, P, S)\
2384 if (is_small(S)) insert_small_chunk(M, P, S)\
2385 else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
2387 #define unlink_chunk(M, P, S)\
2388 if (is_small(S)) unlink_small_chunk(M, P, S)\
2389 else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
2392 /* Relays to internal calls to malloc/free from realloc, memalign etc */
2395 #define internal_malloc(m, b) mspace_malloc(m, b)
2396 #define internal_free(m, mem) mspace_free(m,mem);
2397 #else /* ONLY_MSPACES */
2399 #define internal_malloc(m, b)\
2400 ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
2401 #define internal_free(m, mem)\
2402 if (m == gm) dlfree(mem); else mspace_free(m,mem);
2404 #define internal_malloc(m, b) dlmalloc(b)
2405 #define internal_free(m, mem) dlfree(mem)
2406 #endif /* MSPACES */
2407 #endif /* ONLY_MSPACES */
2409 /* ----------------------- Direct-mmapping chunks ----------------------- */
/*
2412 Directly mmapped chunks are set up with an offset to the start of
2413 the mmapped region stored in the prev_foot field of the chunk. This
2414 allows reconstruction of the required argument to MUNMAP when freed,
2415 and also allows adjustment of the returned chunk to meet alignment
2416 requirements (especially in memalign).
*/
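/*
  Concretely, if mm is the address returned by the mmap call and p is
  the (possibly realigned) chunk handed back to the caller, the stored
  offset lets the free path rebuild the original mapping.  A sketch that
  mirrors mmap_alloc below and the mmapped-chunk branch of dlfree:
*/
#if 0
  /* at allocation time (see mmap_alloc): */
  p->prev_foot = (size_t)((char*)p - mm);          /* offset back to mmap base */

  /* at free time: reconstruct the full extent and unmap it */
  {
    size_t offset = p->prev_foot;
    size_t total  = chunksize(p) + offset + MMAP_FOOT_PAD;
    CALL_MUNMAP((char*)p - offset, total);
  }
#endif /* 0 */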
2419 /* Malloc using mmap */
2420 static void* mmap_alloc(mstate m, size_t nb) {
2421 size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2422 if (m->footprint_limit != 0) {
2423 size_t fp = m->footprint + mmsize;
2424 if (fp <= m->footprint || fp > m->footprint_limit)
2427 if (mmsize > nb) { /* Check for wrap around 0 */
2428 char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));
2430 size_t offset = align_offset(chunk2mem(mm));
2431 size_t psize = mmsize - offset - MMAP_FOOT_PAD;
2432 mchunkptr p = (mchunkptr)(mm + offset);
2433 p->prev_foot = offset;
2435 mark_inuse_foot(m, p, psize);
2436 chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
2437 chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
2439 if (m->least_addr == 0 || mm < m->least_addr)
2441 if ((m->footprint += mmsize) > m->max_footprint)
2442 m->max_footprint = m->footprint;
2443 assert(is_aligned(chunk2mem(p)));
2444 check_mmapped_chunk(m, p);
2445 return chunk2mem(p);
2451 /* Realloc using mmap */
2452 static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
2453 size_t oldsize = chunksize(oldp);
2454 (void)flags; /* placate people compiling -Wunused */
2455 if (is_small(nb)) /* Can't shrink mmap regions below small size */
2457 /* Keep old chunk if big enough but not too big */
2458 if (oldsize >= nb + SIZE_T_SIZE &&
2459 (oldsize - nb) <= (mparams.granularity << 1))
2462 size_t offset = oldp->prev_foot;
2463 size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
2464 size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2465 char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
2466 oldmmsize, newmmsize, flags);
2468 mchunkptr newp = (mchunkptr)(cp + offset);
2469 size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
2471 mark_inuse_foot(m, newp, psize);
2472 chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
2473 chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
2475 if (cp < m->least_addr)
2477 if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
2478 m->max_footprint = m->footprint;
2479 check_mmapped_chunk(m, newp);
2487 /* -------------------------- mspace management -------------------------- */
2489 /* Initialize top chunk and its size */
2490 static void init_top(mstate m, mchunkptr p, size_t psize) {
2491 /* Ensure alignment */
2492 size_t offset = align_offset(chunk2mem(p));
2493 p = (mchunkptr)((char*)p + offset);
2498 p->head = psize | PINUSE_BIT;
2499 /* set size of fake trailing chunk holding overhead space only once */
2500 chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
2501 m->trim_check = mparams.trim_threshold; /* reset on each update */
2504 /* Initialize bins for a new mstate that is otherwise zeroed out */
2505 static void init_bins(mstate m) {
2506 /* Establish circular links for smallbins */
2508 for (i = 0; i < NSMALLBINS; ++i) {
2509 sbinptr bin = smallbin_at(m,i);
2510 bin->fd = bin->bk = bin;
2514 #if PROCEED_ON_ERROR
2516 /* default corruption action */
2517 static void reset_on_error(mstate m) {
2519 ++malloc_corruption_error_count;
2520 /* Reinitialize fields to forget about all memory */
2521 m->smallmap = m->treemap = 0;
2522 m->dvsize = m->topsize = 0;
2527 for (i = 0; i < NTREEBINS; ++i)
2528 *treebin_at(m, i) = 0;
2531 #endif /* PROCEED_ON_ERROR */
2533 /* Allocate chunk and prepend remainder with chunk in successor base. */
2534 static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
2536 mchunkptr p = align_as_chunk(newbase);
2537 mchunkptr oldfirst = align_as_chunk(oldbase);
2538 size_t psize = (char*)oldfirst - (char*)p;
2539 mchunkptr q = chunk_plus_offset(p, nb);
2540 size_t qsize = psize - nb;
2541 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
2543 assert((char*)oldfirst > (char*)q);
2544 assert(pinuse(oldfirst));
2545 assert(qsize >= MIN_CHUNK_SIZE);
2547 /* consolidate remainder with first chunk of old base */
2548 if (oldfirst == m->top) {
2549 size_t tsize = m->topsize += qsize;
2551 q->head = tsize | PINUSE_BIT;
2552 check_top_chunk(m, q);
2554 else if (oldfirst == m->dv) {
2555 size_t dsize = m->dvsize += qsize;
2557 set_size_and_pinuse_of_free_chunk(q, dsize);
2560 if (!is_inuse(oldfirst)) {
2561 size_t nsize = chunksize(oldfirst);
2562 unlink_chunk(m, oldfirst, nsize);
2563 oldfirst = chunk_plus_offset(oldfirst, nsize);
2566 set_free_with_pinuse(q, qsize, oldfirst);
2567 insert_chunk(m, q, qsize);
2568 check_free_chunk(m, q);
2571 check_malloced_chunk(m, chunk2mem(p), nb);
2572 return chunk2mem(p);
2575 /* Add a segment to hold a new noncontiguous region */
2576 static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
2577 /* Determine locations and sizes of segment, fenceposts, old top */
2578 char* old_top = (char*)m->top;
2579 msegmentptr oldsp = segment_holding(m, old_top);
2580 char* old_end = oldsp->base + oldsp->size;
2581 size_t ssize = pad_request(sizeof(struct malloc_segment));
2582 char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2583 size_t offset = align_offset(chunk2mem(rawsp));
2584 char* asp = rawsp + offset;
2585 char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
2586 mchunkptr sp = (mchunkptr)csp;
2587 msegmentptr ss = (msegmentptr)(chunk2mem(sp));
2588 mchunkptr tnext = chunk_plus_offset(sp, ssize);
2589 mchunkptr p = tnext;
2592 /* reset top to new space */
2593 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
2595 /* Set up segment record */
2596 assert(is_aligned(ss));
2597 set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
2598 *ss = m->seg; /* Push current record */
2599 m->seg.base = tbase;
2600 m->seg.size = tsize;
2601 m->seg.sflags = mmapped;
2604 /* Insert trailing fenceposts */
2606 mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
2607 p->head = FENCEPOST_HEAD;
2609 if ((char*)(&(nextp->head)) < old_end)
2614 assert(nfences >= 2);
2616 /* Insert the rest of old top into a bin as an ordinary free chunk */
2617 if (csp != old_top) {
2618 mchunkptr q = (mchunkptr)old_top;
2619 size_t psize = csp - old_top;
2620 mchunkptr tn = chunk_plus_offset(q, psize);
2621 set_free_with_pinuse(q, psize, tn);
2622 insert_chunk(m, q, psize);
2625 check_top_chunk(m, m->top);
2628 /* -------------------------- System allocation -------------------------- */
2630 /* Get memory from system using MORECORE or MMAP */
2631 static void* sys_alloc(mstate m, size_t nb) {
2632 char* tbase = CMFAIL;
2634 flag_t mmap_flag = 0;
2635 size_t asize; /* allocation size */
2637 ensure_initialization();
2639 if (use_noexpand(m))
2642 /* Directly map large chunks, but only if already initialized */
2643 if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
2644 void* mem = mmap_alloc(m, nb);
2649 asize = granularity_align(nb + SYS_ALLOC_PADDING);
2651 return 0; /* wraparound */
2652 if (m->footprint_limit != 0) {
2653 size_t fp = m->footprint + asize;
2654 if (fp <= m->footprint || fp > m->footprint_limit)
/*
2659 Try getting memory in any of three ways (in most-preferred to
2660 least-preferred order):
2661 1. A call to MORECORE that can normally contiguously extend memory.
2662 (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
2663 main space is mmapped or a previous contiguous call failed)
2664 2. A call to MMAP new space (disabled if not HAVE_MMAP).
2665 Note that under the default settings, if MORECORE is unable to
2666 fulfill a request, and HAVE_MMAP is true, then mmap is
2667 used as a noncontiguous system allocator. This is a useful backup
2668 strategy for systems with holes in address spaces -- in this case
2669 sbrk cannot contiguously expand the heap, but mmap may be able to
  map noncontiguous space.
2671 3. A call to MORECORE that cannot usually contiguously extend memory.
2672 (disabled if not HAVE_MORECORE)
2674 In all cases, we need to request enough bytes from system to ensure
2675 we can malloc nb bytes upon success, so pad with enough space for
2676 top_foot, plus alignment-pad to make sure we don't lose bytes if
2677 not on boundary, and round this up to a granularity unit.
*/
2680 if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
2682 size_t ssize = asize; /* sbrk call size */
2683 msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
2684 ACQUIRE_MALLOC_GLOBAL_LOCK();
2686 if (ss == 0) { /* First time through or recovery */
2687 char* base = (char*)CALL_MORECORE(0);
2688 if (base != CMFAIL) {
2690 /* Adjust to end on a page boundary */
2691 if (!is_page_aligned(base))
2692 ssize += (page_align((size_t)base) - (size_t)base);
2693 fp = m->footprint + ssize; /* recheck limits */
2694 if (ssize > nb && ssize < HALF_MAX_SIZE_T &&
2695 (m->footprint_limit == 0 ||
2696 (fp > m->footprint && fp <= m->footprint_limit)) &&
2697 (br = (char*)(CALL_MORECORE(ssize))) == base) {
2704 /* Subtract out existing available top space from MORECORE request. */
2705 ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
2706 /* Use mem here only if it did continuously extend old space */
2707 if (ssize < HALF_MAX_SIZE_T &&
2708 (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) {
2714 if (tbase == CMFAIL) { /* Cope with partial failure */
2715 if (br != CMFAIL) { /* Try to use/extend the space we did get */
2716 if (ssize < HALF_MAX_SIZE_T &&
2717 ssize < nb + SYS_ALLOC_PADDING) {
2718 size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize);
2719 if (esize < HALF_MAX_SIZE_T) {
2720 char* end = (char*)CALL_MORECORE(esize);
2723 else { /* Can't use; try to release */
2724 (void) CALL_MORECORE(-ssize);
2730 if (br != CMFAIL) { /* Use the space we did get */
2735 disable_contiguous(m); /* Don't try contiguous path in the future */
2738 RELEASE_MALLOC_GLOBAL_LOCK();
2741 if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
2742 char* mp = (char*)(CALL_MMAP(asize));
2746 mmap_flag = USE_MMAP_BIT;
2750 if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
2751 if (asize < HALF_MAX_SIZE_T) {
2754 ACQUIRE_MALLOC_GLOBAL_LOCK();
2755 br = (char*)(CALL_MORECORE(asize));
2756 end = (char*)(CALL_MORECORE(0));
2757 RELEASE_MALLOC_GLOBAL_LOCK();
2758 if (br != CMFAIL && end != CMFAIL && br < end) {
2759 size_t ssize = end - br;
2760 if (ssize > nb + TOP_FOOT_SIZE) {
2768 if (tbase != CMFAIL) {
2770 if ((m->footprint += tsize) > m->max_footprint)
2771 m->max_footprint = m->footprint;
2773 if (!is_initialized(m)) { /* first-time initialization */
2774 if (m->least_addr == 0 || tbase < m->least_addr)
2775 m->least_addr = tbase;
2776 m->seg.base = tbase;
2777 m->seg.size = tsize;
2778 m->seg.sflags = mmap_flag;
2779 m->magic = mparams.magic;
2780 m->release_checks = MAX_RELEASE_CHECK_RATE;
2784 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
2788 /* Offset top by embedded malloc_state */
2789 mchunkptr mn = next_chunk(mem2chunk(m));
2790 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
2795 /* Try to merge with an existing segment */
2796 msegmentptr sp = &m->seg;
2797 /* Only consider most recent segment if traversal suppressed */
2798 while (sp != 0 && tbase != sp->base + sp->size)
2799 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
2801 !is_extern_segment(sp) &&
2802 (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
2803 segment_holds(sp, m->top)) { /* append */
2805 init_top(m, m->top, m->topsize + tsize);
2808 if (tbase < m->least_addr)
2809 m->least_addr = tbase;
2811 while (sp != 0 && sp->base != tbase + tsize)
2812 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
2814 !is_extern_segment(sp) &&
2815 (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
2816 char* oldbase = sp->base;
2819 return prepend_alloc(m, tbase, oldbase, nb);
2822 add_segment(m, tbase, tsize, mmap_flag);
2826 if (nb < m->topsize) { /* Allocate from new or extended top space */
2827 size_t rsize = m->topsize -= nb;
2828 mchunkptr p = m->top;
2829 mchunkptr r = m->top = chunk_plus_offset(p, nb);
2830 r->head = rsize | PINUSE_BIT;
2831 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
2832 check_top_chunk(m, m->top);
2833 check_malloced_chunk(m, chunk2mem(p), nb);
2834 return chunk2mem(p);
2838 MALLOC_FAILURE_ACTION;
2842 /* ----------------------- system deallocation -------------------------- */
2844 /* Unmap and unlink any mmapped segments that don't contain used chunks */
2845 static size_t release_unused_segments(mstate m) {
2846 size_t released = 0;
2848 msegmentptr pred = &m->seg;
2849 msegmentptr sp = pred->next;
2851 char* base = sp->base;
2852 size_t size = sp->size;
2853 msegmentptr next = sp->next;
2855 if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
2856 mchunkptr p = align_as_chunk(base);
2857 size_t psize = chunksize(p);
2858 /* Can unmap if first chunk holds entire segment and not pinned */
2859 if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
2860 tchunkptr tp = (tchunkptr)p;
2861 assert(segment_holds(sp, (char*)sp));
2867 unlink_large_chunk(m, tp);
2869 if (CALL_MUNMAP(base, size) == 0) {
2871 m->footprint -= size;
2872 /* unlink obsoleted record */
2876 else { /* back out if cannot unmap */
2877 insert_large_chunk(m, tp, psize);
2881 if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
2886 /* Reset check counter */
2887 m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)?
2888 (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE);
2892 static int sys_trim(mstate m, size_t pad) {
2893 size_t released = 0;
2894 ensure_initialization();
2895 if (pad < MAX_REQUEST && is_initialized(m)) {
2896 pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
2898 if (m->topsize > pad) {
2899 /* Shrink top space in granularity-size units, keeping at least one */
2900 size_t unit = mparams.granularity;
2901 size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
2903 msegmentptr sp = segment_holding(m, (char*)m->top);
2905 if (!is_extern_segment(sp)) {
2906 if (is_mmapped_segment(sp)) {
2908 sp->size >= extra &&
2909 !has_segment_link(m, sp)) { /* can't shrink if pinned */
2910 size_t newsize = sp->size - extra;
2911 (void)newsize; /* placate people compiling -Wunused-variable */
2912 /* Prefer mremap, fall back to munmap */
2913 if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
2914 (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
2919 else if (HAVE_MORECORE) {
2920 if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
2921 extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
2922 ACQUIRE_MALLOC_GLOBAL_LOCK();
2924 /* Make sure end of memory is where we last set it. */
2925 char* old_br = (char*)(CALL_MORECORE(0));
2926 if (old_br == sp->base + sp->size) {
2927 char* rel_br = (char*)(CALL_MORECORE(-extra));
2928 char* new_br = (char*)(CALL_MORECORE(0));
2929 if (rel_br != CMFAIL && new_br < old_br)
2930 released = old_br - new_br;
2933 RELEASE_MALLOC_GLOBAL_LOCK();
2937 if (released != 0) {
2938 sp->size -= released;
2939 m->footprint -= released;
2940 init_top(m, m->top, m->topsize - released);
2941 check_top_chunk(m, m->top);
2945 /* Unmap any unused mmapped segments */
2947 released += release_unused_segments(m);
2949 /* On failure, disable autotrim to avoid repeated failed future calls */
2950 if (released == 0 && m->topsize > m->trim_check)
2951 m->trim_check = MAX_SIZE_T;
2954 return (released != 0)? 1 : 0;
2957 /* Consolidate and bin a chunk. Differs from exported versions
2958 of free mainly in that the chunk need not be marked as inuse.
*/
2960 static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
2961 mchunkptr next = chunk_plus_offset(p, psize);
2964 size_t prevsize = p->prev_foot;
2965 if (is_mmapped(p)) {
2966 psize += prevsize + MMAP_FOOT_PAD;
2967 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
2968 m->footprint -= psize;
2971 prev = chunk_minus_offset(p, prevsize);
2974 if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */
2976 unlink_chunk(m, p, prevsize);
2978 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
2980 set_free_with_pinuse(p, psize, next);
2985 CORRUPTION_ERROR_ACTION(m);
2989 if (RTCHECK(ok_address(m, next))) {
2990 if (!cinuse(next)) { /* consolidate forward */
2991 if (next == m->top) {
2992 size_t tsize = m->topsize += psize;
2994 p->head = tsize | PINUSE_BIT;
3001 else if (next == m->dv) {
3002 size_t dsize = m->dvsize += psize;
3004 set_size_and_pinuse_of_free_chunk(p, dsize);
3008 size_t nsize = chunksize(next);
3010 unlink_chunk(m, next, nsize);
3011 set_size_and_pinuse_of_free_chunk(p, psize);
3019 set_free_with_pinuse(p, psize, next);
3021 insert_chunk(m, p, psize);
3024 CORRUPTION_ERROR_ACTION(m);
3028 /* ---------------------------- malloc --------------------------- */
3030 /* allocate a large request from the best fitting chunk in a treebin */
3031 static void* tmalloc_large(mstate m, size_t nb) {
3033 size_t rsize = -nb; /* Unsigned negation */
3036 compute_tree_index(nb, idx);
3037 if ((t = *treebin_at(m, idx)) != 0) {
3038 /* Traverse tree for this bin looking for node with size == nb */
3039 size_t sizebits = nb << leftshift_for_tree_index(idx);
3040 tchunkptr rst = 0; /* The deepest untaken right subtree */
3043 size_t trem = chunksize(t) - nb;
3046 if ((rsize = trem) == 0)
3050 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
3051 if (rt != 0 && rt != t)
3054 t = rst; /* set t to least subtree holding sizes > nb */
3060 if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
3061 binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
3062 if (leftbits != 0) {
3064 binmap_t leastbit = least_bit(leftbits);
3065 compute_bit2idx(leastbit, i);
3066 t = *treebin_at(m, i);
3070 while (t != 0) { /* find smallest of tree or subtree */
3071 size_t trem = chunksize(t) - nb;
3076 t = leftmost_child(t);
3079 /* If dv is a better fit, return 0 so malloc will use it */
3080 if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
3081 if (RTCHECK(ok_address(m, v))) { /* split */
3082 mchunkptr r = chunk_plus_offset(v, nb);
3083 assert(chunksize(v) == rsize + nb);
3084 if (RTCHECK(ok_next(v, r))) {
3085 unlink_large_chunk(m, v);
3086 if (rsize < MIN_CHUNK_SIZE)
3087 set_inuse_and_pinuse(m, v, (rsize + nb));
3089 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3090 set_size_and_pinuse_of_free_chunk(r, rsize);
3091 insert_chunk(m, r, rsize);
3093 return chunk2mem(v);
3096 CORRUPTION_ERROR_ACTION(m);
3101 /* allocate a small request from the best fitting chunk in a treebin */
3102 static void* tmalloc_small(mstate m, size_t nb) {
3106 binmap_t leastbit = least_bit(m->treemap);
3107 compute_bit2idx(leastbit, i);
3108 v = t = *treebin_at(m, i);
3109 rsize = chunksize(t) - nb;
3111 while ((t = leftmost_child(t)) != 0) {
3112 size_t trem = chunksize(t) - nb;
3119 if (RTCHECK(ok_address(m, v))) {
3120 mchunkptr r = chunk_plus_offset(v, nb);
3121 assert(chunksize(v) == rsize + nb);
3122 if (RTCHECK(ok_next(v, r))) {
3123 unlink_large_chunk(m, v);
3124 if (rsize < MIN_CHUNK_SIZE)
3125 set_inuse_and_pinuse(m, v, (rsize + nb));
3127 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3128 set_size_and_pinuse_of_free_chunk(r, rsize);
3129 replace_dv(m, r, rsize);
3131 return chunk2mem(v);
3135 CORRUPTION_ERROR_ACTION(m);
3141 void* dlmalloc(size_t bytes) {
/*
  Basic algorithm:
3144 If a small request (< 256 bytes minus per-chunk overhead):
3145 1. If one exists, use a remainderless chunk in associated smallbin.
3146 (Remainderless means that there are too few excess bytes to
3147 represent as a chunk.)
3148 2. If it is big enough, use the dv chunk, which is normally the
3149 chunk adjacent to the one used for the most recent small request.
3150 3. If one exists, split the smallest available chunk in a bin,
3151 saving remainder in dv.
3152 4. If it is big enough, use the top chunk.
3153 5. If available, get memory from system and use it
3154 Otherwise, for a large request:
3155 1. Find the smallest available binned chunk that fits, and use it
3156 if it is better fitting than dv chunk, splitting if necessary.
3157 2. If better fitting than any binned chunk, use the dv chunk.
3158 3. If it is big enough, use the top chunk.
3159 4. If request size >= mmap threshold, try to directly mmap this chunk.
3160 5. If available, get memory from system and use it
3162 The ugly goto's here ensure that postaction occurs along all paths.
*/
3166 ensure_initialization(); /* initialize in sys_alloc if not using locks */
3169 if (!PREACTION(gm)) {
3172 if (bytes <= MAX_SMALL_REQUEST) {
3175 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
3176 idx = small_index(nb);
3177 smallbits = gm->smallmap >> idx;
3179 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
3181 idx += ~smallbits & 1; /* Uses next bin if idx empty */
3182 b = smallbin_at(gm, idx);
3184 assert(chunksize(p) == small_index2size(idx));
3185 unlink_first_small_chunk(gm, b, p, idx);
3186 set_inuse_and_pinuse(gm, p, small_index2size(idx));
3188 check_malloced_chunk(gm, mem, nb);
3192 else if (nb > gm->dvsize) {
3193 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
3197 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
3198 binmap_t leastbit = least_bit(leftbits);
3199 compute_bit2idx(leastbit, i);
3200 b = smallbin_at(gm, i);
3202 assert(chunksize(p) == small_index2size(i));
3203 unlink_first_small_chunk(gm, b, p, i);
3204 rsize = small_index2size(i) - nb;
3205 /* Fit here cannot be remainderless if 4byte sizes */
3206 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
3207 set_inuse_and_pinuse(gm, p, small_index2size(i));
3209 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
3210 r = chunk_plus_offset(p, nb);
3211 set_size_and_pinuse_of_free_chunk(r, rsize);
3212 replace_dv(gm, r, rsize);
3215 check_malloced_chunk(gm, mem, nb);
3219 else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
3220 check_malloced_chunk(gm, mem, nb);
3225 else if (bytes >= MAX_REQUEST)
3226 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
3228 nb = pad_request(bytes);
3229 if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
3230 check_malloced_chunk(gm, mem, nb);
3235 if (nb <= gm->dvsize) {
3236 size_t rsize = gm->dvsize - nb;
3237 mchunkptr p = gm->dv;
3238 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
3239 mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
3241 set_size_and_pinuse_of_free_chunk(r, rsize);
3242 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
3244 else { /* exhaust dv */
3245 size_t dvs = gm->dvsize;
3248 set_inuse_and_pinuse(gm, p, dvs);
3251 check_malloced_chunk(gm, mem, nb);
3255 else if (nb < gm->topsize) { /* Split top */
3256 size_t rsize = gm->topsize -= nb;
3257 mchunkptr p = gm->top;
3258 mchunkptr r = gm->top = chunk_plus_offset(p, nb);
3259 r->head = rsize | PINUSE_BIT;
3260 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
3262 check_top_chunk(gm, gm->top);
3263 check_malloced_chunk(gm, mem, nb);
3267 mem = sys_alloc(gm, nb);
3277 /* ---------------------------- free --------------------------- */
3279 void dlfree(void* mem) {
/*
3281 Consolidate freed chunks with preceding or succeeding bordering
3282 free chunks, if they exist, and then place in a bin. Intermixed
3283 with special cases for top, dv, mmapped chunks, and usage errors.
*/
3287 mchunkptr p = mem2chunk(mem);
3289 mstate fm = get_mstate_for(p);
3290 if (!ok_magic(fm)) {
3291 USAGE_ERROR_ACTION(fm, p);
3296 #endif /* FOOTERS */
3297 if (!PREACTION(fm)) {
3298 check_inuse_chunk(fm, p);
3299 if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
3300 size_t psize = chunksize(p);
3301 mchunkptr next = chunk_plus_offset(p, psize);
3303 size_t prevsize = p->prev_foot;
3304 if (is_mmapped(p)) {
3305 psize += prevsize + MMAP_FOOT_PAD;
3306 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
3307 fm->footprint -= psize;
3311 mchunkptr prev = chunk_minus_offset(p, prevsize);
3314 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
3316 unlink_chunk(fm, p, prevsize);
3318 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
3320 set_free_with_pinuse(p, psize, next);
3329 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
3330 if (!cinuse(next)) { /* consolidate forward */
3331 if (next == fm->top) {
3332 size_t tsize = fm->topsize += psize;
3334 p->head = tsize | PINUSE_BIT;
3339 if (should_trim(fm, tsize))
3343 else if (next == fm->dv) {
3344 size_t dsize = fm->dvsize += psize;
3346 set_size_and_pinuse_of_free_chunk(p, dsize);
3350 size_t nsize = chunksize(next);
3352 unlink_chunk(fm, next, nsize);
3353 set_size_and_pinuse_of_free_chunk(p, psize);
3361 set_free_with_pinuse(p, psize, next);
3363 if (is_small(psize)) {
3364 insert_small_chunk(fm, p, psize);
3365 check_free_chunk(fm, p);
3368 tchunkptr tp = (tchunkptr)p;
3369 insert_large_chunk(fm, tp, psize);
3370 check_free_chunk(fm, p);
3371 if (--fm->release_checks == 0)
3372 release_unused_segments(fm);
3378 USAGE_ERROR_ACTION(fm, p);
3385 #endif /* FOOTERS */
3388 void* dlcalloc(size_t n_elements, size_t elem_size) {
3391 if (n_elements != 0) {
3392 req = n_elements * elem_size;
3393 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
3394 (req / n_elements != elem_size))
3395 req = MAX_SIZE_T; /* force downstream failure on overflow */
3397 mem = dlmalloc(req);
3398 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
3399 memset(mem, 0, req);
3403 #endif /* !ONLY_MSPACES */
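/*
  A minimal usage sketch of the exported entry points defined above
  (illustrative only; not compiled into the allocator):
*/
#if 0
static void dlmalloc_usage_sketch (void)
{
  char *s, *t;
  int *v;
  s = (char *) dlmalloc (64);                   /* small request: bin/dv/top */
  v = (int *) dlcalloc (100, sizeof (int));     /* returned memory is zero-filled */
  if (s != 0) {
    strcpy (s, "hello");
    t = (char *) dlrealloc (s, 4096);           /* may move; contents preserved */
    if (t != 0)
      s = t;
  }
  dlfree (v);                                   /* dlfree(0) is a no-op */
  dlfree (s);
}
#endif /* 0 */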
3405 /* ------------ Internal support for realloc, memalign, etc -------------- */
3407 /* Try to realloc; only in-place unless can_move true */
3408 static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
3411 size_t oldsize = chunksize(p);
3412 mchunkptr next = chunk_plus_offset(p, oldsize);
3413 if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
3414 ok_next(p, next) && ok_pinuse(next))) {
3415 if (is_mmapped(p)) {
3416 newp = mmap_resize(m, p, nb, can_move);
3418 else if (oldsize >= nb) { /* already big enough */
3419 size_t rsize = oldsize - nb;
3420 if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */
3421 mchunkptr r = chunk_plus_offset(p, nb);
3422 set_inuse(m, p, nb);
3423 set_inuse(m, r, rsize);
3424 dispose_chunk(m, r, rsize);
3428 else if (next == m->top) { /* extend into top */
3429 if (oldsize + m->topsize > nb) {
3430 size_t newsize = oldsize + m->topsize;
3431 size_t newtopsize = newsize - nb;
3432 mchunkptr newtop = chunk_plus_offset(p, nb);
3433 set_inuse(m, p, nb);
3434 newtop->head = newtopsize |PINUSE_BIT;
3436 m->topsize = newtopsize;
3440 else if (next == m->dv) { /* extend into dv */
3441 size_t dvs = m->dvsize;
3442 if (oldsize + dvs >= nb) {
3443 size_t dsize = oldsize + dvs - nb;
3444 if (dsize >= MIN_CHUNK_SIZE) {
3445 mchunkptr r = chunk_plus_offset(p, nb);
3446 mchunkptr n = chunk_plus_offset(r, dsize);
3447 set_inuse(m, p, nb);
3448 set_size_and_pinuse_of_free_chunk(r, dsize);
3453 else { /* exhaust dv */
3454 size_t newsize = oldsize + dvs;
3455 set_inuse(m, p, newsize);
3462 else if (!cinuse(next)) { /* extend into next free chunk */
3463 size_t nextsize = chunksize(next);
3464 if (oldsize + nextsize >= nb) {
3465 size_t rsize = oldsize + nextsize - nb;
3466 unlink_chunk(m, next, nextsize);
3467 if (rsize < MIN_CHUNK_SIZE) {
3468 size_t newsize = oldsize + nextsize;
3469 set_inuse(m, p, newsize);
3472 mchunkptr r = chunk_plus_offset(p, nb);
3473 set_inuse(m, p, nb);
3474 set_inuse(m, r, rsize);
3475 dispose_chunk(m, r, rsize);
3482 USAGE_ERROR_ACTION(m, chunk2mem(p));
3487 static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
3489 if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
3490 alignment = MIN_CHUNK_SIZE;
3491 if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
3492 size_t a = MALLOC_ALIGNMENT << 1;
3493 while (a < alignment) a <<= 1;
3496 if (bytes >= MAX_REQUEST - alignment) {
3497 if (m != 0) { /* Test isn't needed but avoids compiler warning */
3498 MALLOC_FAILURE_ACTION;
3502 size_t nb = request2size(bytes);
3503 size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
3504 mem = internal_malloc(m, req);
3506 mchunkptr p = mem2chunk(mem);
3509 if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */
/*
3511 Find an aligned spot inside chunk. Since we need to give
3512 back leading space in a chunk of at least MIN_CHUNK_SIZE, if
3513 the first calculation places us at a spot with less than
3514 MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
3515 We've allocated enough total room so that this is always
  possible.
*/
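/*
  Worked example (illustrative; assumes a 64-bit build where chunk2mem
  adds 16 bytes and MIN_CHUNK_SIZE is 32, with alignment == 64): for a
  chunk p at 0x1000, mem is 0x1010 and the first 64-aligned payload
  address is 0x1040, so br == 0x1030 and the leader is 0x30 >= 32, and
  br is used directly.  Had p been at 0x1020 (mem == 0x1030), br would
  again be 0x1030 but the leader only 0x10 < 32, so pos moves up one
  alignment unit to 0x1070 (payload 0x1080).
*/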
3518 char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment -
3521 char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
3523 mchunkptr newp = (mchunkptr)pos;
3524 size_t leadsize = pos - (char*)(p);
3525 size_t newsize = chunksize(p) - leadsize;
3527 if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
3528 newp->prev_foot = p->prev_foot + leadsize;
3529 newp->head = newsize;
3531 else { /* Otherwise, give back leader, use the rest */
3532 set_inuse(m, newp, newsize);
3533 set_inuse(m, p, leadsize);
3534 dispose_chunk(m, p, leadsize);
3539 /* Give back spare room at the end */
3540 if (!is_mmapped(p)) {
3541 size_t size = chunksize(p);
3542 if (size > nb + MIN_CHUNK_SIZE) {
3543 size_t remainder_size = size - nb;
3544 mchunkptr remainder = chunk_plus_offset(p, nb);
3545 set_inuse(m, p, nb);
3546 set_inuse(m, remainder, remainder_size);
3547 dispose_chunk(m, remainder, remainder_size);
3552 assert (chunksize(p) >= nb);
3553 assert(((size_t)mem & (alignment - 1)) == 0);
3554 check_inuse_chunk(m, p);
/*
3562 Common support for independent_X routines, handling
3563 all of the combinations that can result.

  The opts arg has:
3565 bit 0 set if all elements are same size (using sizes[0])
3566 bit 1 set if elements should be zeroed
*/
3568 static void** ialloc(mstate m,
3574 size_t element_size; /* chunksize of each element, if all same */
3575 size_t contents_size; /* total size of elements */
3576 size_t array_size; /* request size of pointer array */
3577 void* mem; /* malloced aggregate space */
3578 mchunkptr p; /* corresponding chunk */
3579 size_t remainder_size; /* remaining bytes while splitting */
3580 void** marray; /* either "chunks" or malloced ptr array */
3581 mchunkptr array_chunk; /* chunk for malloced ptr array */
3582 flag_t was_enabled; /* to disable mmap */
3586 ensure_initialization();
3587 /* compute array length, if needed */
3589 if (n_elements == 0)
3590 return chunks; /* nothing to do */
3595 /* if empty req, must still return chunk representing empty array */
3596 if (n_elements == 0)
3597 return (void**)internal_malloc(m, 0);
3599 array_size = request2size(n_elements * (sizeof(void*)));
3602 /* compute total element size */
3603 if (opts & 0x1) { /* all-same-size */
3604 element_size = request2size(*sizes);
3605 contents_size = n_elements * element_size;
3607 else { /* add up all the sizes */
3610 for (i = 0; i != n_elements; ++i)
3611 contents_size += request2size(sizes[i]);
3614 size = contents_size + array_size;
3617 Allocate the aggregate chunk. First disable direct-mmapping so
3618 malloc won't use it, since we would not be able to later
3619 free/realloc space internal to a segregated mmap region.
3621 was_enabled = use_mmap(m);
3623 mem = internal_malloc(m, size - CHUNK_OVERHEAD);
3629 if (PREACTION(m)) return 0;
3631 remainder_size = chunksize(p);
3633 assert(!is_mmapped(p));
3635 if (opts & 0x2) { /* optionally clear the elements */
3636 memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
3639 /* If not provided, allocate the pointer array as final part of chunk */
3641 size_t array_chunk_size;
3642 array_chunk = chunk_plus_offset(p, contents_size);
3643 array_chunk_size = remainder_size - contents_size;
3644 marray = (void**) (chunk2mem(array_chunk));
3645 set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
3646 remainder_size = contents_size;
3649 /* split out elements */
3650 for (i = 0; ; ++i) {
3651 marray[i] = chunk2mem(p);
3652 if (i != n_elements-1) {
3653 if (element_size != 0)
3654 size = element_size;
3656 size = request2size(sizes[i]);
3657 remainder_size -= size;
3658 set_size_and_pinuse_of_inuse_chunk(m, p, size);
3659 p = chunk_plus_offset(p, size);
3661 else { /* the final element absorbs any overallocation slop */
3662 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
3668 if (marray != chunks) {
3669 /* final element must have exactly exhausted chunk */
3670 if (element_size != 0) {
3671 assert(remainder_size == element_size);
3674 assert(remainder_size == request2size(sizes[i]));
3676 check_inuse_chunk(m, mem2chunk(marray));
3678 for (i = 0; i != n_elements; ++i)
3679 check_inuse_chunk(m, mem2chunk(marray[i]));
3687 /* Try to free all pointers in the given array.
3688 Note: this could be made faster, by delaying consolidation,
3689 at the price of disabling some user integrity checks. We
3690 still optimize some consolidations by combining adjacent
3691 chunks before freeing, which will occur often if allocated
3692 with ialloc or the array is sorted.
*/
3694 static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) {
3696 if (!PREACTION(m)) {
3698 void** fence = &(array[nelem]);
3699 for (a = array; a != fence; ++a) {
3702 mchunkptr p = mem2chunk(mem);
3703 size_t psize = chunksize(p);
3705 if (get_mstate_for(p) != m) {
3710 check_inuse_chunk(m, p);
3712 if (RTCHECK(ok_address(m, p) && ok_inuse(p))) {
3713 void ** b = a + 1; /* try to merge with next chunk */
3714 mchunkptr next = next_chunk(p);
3715 if (b != fence && *b == chunk2mem(next)) {
3716 size_t newsize = chunksize(next) + psize;
3717 set_inuse(m, p, newsize);
3721 dispose_chunk(m, p, psize);
3724 CORRUPTION_ERROR_ACTION(m);
3729 if (should_trim(m, m->topsize))
3737 #if MALLOC_INSPECT_ALL
3738 static void internal_inspect_all(mstate m,
3739 void(*handler)(void *start,
3742 void* callback_arg),
3744 if (is_initialized(m)) {
3745 mchunkptr top = m->top;
3747 for (s = &m->seg; s != 0; s = s->next) {
3748 mchunkptr q = align_as_chunk(s->base);
3749 while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) {
3750 mchunkptr next = next_chunk(q);
3751 size_t sz = chunksize(q);
3755 used = sz - CHUNK_OVERHEAD; /* must not be mmapped */
3756 start = chunk2mem(q);
3760 if (is_small(sz)) { /* offset by possible bookkeeping */
3761 start = (void*)((char*)q + sizeof(struct malloc_chunk));
3764 start = (void*)((char*)q + sizeof(struct malloc_tree_chunk));
3767 if (start < (void*)next) /* skip if all space is bookkeeping */
3768 handler(start, next, used, arg);
3776 #endif /* MALLOC_INSPECT_ALL */
3778 /* ------------------ Exported realloc, memalign, etc -------------------- */
3782 void* dlrealloc(void* oldmem, size_t bytes) {
3785 mem = dlmalloc(bytes);
3787 else if (bytes >= MAX_REQUEST) {
3788 MALLOC_FAILURE_ACTION;
3790 #ifdef REALLOC_ZERO_BYTES_FREES
3791 else if (bytes == 0) {
3794 #endif /* REALLOC_ZERO_BYTES_FREES */
3796 size_t nb = request2size(bytes);
3797 mchunkptr oldp = mem2chunk(oldmem);
3801 mstate m = get_mstate_for(oldp);
3803 USAGE_ERROR_ACTION(m, oldmem);
3806 #endif /* FOOTERS */
3807 if (!PREACTION(m)) {
3808 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
3811 check_inuse_chunk(m, newp);
3812 mem = chunk2mem(newp);
3815 mem = internal_malloc(m, bytes);
3817 size_t oc = chunksize(oldp) - overhead_for(oldp);
3818 memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
3819 internal_free(m, oldmem);
3827 void* dlrealloc_in_place(void* oldmem, size_t bytes) {
3830 if (bytes >= MAX_REQUEST) {
3831 MALLOC_FAILURE_ACTION;
3834 size_t nb = request2size(bytes);
3835 mchunkptr oldp = mem2chunk(oldmem);
3839 mstate m = get_mstate_for(oldp);
3841 USAGE_ERROR_ACTION(m, oldmem);
3844 #endif /* FOOTERS */
3845 if (!PREACTION(m)) {
3846 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
3849 check_inuse_chunk(m, newp);
3858 void* dlmemalign(size_t alignment, size_t bytes) {
3859 if (alignment <= MALLOC_ALIGNMENT) {
3860 return dlmalloc(bytes);
3862 return internal_memalign(gm, alignment, bytes);
3865 int dlposix_memalign(void** pp, size_t alignment, size_t bytes) {
3867 if (alignment == MALLOC_ALIGNMENT)
3868 mem = dlmalloc(bytes);
3870 size_t d = alignment / sizeof(void*);
3871 size_t r = alignment % sizeof(void*);
3872 if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0)
3874 else if (bytes <= MAX_REQUEST - alignment) {
3875 if (alignment < MIN_CHUNK_SIZE)
3876 alignment = MIN_CHUNK_SIZE;
3877 mem = internal_memalign(gm, alignment, bytes);
3888 void* dlvalloc(size_t bytes) {
3890 ensure_initialization();
3891 pagesz = mparams.page_size;
3892 return dlmemalign(pagesz, bytes);
3895 void* dlpvalloc(size_t bytes) {
3897 ensure_initialization();
3898 pagesz = mparams.page_size;
3899 return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
3902 void** dlindependent_calloc(size_t n_elements, size_t elem_size,
3904 size_t sz = elem_size; /* serves as 1-element array */
3905 return ialloc(gm, n_elements, &sz, 3, chunks);
3908 void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
3910 return ialloc(gm, n_elements, sizes, 0, chunks);
3913 size_t dlbulk_free(void* array[], size_t nelem) {
3914 return internal_bulk_free(gm, array, nelem);
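/*
  Usage sketch for the two wrappers above.  Illustrative only; it assumes,
  per the original dlmalloc documentation, that a null third argument makes
  dlindependent_comalloc allocate the pointer array itself, in which case
  that array is later released with an ordinary dlfree:
*/
#if 0
static void comalloc_usage_sketch (void)
{
  size_t sizes[3] = { 16, 100, 240 };
  void **pieces = dlindependent_comalloc (3, sizes, 0);
  if (pieces != 0) {
    /* pieces[0..2] are independently usable (and freeable) chunks */
    dlbulk_free (pieces, 3);   /* free all three elements in one call */
    dlfree (pieces);           /* then the pointer array itself */
  }
}
#endif /* 0 */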
3917 #if MALLOC_INSPECT_ALL
3918 void dlmalloc_inspect_all(void(*handler)(void *start,
3921 void* callback_arg),
3923 ensure_initialization();
3924 if (!PREACTION(gm)) {
3925 internal_inspect_all(gm, handler, arg);
3929 #endif /* MALLOC_INSPECT_ALL */
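/*
  Usage sketch for dlmalloc_inspect_all above.  The handler receives one
  call per contiguous region; the middle parameter names (end pointer and
  used byte count) are assumptions taken from the conventional dlmalloc
  interface, with used_bytes == 0 reported for free space:
*/
#if 0
static void print_region (void *start, void *end, size_t used_bytes, void *arg)
{
  (void) arg;
  fprintf (stderr, "region %p..%p used %lu\n",
           start, end, (unsigned long) used_bytes);
}

static void inspect_usage_sketch (void)
{
  dlmalloc_inspect_all (print_region, 0);
}
#endif /* 0 */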
3931 int dlmalloc_trim(size_t pad) {
3933 ensure_initialization();
3934 if (!PREACTION(gm)) {
3935 result = sys_trim(gm, pad);
3941 size_t dlmalloc_footprint(void) {
3942 return gm->footprint;
3945 size_t dlmalloc_max_footprint(void) {
3946 return gm->max_footprint;
3949 size_t dlmalloc_footprint_limit(void) {
3950 size_t maf = gm->footprint_limit;
3951 return maf == 0 ? MAX_SIZE_T : maf;
3954 size_t dlmalloc_set_footprint_limit(size_t bytes) {
3955 size_t result; /* invert sense of 0 */
3957 result = granularity_align(1); /* Use minimal size */
3958 if (bytes == MAX_SIZE_T)
3959 result = 0; /* disable */
3961 result = granularity_align(bytes);
3962 return gm->footprint_limit = result;
3966 struct dlmallinfo dlmallinfo(void) {
3967 return internal_mallinfo(gm);
3969 #endif /* NO_MALLINFO */
3971 #if !NO_MALLOC_STATS
3972 void dlmalloc_stats() {
3973 internal_malloc_stats(gm);
3975 #endif /* NO_MALLOC_STATS */
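/*
  Usage sketch for the statistics entry points above (illustrative; the
  field names follow the assignments made in internal_mallinfo, and both
  calls assume NO_MALLINFO / NO_MALLOC_STATS are unset):
*/
#if 0
static void stats_usage_sketch (void)
{
  struct dlmallinfo mi = dlmallinfo ();
  fprintf (stderr, "in use %lu, free %lu, releasable top %lu\n",
           (unsigned long) mi.uordblks,
           (unsigned long) mi.fordblks,
           (unsigned long) mi.keepcost);
  dlmalloc_stats ();   /* prints max/current/in-use footprint to stderr */
}
#endif /* 0 */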
3977 int dlmallopt(int param_number, int value) {
3978 return change_mparam(param_number, value);
3981 size_t dlmalloc_usable_size(void* mem) {
3983 mchunkptr p = mem2chunk(mem);
3985 return chunksize(p) - overhead_for(p);
3990 #endif /* !ONLY_MSPACES */
3992 /* ----------------------------- user mspaces ---------------------------- */
3996 static mstate init_user_mstate(char* tbase, size_t tsize) {
3997 size_t msize = pad_request(sizeof(struct malloc_state));
3999 mchunkptr msp = align_as_chunk(tbase);
4000 mstate m = (mstate)(chunk2mem(msp));
4001 memset(m, 0, msize);
4002 (void)INITIAL_LOCK(&m->mutex);
4003 msp->head = (msize|INUSE_BITS);
4004 m->seg.base = m->least_addr = tbase;
4005 m->seg.size = m->footprint = m->max_footprint = tsize;
4006 m->magic = mparams.magic;
4007 m->release_checks = MAX_RELEASE_CHECK_RATE;
4008 m->mflags = mparams.default_mflags;
4011 disable_contiguous(m);
4013 mn = next_chunk(mem2chunk(m));
4014 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
4015 check_top_chunk(m, m->top);
4019 mspace create_mspace(size_t capacity, int locked) {
4022 ensure_initialization();
4023 msize = pad_request(sizeof(struct malloc_state));
4024 if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
4025 size_t rs = ((capacity == 0)? mparams.granularity :
4026 (capacity + TOP_FOOT_SIZE + msize));
4027 size_t tsize = granularity_align(rs);
4028 char* tbase = (char*)(CALL_MMAP(tsize));
4029 if (tbase != CMFAIL) {
4030 m = init_user_mstate(tbase, tsize);
4031 m->seg.sflags = USE_MMAP_BIT;
4032 set_lock(m, locked);
4038 mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
4041 ensure_initialization();
4042 msize = pad_request(sizeof(struct malloc_state));
4043 if (capacity > msize + TOP_FOOT_SIZE &&
4044 capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
4045 m = init_user_mstate((char*)base, capacity);
4046 m->seg.sflags = EXTERN_BIT;
4047 set_lock(m, locked);
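/*
  Usage sketch (illustrative): the normal life cycle of a private mspace.
  A capacity of 0 asks for the default initial granularity; the second
  argument selects a locked (thread-safe) space.  The 1 KB allocation is
  arbitrary.

  #include <assert.h>
  #include <stddef.h>
  #include <vppinfra/dlmalloc.h>

  static void private_heap_demo (void)
  {
    mspace msp = create_mspace (0, 1);   // 0 = default capacity, 1 = locked
    if (msp == 0)
      return;

    void* p = mspace_malloc (msp, 1024);
    assert (p == 0 || mspace_is_heap_object (msp, p));
    mspace_free (msp, p);

    // Unmaps every segment the space owns (externally supplied memory,
    // as with create_mspace_with_base, is left alone) and returns the
    // number of bytes freed.
    size_t released = destroy_mspace (msp);
    (void) released;
  }
*/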
4052 int mspace_track_large_chunks(mspace msp, int enable) {
4054 mstate ms = (mstate)msp;
4055 if (!PREACTION(ms)) {
4056 if (!use_mmap(ms)) {
4069 size_t destroy_mspace(mspace msp) {
4071 mstate ms = (mstate)msp;
4073 msegmentptr sp = &ms->seg;
4074 (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */
4076 char* base = sp->base;
4077 size_t size = sp->size;
4078 flag_t flag = sp->sflags;
4079 (void)base; /* placate people compiling -Wunused-variable */
4081 if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
4082 CALL_MUNMAP(base, size) == 0)
4087 USAGE_ERROR_ACTION(ms,ms);
4092 void mspace_get_address_and_size (mspace msp, char **addrp, size_t *sizep)
4098 this_seg = &ms->seg;
4100 *addrp = this_seg->base;
4101 *sizep = this_seg->size;
4104 int mspace_is_heap_object (mspace msp, void *p)
4112 this_seg = &ms->seg;
4117 base = this_seg->base;
4118 if (pp >= base && pp < (base + this_seg->size))
4120 this_seg = this_seg->next;
4123 if (pp > ms->least_addr && pp <= ms->least_addr + ms->footprint)
4129 void *mspace_least_addr (mspace msp)
4131 mstate ms = (mstate) msp;
4132 return (void *) ms->least_addr;
4135 void mspace_disable_expand (mspace msp)
4137 mstate ms = (mstate)msp;
4139 disable_expand (ms);
4142 int mspace_enable_disable_trace (mspace msp, int enable)
4144 mstate ms = (mstate)msp;
4145 int was_enabled = 0;
4155 return (was_enabled);
4158 int mspace_is_traced (mspace msp)
4160 mstate ms = (mstate)msp;
4167 void* mspace_get_aligned (mspace msp,
4168 unsigned long n_user_data_bytes,
4169 unsigned long align,
4170 unsigned long align_offset) {
4172 unsigned long searchp;
4173 unsigned *wwp; /* "where's Waldo" pointer */
4174 mstate ms = (mstate)msp;
4177 * Allocate space for the "Where's Waldo?" pointer, which records
4178 * the offset back to the base of the dlmalloc object
4180 n_user_data_bytes += sizeof(unsigned);
4183 * Alignment requests smaller than a machine word (uword) are ignored
4185 if (align < sizeof (uword)) {
4186 rv = mspace_malloc (msp, n_user_data_bytes);
4190 if (use_trace(ms)) {
4191 mchunkptr p = mem2chunk(rv);
4192 size_t psize = chunksize(p);
4194 mheap_get_trace ((unsigned long)rv + sizeof (unsigned), psize);
4197 wwp = (unsigned *)rv;
4199 rv += sizeof (unsigned);
4205 * Alignment requests greater than 4K must be at offset zero,
4206 * and must be freed using mspace_put_no_offset - or never freed -
4207 * since the "Where's Waldo?" pointer would waste too much space.
4209 * Waldo is the address of the chunk of memory returned by mspace_malloc,
4210 * which we need later to call mspace_free...
4212 if (align > 4<<10 || align_offset == ~0UL) {
4213 n_user_data_bytes -= sizeof(unsigned);
4214 assert(align_offset == 0);
4215 rv = internal_memalign(ms, (size_t)align, n_user_data_bytes);
4217 /* Trace the allocation */
4218 if (rv && use_trace(ms)) {
4219 mchunkptr p = mem2chunk(rv);
4220 size_t psize = chunksize(p);
4221 mheap_get_trace ((unsigned long)rv, psize);
4226 align = clib_max (align, MALLOC_ALIGNMENT);
4227 align = max_pow2 (align);
4229 /* Correct align offset to be smaller than alignment. */
4230 align_offset &= (align - 1);
4232 n_user_data_bytes += align;
4233 rv = mspace_malloc (msp, n_user_data_bytes);
4238 /* Honor the alignment request */
4239 searchp = (unsigned long)(rv + sizeof (unsigned));
4241 #if 0 /* this is the idea... */
4242 while ((searchp + align_offset) % align)
4247 unsigned long where_now, delta;
4249 where_now = (searchp + align_offset) % align;
4250 delta = align - where_now;
4255 wwp = (unsigned *)(searchp - sizeof(unsigned));
4256 *wwp = (searchp - (((unsigned long) rv) + sizeof (*wwp)));
4257 assert (*wwp < align);
4259 if (use_trace(ms)) {
4260 mchunkptr p = mem2chunk(rv);
4261 size_t psize = chunksize(p);
4262 mheap_get_trace (searchp, psize);
4264 return (void *) searchp;
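/*
  Illustrative sketch (not part of the allocator) of the "Where's Waldo?"
  bookkeeping above, simplified to an align_offset of zero: the unsigned
  word just before the pointer handed to the caller records how far that
  pointer was advanced past the underlying dlmalloc object, so the object
  can be recovered at free time.  All names below are hypothetical.

  #include <assert.h>
  #include <stdint.h>
  #include <stdlib.h>
  #include <string.h>

  // Advance base to an aligned address, recording the delta just before it.
  static void* stash_offset_and_align (void* base, uintptr_t align)
  {
    uintptr_t user = ((uintptr_t) base + sizeof (unsigned) + (align - 1))
                     & ~(align - 1);
    unsigned delta = (unsigned) (user - ((uintptr_t) base + sizeof (unsigned)));
    memcpy ((void*) (user - sizeof (unsigned)), &delta, sizeof (delta));
    return (void*) user;
  }

  // Walk back from the user pointer to the original object.
  static void* recover_base (void* user)
  {
    unsigned delta;
    memcpy (&delta, (char*) user - sizeof (unsigned), sizeof (delta));
    return (char*) user - sizeof (unsigned) - delta;
  }

  int main (void)
  {
    void* raw = malloc (256 + 64);           // over-allocate for alignment
    if (raw == 0)
      return 0;
    void* user = stash_offset_and_align (raw, 64);
    assert (((uintptr_t) user & 63) == 0);   // aligned as requested
    assert (recover_base (user) == raw);     // base recoverable for free()
    free (raw);
    return 0;
  }
*/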
4267 void mspace_put (mspace msp, void *p_arg)
4269 char *object_header;
4271 mstate ms = (mstate)msp;
4273 /* Find the object header delta */
4274 wwp = (unsigned *)p_arg;
4277 /* Recover the dlmalloc object pointer */
4278 object_header = (char *)wwp;
4279 object_header -= *wwp;
4281 /* Tracing (if enabled) */
4284 mchunkptr p = mem2chunk(object_header);
4285 size_t psize = chunksize(p);
4287 mheap_put_trace ((unsigned long)p_arg, psize);
4291 /* Poison the object */
4293 size_t psize = mspace_usable_size (object_header);
4294 memset (object_header, 0x13, psize);
4298 /* And free it... */
4299 mspace_free (msp, object_header);
4302 void mspace_put_no_offset (mspace msp, void *p_arg)
4304 mstate ms = (mstate)msp;
4308 mchunkptr p = mem2chunk(p_arg);
4309 size_t psize = chunksize(p);
4311 mheap_put_trace ((unsigned long)p_arg, psize);
4313 mspace_free (msp, p_arg);
4316 size_t mspace_usable_size_with_delta (const void *p)
4319 char *object_header;
4322 /* Find the object header delta */
4323 wwp = (unsigned *)p;
4326 /* Recover the dlmalloc object pointer */
4327 object_header = (char *)wwp;
4328 object_header -= *wwp;
4330 usable_size = mspace_usable_size (object_header);
4331 /* account for the offset and the size of the offset... */
4332 usable_size -= (*wwp + sizeof (*wwp));
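/*
  Usage sketch (illustrative): a round trip through the offset-aware entry
  points above.  Assumes an mspace created elsewhere; the 512-byte size and
  64-byte alignment are arbitrary.

  #include <assert.h>
  #include <stddef.h>
  #include <vppinfra/dlmalloc.h>

  static void aligned_round_trip (mspace msp)
  {
    // 512 user bytes, 64-byte alignment, no extra offset
    void* p = mspace_get_aligned (msp, 512, 64, 0);
    if (p == 0)
      return;

    assert (((unsigned long) p & 63) == 0);

    // usable size is reported net of the hidden offset word
    size_t n = mspace_usable_size_with_delta (p);
    assert (n >= 512);

    // mspace_put recovers the real dlmalloc object via the offset word
    mspace_put (msp, p);
  }
*/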
4337 mspace versions of routines are near-clones of the global
4338 versions. This is not so nice but better than the alternatives.
4341 void* mspace_malloc(mspace msp, size_t bytes) {
4342 mstate ms = (mstate)msp;
4343 if (!ok_magic(ms)) {
4344 USAGE_ERROR_ACTION(ms,ms);
4347 if (!PREACTION(ms)) {
4350 if (bytes <= MAX_SMALL_REQUEST) {
4353 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
4354 idx = small_index(nb);
4355 smallbits = ms->smallmap >> idx;
4357 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4359 idx += ~smallbits & 1; /* Uses next bin if idx empty */
4360 b = smallbin_at(ms, idx);
4362 assert(chunksize(p) == small_index2size(idx));
4363 unlink_first_small_chunk(ms, b, p, idx);
4364 set_inuse_and_pinuse(ms, p, small_index2size(idx));
4366 check_malloced_chunk(ms, mem, nb);
4370 else if (nb > ms->dvsize) {
4371 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4375 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
4376 binmap_t leastbit = least_bit(leftbits);
4377 compute_bit2idx(leastbit, i);
4378 b = smallbin_at(ms, i);
4380 assert(chunksize(p) == small_index2size(i));
4381 unlink_first_small_chunk(ms, b, p, i);
4382 rsize = small_index2size(i) - nb;
4383 /* Fit here cannot be remainderless if 4byte sizes */
4384 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4385 set_inuse_and_pinuse(ms, p, small_index2size(i));
4387 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4388 r = chunk_plus_offset(p, nb);
4389 set_size_and_pinuse_of_free_chunk(r, rsize);
4390 replace_dv(ms, r, rsize);
4393 check_malloced_chunk(ms, mem, nb);
4397 else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
4398 check_malloced_chunk(ms, mem, nb);
4403 else if (bytes >= MAX_REQUEST)
4404 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4406 nb = pad_request(bytes);
4407 if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
4408 check_malloced_chunk(ms, mem, nb);
4413 if (nb <= ms->dvsize) {
4414 size_t rsize = ms->dvsize - nb;
4415 mchunkptr p = ms->dv;
4416 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4417 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
4419 set_size_and_pinuse_of_free_chunk(r, rsize);
4420 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4422 else { /* exhaust dv */
4423 size_t dvs = ms->dvsize;
4426 set_inuse_and_pinuse(ms, p, dvs);
4429 check_malloced_chunk(ms, mem, nb);
4433 else if (nb < ms->topsize) { /* Split top */
4434 size_t rsize = ms->topsize -= nb;
4435 mchunkptr p = ms->top;
4436 mchunkptr r = ms->top = chunk_plus_offset(p, nb);
4437 r->head = rsize | PINUSE_BIT;
4438 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4440 check_top_chunk(ms, ms->top);
4441 check_malloced_chunk(ms, mem, nb);
4445 mem = sys_alloc(ms, nb);
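/*
  Illustrative sketch (not part of the allocator) of the split-or-exhaust
  policy applied to the designated-victim and top chunks above: a free chunk
  is split only when the remainder could still stand alone as a chunk;
  otherwise the request absorbs the whole chunk.  MIN_CHUNK and consume()
  are hypothetical stand-ins for the real constants and macros.

  #include <assert.h>
  #include <stddef.h>

  enum { MIN_CHUNK = 32 };   // assumed minimum chunk size for this sketch

  // Bytes actually consumed from a free chunk of size avail when request
  // bytes are needed; *leftover receives the size of any surviving remainder.
  static size_t consume (size_t avail, size_t request, size_t* leftover)
  {
    if (avail - request >= MIN_CHUNK) {
      *leftover = avail - request;     // split: remainder stays free
      return request;
    }
    *leftover = 0;                     // exhaust: remainder too small to keep
    return avail;
  }

  int main (void)
  {
    size_t rest;
    assert (consume (128, 64, &rest) == 64 && rest == 64);   // split
    assert (consume (80, 64, &rest) == 80 && rest == 0);     // exhaust
    return 0;
  }
*/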
4455 void mspace_free(mspace msp, void* mem) {
4457 mchunkptr p = mem2chunk(mem);
4459 mstate fm = get_mstate_for(p);
4460 (void)msp; /* placate people compiling -Wunused */
4462 mstate fm = (mstate)msp;
4463 #endif /* FOOTERS */
4464 if (!ok_magic(fm)) {
4465 USAGE_ERROR_ACTION(fm, p);
4468 if (!PREACTION(fm)) {
4469 check_inuse_chunk(fm, p);
4470 if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
4471 size_t psize = chunksize(p);
4472 mchunkptr next = chunk_plus_offset(p, psize);
4474 size_t prevsize = p->prev_foot;
4475 if (is_mmapped(p)) {
4476 psize += prevsize + MMAP_FOOT_PAD;
4477 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
4478 fm->footprint -= psize;
4482 mchunkptr prev = chunk_minus_offset(p, prevsize);
4485 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
4487 unlink_chunk(fm, p, prevsize);
4489 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
4491 set_free_with_pinuse(p, psize, next);
4500 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4501 if (!cinuse(next)) { /* consolidate forward */
4502 if (next == fm->top) {
4503 size_t tsize = fm->topsize += psize;
4505 p->head = tsize | PINUSE_BIT;
4510 if (should_trim(fm, tsize))
4514 else if (next == fm->dv) {
4515 size_t dsize = fm->dvsize += psize;
4517 set_size_and_pinuse_of_free_chunk(p, dsize);
4521 size_t nsize = chunksize(next);
4523 unlink_chunk(fm, next, nsize);
4524 set_size_and_pinuse_of_free_chunk(p, psize);
4532 set_free_with_pinuse(p, psize, next);
4534 if (is_small(psize)) {
4535 insert_small_chunk(fm, p, psize);
4536 check_free_chunk(fm, p);
4539 tchunkptr tp = (tchunkptr)p;
4540 insert_large_chunk(fm, tp, psize);
4541 check_free_chunk(fm, p);
4542 if (--fm->release_checks == 0)
4543 release_unused_segments(fm);
4549 USAGE_ERROR_ACTION(fm, p);
4556 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
4559 mstate ms = (mstate)msp;
4560 if (!ok_magic(ms)) {
4561 USAGE_ERROR_ACTION(ms,ms);
4564 if (n_elements != 0) {
4565 req = n_elements * elem_size;
4566 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
4567 (req / n_elements != elem_size))
4568 req = MAX_SIZE_T; /* force downstream failure on overflow */
4570 mem = internal_malloc(ms, req);
4571 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4572 memset(mem, 0, req);
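/*
  Illustrative sketch (not part of the allocator) of the overflow guard used
  above: if either operand has bits above the low 16, the product could wrap
  size_t, so the multiplication is re-checked by division and the request is
  forced to the maximum value (guaranteed to fail downstream) on overflow.

  #include <assert.h>
  #include <stddef.h>

  static size_t checked_mul (size_t n, size_t size)
  {
    size_t req = n * size;
    // The cheap mask test skips the division in the common small case.
    if (((n | size) & ~(size_t) 0xffff) && n != 0 && req / n != size)
      req = (size_t) -1;   // force downstream allocation failure
    return req;
  }

  int main (void)
  {
    assert (checked_mul (1000, 1000) == 1000000);            // no overflow
    assert (checked_mul ((size_t) -1, 16) == (size_t) -1);   // overflow caught
    return 0;
  }
*/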
4576 void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
4579 mem = mspace_malloc(msp, bytes);
4581 else if (bytes >= MAX_REQUEST) {
4582 MALLOC_FAILURE_ACTION;
4584 #ifdef REALLOC_ZERO_BYTES_FREES
4585 else if (bytes == 0) {
4586 mspace_free(msp, oldmem);
4588 #endif /* REALLOC_ZERO_BYTES_FREES */
4590 size_t nb = request2size(bytes);
4591 mchunkptr oldp = mem2chunk(oldmem);
4593 mstate m = (mstate)msp;
4595 mstate m = get_mstate_for(oldp);
4597 USAGE_ERROR_ACTION(m, oldmem);
4600 #endif /* FOOTERS */
4601 if (!PREACTION(m)) {
4602 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
4605 check_inuse_chunk(m, newp);
4606 mem = chunk2mem(newp);
4609 mem = mspace_malloc(m, bytes);
4611 size_t oc = chunksize(oldp) - overhead_for(oldp);
4612 memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
4613 mspace_free(m, oldmem);
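/*
  Usage sketch (illustrative): mspace_realloc first tries to resize the
  chunk in place and only then falls back to allocate-copy-free, so the
  returned pointer may or may not equal the original.  On failure it
  returns 0 and the old allocation remains valid.  Assumes an mspace
  created elsewhere.

  #include <stddef.h>
  #include <vppinfra/dlmalloc.h>

  static void* grow_buffer (mspace msp, void* buf, size_t new_size)
  {
    void* bigger = mspace_realloc (msp, buf, new_size);
    return bigger ? bigger : buf;   // keep the old buffer if resizing failed
  }
*/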
4621 void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {
4624 if (bytes >= MAX_REQUEST) {
4625 MALLOC_FAILURE_ACTION;
4628 size_t nb = request2size(bytes);
4629 mchunkptr oldp = mem2chunk(oldmem);
4631 mstate m = (mstate)msp;
4633 mstate m = get_mstate_for(oldp);
4634 (void)msp; /* placate people compiling -Wunused */
4636 USAGE_ERROR_ACTION(m, oldmem);
4639 #endif /* FOOTERS */
4640 if (!PREACTION(m)) {
4641 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
4644 check_inuse_chunk(m, newp);
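/*
  Usage sketch (illustrative): the in-place variant never moves the chunk.
  It returns the original pointer when the existing chunk could be resized
  and 0 when it could not, leaving the old allocation untouched either way.

  #include <stddef.h>
  #include <vppinfra/dlmalloc.h>

  static int try_grow_in_place (mspace msp, void* p, size_t new_size)
  {
    return mspace_realloc_in_place (msp, p, new_size) != 0;
  }
*/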
4653 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
4654 mstate ms = (mstate)msp;
4655 if (!ok_magic(ms)) {
4656 USAGE_ERROR_ACTION(ms,ms);
4659 if (alignment <= MALLOC_ALIGNMENT)
4660 return mspace_malloc(msp, bytes);
4661 return internal_memalign(ms, alignment, bytes);
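/*
  Usage sketch (illustrative): alignments at or below MALLOC_ALIGNMENT fall
  through to a plain mspace_malloc, as the code above shows; larger requests
  go through internal_memalign.  Assumes an mspace created elsewhere; 64 is
  an arbitrary cache-line-sized alignment.

  #include <assert.h>
  #include <stddef.h>
  #include <vppinfra/dlmalloc.h>

  static void* cacheline_alloc (mspace msp, size_t bytes)
  {
    void* p = mspace_memalign (msp, 64, bytes);
    assert (p == 0 || ((unsigned long) p & 63) == 0);
    return p;
  }
*/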
4664 void** mspace_independent_calloc(mspace msp, size_t n_elements,
4665 size_t elem_size, void* chunks[]) {
4666 size_t sz = elem_size; /* serves as 1-element array */
4667 mstate ms = (mstate)msp;
4668 if (!ok_magic(ms)) {
4669 USAGE_ERROR_ACTION(ms,ms);
4672 return ialloc(ms, n_elements, &sz, 3, chunks);
4675 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
4676 size_t sizes[], void* chunks[]) {
4677 mstate ms = (mstate)msp;
4678 if (!ok_magic(ms)) {
4679 USAGE_ERROR_ACTION(ms,ms);
4682 return ialloc(ms, n_elements, sizes, 0, chunks);
4685 size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) {
4686 return internal_bulk_free((mstate)msp, array, nelem);
4689 #if MALLOC_INSPECT_ALL
4690 void mspace_inspect_all(mspace msp,
4691 void(*handler)(void *start,
4694 void* callback_arg),
4696 mstate ms = (mstate)msp;
4698 if (!PREACTION(ms)) {
4699 internal_inspect_all(ms, handler, arg);
4704 USAGE_ERROR_ACTION(ms,ms);
4707 #endif /* MALLOC_INSPECT_ALL */
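/*
  Usage sketch (illustrative), for builds with MALLOC_INSPECT_ALL: a handler
  that tallies in-use bytes.  Following the documented malloc_inspect_all
  convention, the callback receives (start, end, used_bytes, callback_arg)
  for each region and is invoked while the heap lock is held, so it must not
  call back into the allocator.

  #include <stddef.h>
  #include <vppinfra/dlmalloc.h>

  static void count_used (void* start, void* end, size_t used_bytes,
                          void* callback_arg)
  {
    (void) start;
    (void) end;
    *(size_t*) callback_arg += used_bytes;   // nonzero only for in-use chunks
  }

  static size_t total_used (mspace msp)
  {
    size_t total = 0;
    mspace_inspect_all (msp, count_used, &total);
    return total;
  }
*/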
4709 int mspace_trim(mspace msp, size_t pad) {
4711 mstate ms = (mstate)msp;
4713 if (!PREACTION(ms)) {
4714 result = sys_trim(ms, pad);
4719 USAGE_ERROR_ACTION(ms,ms);
4724 #if !NO_MALLOC_STATS
4725 void mspace_malloc_stats(mspace msp) {
4726 mstate ms = (mstate)msp;
4728 internal_malloc_stats(ms);
4731 USAGE_ERROR_ACTION(ms,ms);
4734 #endif /* NO_MALLOC_STATS */
4736 size_t mspace_footprint(mspace msp) {
4738 mstate ms = (mstate)msp;
4740 result = ms->footprint;
4743 USAGE_ERROR_ACTION(ms,ms);
4748 size_t mspace_max_footprint(mspace msp) {
4750 mstate ms = (mstate)msp;
4752 result = ms->max_footprint;
4755 USAGE_ERROR_ACTION(ms,ms);
4760 size_t mspace_footprint_limit(mspace msp) {
4762 mstate ms = (mstate)msp;
4764 size_t maf = ms->footprint_limit;
4765 result = (maf == 0) ? MAX_SIZE_T : maf;
4768 USAGE_ERROR_ACTION(ms,ms);
4773 size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {
4775 mstate ms = (mstate)msp;
4778 result = granularity_align(1); /* Use minimal size */
4779 if (bytes == MAX_SIZE_T)
4780 result = 0; /* disable */
4782 result = granularity_align(bytes);
4783 ms->footprint_limit = result;
4786 USAGE_ERROR_ACTION(ms,ms);
4792 struct dlmallinfo mspace_mallinfo(mspace msp) {
4793 mstate ms = (mstate)msp;
4794 if (!ok_magic(ms)) {
4795 USAGE_ERROR_ACTION(ms,ms);
4797 return internal_mallinfo(ms);
4799 #endif /* NO_MALLINFO */
4801 size_t mspace_usable_size(const void* mem) {
4803 mchunkptr p = mem2chunk(mem);
4805 return chunksize(p) - overhead_for(p);
4810 int mspace_mallopt(int param_number, int value) {
4811 return change_mparam(param_number, value);
4814 #endif /* MSPACES */
4817 /* -------------------- Alternative MORECORE functions ------------------- */
4820 Guidelines for creating a custom version of MORECORE:
4822 * For best performance, MORECORE should allocate in multiples of pagesize.
4823 * MORECORE may allocate more memory than requested. (Or even less,
4824 but this will usually result in a malloc failure.)
4825 * MORECORE must not allocate memory when given argument zero, but
4826 instead return one past the end address of memory from previous nonzero call.
4828 * For best performance, consecutive calls to MORECORE with positive
4829 arguments should return increasing addresses, indicating that
4830 space has been contiguously extended.
4831 * Even though consecutive calls to MORECORE need not return contiguous
4832 addresses, it must be OK for malloc'ed chunks to span multiple
4833 regions in those cases where they do happen to be contiguous.
4834 * MORECORE need not handle negative arguments -- it may instead
4835 just return MFAIL when given negative arguments.
4836 Negative arguments are always multiples of pagesize. MORECORE
4837 must not misinterpret negative args as large positive unsigned
4838 args. You can suppress all such calls from even occurring by defining
4839 MORECORE_CANNOT_TRIM.
4841 As an example alternative MORECORE, here is a custom allocator
4842 kindly contributed for pre-OSX macOS. It uses virtually but not
4843 necessarily physically contiguous non-paged memory (locked in,
4844 present and won't get swapped out). You can use it by uncommenting
4845 this section, adding some #includes, and setting up the appropriate
4848 #define MORECORE osMoreCore
4850 There is also a shutdown routine that should somehow be called for
4851 cleanup upon program exit.
4853 #define MAX_POOL_ENTRIES 100
4854 #define MINIMUM_MORECORE_SIZE (64 * 1024U)
4855 static int next_os_pool;
4856 void *our_os_pools[MAX_POOL_ENTRIES];
4858 void *osMoreCore(int size)
4861 static void *sbrk_top = 0;
4865 if (size < MINIMUM_MORECORE_SIZE)
4866 size = MINIMUM_MORECORE_SIZE;
4867 if (CurrentExecutionLevel() == kTaskLevel)
4868 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
4871 return (void *) MFAIL;
4873 // save ptrs so they can be freed during cleanup
4874 our_os_pools[next_os_pool] = ptr;
4876 ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
4877 sbrk_top = (char *) ptr + size;
4882 // we don't currently support shrink behavior
4883 return (void *) MFAIL;
4891 // cleanup any allocated memory pools
4892 // called as last thing before shutting down driver
4894 void osCleanupMem(void)
4898 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
4901 PoolDeallocate(*ptr);
4909 /* -----------------------------------------------------------------------
4911 v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea
4912 * fix bad comparison in dlposix_memalign
4913 * don't reuse adjusted asize in sys_alloc
4914 * add LOCK_AT_FORK -- thanks to Kirill Artamonov for the suggestion
4915 * reduce compiler warnings -- thanks to all who reported/suggested these
4917 v2.8.5 Sun May 22 10:26:02 2011 Doug Lea (dl at gee)
4918 * Always perform unlink checks unless INSECURE
4919 * Add posix_memalign.
4920 * Improve realloc to expand in more cases; expose realloc_in_place.
4921 Thanks to Peter Buhr for the suggestion.
4922 * Add footprint_limit, inspect_all, bulk_free. Thanks
4923 to Barry Hayes and others for the suggestions.
4924 * Internal refactorings to avoid calls while holding locks
4925 * Use non-reentrant locks by default. Thanks to Roland McGrath
4927 * Small fixes to mspace_destroy, reset_on_error.
4928 * Various configuration extensions/changes. Thanks
4929 to all who contributed these.
4931 V2.8.4a Thu Apr 28 14:39:43 2011 (dl at gee.cs.oswego.edu)
4932 * Update Creative Commons URL
4934 V2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee)
4935 * Use zeros instead of prev foot for is_mmapped
4936 * Add mspace_track_large_chunks; thanks to Jean Brouwers
4937 * Fix set_inuse in internal_realloc; thanks to Jean Brouwers
4938 * Fix insufficient sys_alloc padding when using 16byte alignment
4939 * Fix bad error check in mspace_footprint
4940 * Adaptations for ptmalloc; thanks to Wolfram Gloger.
4941 * Reentrant spin locks; thanks to Earl Chew and others
4942 * Win32 improvements; thanks to Niall Douglas and Earl Chew
4943 * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options
4944 * Extension hook in malloc_state
4945 * Various small adjustments to reduce warnings on some compilers
4946 * Various configuration extensions/changes for more platforms. Thanks
4947 to all who contributed these.
4949 V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee)
4950 * Add max_footprint functions
4951 * Ensure all appropriate literals are size_t
4952 * Fix conditional compilation problem for some #define settings
4953 * Avoid concatenating segments with the one provided
4954 in create_mspace_with_base
4955 * Rename some variables to avoid compiler shadowing warnings
4956 * Use explicit lock initialization.
4957 * Better handling of sbrk interference.
4958 * Simplify and fix segment insertion, trimming and mspace_destroy
4959 * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
4960 * Thanks especially to Dennis Flanagan for help on these.
4962 V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee)
4963 * Fix memalign brace error.
4965 V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee)
4966 * Fix improper #endif nesting in C++
4967 * Add explicit casts needed for C++
4969 V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee)
4970 * Use trees for large bins
4972 * Use segments to unify sbrk-based and mmap-based system allocation,
4973 removing need for emulation on most platforms without sbrk.
4974 * Default safety checks
4975 * Optional footer checks. Thanks to William Robertson for the idea.
4976 * Internal code refactoring
4977 * Incorporate suggestions and platform-specific changes.
4978 Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
4979 Aaron Bachmann, Emery Berger, and others.
4980 * Speed up non-fastbin processing enough to remove fastbins.
4981 * Remove useless cfree() to avoid conflicts with other apps.
4982 * Remove internal memcpy, memset. Compilers handle builtins better.
4983 * Remove some options that no one ever used and rename others.
4985 V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
4986 * Fix malloc_state bitmap array misdeclaration
4988 V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
4989 * Allow tuning of FIRST_SORTED_BIN_SIZE
4990 * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
4991 * Better detection and support for non-contiguousness of MORECORE.
4992 Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
4993 * Bypass most of malloc if no frees. Thanks To Emery Berger.
4994 * Fix freeing of old top non-contiguous chunk in sysmalloc.
4995 * Raised default trim and map thresholds to 256K.
4996 * Fix mmap-related #defines. Thanks to Lubos Lunak.
4997 * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
4998 * Branch-free bin calculation
4999 * Default trim and mmap thresholds now 256K.
5001 V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
5002 * Introduce independent_comalloc and independent_calloc.
5003 Thanks to Michael Pachos for motivation and help.
5004 * Make optional .h file available
5005 * Allow > 2GB requests on 32bit systems.
5006 * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
5007 Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
5009 * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
5011 * memalign: check alignment arg
5012 * realloc: don't try to shift chunks backwards, since this
5013 leads to more fragmentation in some programs and doesn't
5014 seem to help in any others.
5015 * Collect all cases in malloc requiring system memory into sysmalloc
5016 * Use mmap as backup to sbrk
5017 * Place all internal state in malloc_state
5018 * Introduce fastbins (although similar to 2.5.1)
5019 * Many minor tunings and cosmetic improvements
5020 * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
5021 * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
5022 Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
5023 * Include errno.h to support default failure action.
5025 V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
5026 * return null for negative arguments
5027 * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
5028 * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
5029 (e.g. WIN32 platforms)
5030 * Cleanup header file inclusion for WIN32 platforms
5031 * Cleanup code to avoid Microsoft Visual C++ compiler complaints
5032 * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
5033 memory allocation routines
5034 * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
5035 * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
5036 usage of 'assert' in non-WIN32 code
5037 * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
5039 * Always call 'fREe()' rather than 'free()'
5041 V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
5042 * Fixed ordering problem with boundary-stamping
5044 V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
5045 * Added pvalloc, as recommended by H.J. Liu
5046 * Added 64bit pointer support mainly from Wolfram Gloger
5047 * Added anonymously donated WIN32 sbrk emulation
5048 * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
5049 * malloc_extend_top: fix mask error that caused wastage after
5051 * Add linux mremap support code from HJ Liu
5053 V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
5054 * Integrated most documentation with the code.
5055 * Add support for mmap, with help from
5056 Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5057 * Use last_remainder in more cases.
5058 * Pack bins using idea from colin@nyx10.cs.du.edu
5059 * Use ordered bins instead of best-fit threshold
5060 * Eliminate block-local decls to simplify tracing and debugging.
5061 * Support another case of realloc via move into top
5062 * Fix error occurring when initial sbrk_base not word-aligned.
5063 * Rely on page size for units instead of SBRK_UNIT to
5064 avoid surprises about sbrk alignment conventions.
5065 * Add mallinfo, mallopt. Thanks to Raymond Nijssen
5066 (raymond@es.ele.tue.nl) for the suggestion.
5067 * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
5068 * More precautions for cases where other routines call sbrk,
5069 courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5070 * Added macros etc., allowing use in linux libc from
5071 H.J. Lu (hjl@gnu.ai.mit.edu)
5072 * Inverted this history list
5074 V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
5075 * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
5076 * Removed all preallocation code since under current scheme
5077 the work required to undo bad preallocations exceeds
5078 the work saved in good cases for most test programs.
5079 * No longer use return list or unconsolidated bins since
5080 no scheme using them consistently outperforms those that don't
5081 given above changes.
5082 * Use best fit for very large chunks to prevent some worst-cases.
5083 * Added some support for debugging
5085 V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
5086 * Removed footers when chunks are in use. Thanks to
5087 Paul Wilson (wilson@cs.texas.edu) for the suggestion.
5089 V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
5090 * Added malloc_trim, with help from Wolfram Gloger
5091 (wmglo@Dent.MED.Uni-Muenchen.DE).
5093 V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
5095 V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
5096 * realloc: try to expand in both directions
5097 * malloc: swap order of clean-bin strategy;
5098 * realloc: only conditionally expand backwards
5099 * Try not to scavenge used bins
5100 * Use bin counts as a guide to preallocation
5101 * Occasionally bin return list chunks in first scan
5102 * Add a few optimizations from colin@nyx10.cs.du.edu
5104 V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
5105 * faster bin computation & slightly different binning
5106 * merged all consolidations to one part of malloc proper
5107 (eliminating old malloc_find_space & malloc_clean_bin)
5108 * Scan 2 returns chunks (not just 1)
5109 * Propagate failure in realloc if malloc returns 0
5110 * Add stuff to allow compilation on non-ANSI compilers
5111 from kpv@research.att.com
5113 V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
5114 * removed potential for odd address access in prev_chunk
5115 * removed dependency on getpagesize.h
5116 * misc cosmetics and a bit more internal documentation
5117 * anticosmetics: mangled names in macros to evade debugger strangeness
5118 * tested on sparc, hp-700, dec-mips, rs6000
5119 with gcc & native cc (hp, dec only) allowing
5120 Detlefs & Zorn comparison study (in SIGPLAN Notices.)
5122 Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
5123 * Based loosely on libg++-1.2X malloc. (It retains some of the overall
5124 structure of old version, but most details differ.)