#ifndef DLMALLOC_VERSION
#define DLMALLOC_VERSION 20804
#define LACKS_FCNTL_H
#define WIN32_LEAN_AND_MEAN
#define HAVE_MORECORE 0
#define LACKS_UNISTD_H
#define LACKS_SYS_PARAM_H
#define LACKS_SYS_MMAN_H
#define LACKS_STRING_H
#define LACKS_STRINGS_H
#define LACKS_SYS_TYPES_H
#define LACKS_ERRNO_H
#ifndef MALLOC_FAILURE_ACTION
#define MALLOC_FAILURE_ACTION
#define MMAP_CLEARS 0
#define MMAP_CLEARS 1
#if defined(DARWIN) || defined(_DARWIN)
#ifndef HAVE_MORECORE
#define HAVE_MORECORE 0
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT ((size_t)16U)
#ifndef LACKS_SYS_TYPES_H
#include <sys/types.h>
#if (defined(__GNUC__) && ((defined(__i386__) || defined(__x86_64__)))) || (defined(_MSC_VER) && _MSC_VER>=1310)
#define SPIN_LOCKS_AVAILABLE 1
#define SPIN_LOCKS_AVAILABLE 0
#define MAX_SIZE_T (~(size_t)0)
#define ONLY_MSPACES 0
#define ONLY_MSPACES 1
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT ((size_t)8U)
#define ABORT abort()
#ifndef ABORT_ON_ASSERT_FAILURE
#define ABORT_ON_ASSERT_FAILURE 1
#ifndef PROCEED_ON_ERROR
#define PROCEED_ON_ERROR 0
#ifndef USE_SPIN_LOCKS
#if USE_LOCKS && SPIN_LOCKS_AVAILABLE
#define USE_SPIN_LOCKS 1
#define USE_SPIN_LOCKS 0
#define MMAP_CLEARS 1
#define HAVE_MREMAP 1
#define HAVE_MREMAP 0
#ifndef MALLOC_FAILURE_ACTION
#define MALLOC_FAILURE_ACTION errno = ENOMEM;
#ifndef HAVE_MORECORE
#define HAVE_MORECORE 0
#define HAVE_MORECORE 1
#define MORECORE_CONTIGUOUS 0
#define MORECORE_DEFAULT sbrk
#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#ifndef DEFAULT_GRANULARITY
#if (MORECORE_CONTIGUOUS || defined(WIN32))
#define DEFAULT_GRANULARITY (0)
#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
#ifndef DEFAULT_TRIM_THRESHOLD
#ifndef MORECORE_CANNOT_TRIM
#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
#ifndef MAX_RELEASE_CHECK_RATE
#define MAX_RELEASE_CHECK_RATE 4095
#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
#ifndef USE_BUILTIN_FFS
#define USE_BUILTIN_FFS 0
#ifndef USE_DEV_RANDOM
#define USE_DEV_RANDOM 0
#define NO_MALLINFO 0
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#ifndef NO_SEGMENT_TRAVERSAL
#define NO_SEGMENT_TRAVERSAL 0

#define M_TRIM_THRESHOLD (-1)
#define M_GRANULARITY (-2)
#define M_MMAP_THRESHOLD (-3)

#ifdef HAVE_USR_INCLUDE_MALLOC_H
#include "/usr/include/malloc.h"
#ifndef STRUCT_MALLINFO_DECLARED
#define STRUCT_MALLINFO_DECLARED 1

#if defined(__GNUC__)
#define FORCEINLINE __inline __attribute__ ((always_inline))
#elif defined(_MSC_VER)
#define FORCEINLINE __forceinline
#if defined(__GNUC__)
#define NOINLINE __attribute__ ((noinline))
#elif defined(_MSC_VER)
#define NOINLINE __declspec(noinline)
#define FORCEINLINE inline
#ifndef USE_DL_PREFIX
#define dlcalloc               mycalloc
#define dlfree                 myfree
#define dlmalloc               mymalloc
#define dlmemalign             mymemalign
#define dlrealloc              myrealloc
#define dlvalloc               myvalloc
#define dlpvalloc              mypvalloc
#define dlmallinfo             mymallinfo
#define dlmallopt              mymallopt
#define dlmalloc_trim          mymalloc_trim
#define dlmalloc_stats         mymalloc_stats
#define dlmalloc_usable_size   mymalloc_usable_size
#define dlmalloc_footprint     mymalloc_footprint
#define dlmalloc_max_footprint mymalloc_max_footprint
#define dlindependent_calloc   myindependent_calloc
#define dlindependent_comalloc myindependent_comalloc
void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
void** mspace_independent_calloc(mspace msp, size_t n_elements,
                                 size_t elem_size, void* chunks[]);
void** mspace_independent_comalloc(mspace msp, size_t n_elements,
                                   size_t sizes[], void* chunks[]);
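/* Illustration (not part of the original source): a minimal usage sketch
   of the mspace API declared above, assuming a build with MSPACES enabled.
   Kept under "#if 0" so it does not affect compilation. */
#if 0
static void example_mspace_usage(void) {
  mspace msp = create_mspace(0, 0);       /* default capacity, no locking */
  if (msp != 0) {
    void* p = mspace_malloc(msp, 128);    /* allocate from this space only */
    void* q = mspace_calloc(msp, 16, 32); /* zeroed 16*32-byte block */
    mspace_free(msp, p);
    mspace_free(msp, q);
    destroy_mspace(msp);                  /* releases all remaining memory */
  }
}
#endif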
#pragma warning( disable : 4146 )
#ifndef LACKS_ERRNO_H
#ifndef LACKS_STDLIB_H
#if ABORT_ON_ASSERT_FAILURE
#define assert(x) if(!(x)) ABORT
#ifndef LACKS_STRING_H
#ifndef LACKS_STRINGS_H
#include <strings.h>
#ifndef LACKS_SYS_MMAN_H
#if (defined(linux) && !defined(__USE_GNU))
#include <sys/mman.h>
#include <sys/mman.h>
#ifndef LACKS_FCNTL_H
#ifndef LACKS_UNISTD_H
#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
extern void* sbrk(ptrdiff_t);
#include <pthread.h>
#if defined (__SVR4) && defined (__sun)

LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
#pragma intrinsic (_InterlockedCompareExchange)
#pragma intrinsic (_InterlockedExchange)
#define interlockedcompareexchange _InterlockedCompareExchange
#define interlockedexchange _InterlockedExchange

#if defined(_MSC_VER) && _MSC_VER>=1300
#ifndef BitScanForward
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#define BitScanForward _BitScanForward
#define BitScanReverse _BitScanReverse
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse)
#ifndef malloc_getpagesize
# ifdef _SC_PAGESIZE
# ifndef _SC_PAGE_SIZE
# define _SC_PAGE_SIZE _SC_PAGESIZE
# ifdef _SC_PAGE_SIZE
# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
extern size_t getpagesize();
# define malloc_getpagesize getpagesize()
# define malloc_getpagesize getpagesize()
# ifndef LACKS_SYS_PARAM_H
# include <sys/param.h>
# ifdef EXEC_PAGESIZE
# define malloc_getpagesize EXEC_PAGESIZE
# define malloc_getpagesize NBPG
# define malloc_getpagesize (NBPG * CLSIZE)
# define malloc_getpagesize NBPC
# define malloc_getpagesize PAGESIZE
# define malloc_getpagesize ((size_t)4096U)
#define SIZE_T_SIZE (sizeof(size_t))
#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
#define SIZE_T_ZERO ((size_t)0)
#define SIZE_T_ONE ((size_t)1)
#define SIZE_T_TWO ((size_t)2)
#define SIZE_T_FOUR ((size_t)4)
#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)

#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)

#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)

#define align_offset(A)\
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
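/* Worked example (illustration, not original source): with the default
   MALLOC_ALIGNMENT of 8, CHUNK_ALIGN_MASK is 7. For an address A == 13,
   (13 & 7) == 5, so align_offset(A) == (8 - 5) & 7 == 3, the number of
   bytes needed to reach the next 8-byte boundary; already-aligned
   addresses yield 0. */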
#define MFAIL ((void*)(MAX_SIZE_T))
#define CMFAIL ((char*)(MFAIL))

#define MUNMAP_DEFAULT(a, s) munmap((a), (s))
#define MMAP_PROT (PROT_READ|PROT_WRITE)
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#ifdef MAP_ANONYMOUS
#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
#define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
#define MMAP_FLAGS (MAP_PRIVATE)
#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
           (dev_zero_fd = open("/dev/zero", O_RDWR), \
            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
static FORCEINLINE void* win32mmap(size_t size) {
  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  return (ptr != 0)? ptr: MFAIL;
}

static FORCEINLINE void* win32direct_mmap(size_t size) {
  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
                           PAGE_READWRITE);
  return (ptr != 0)? ptr: MFAIL;
}

static FORCEINLINE int win32munmap(void* ptr, size_t size) {
  MEMORY_BASIC_INFORMATION minfo;
  char* cptr = (char*)ptr;
  while (size) {
    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
      return -1;
    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
      return -1;
    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
      return -1;
    cptr += minfo.RegionSize;
    size -= minfo.RegionSize;
  }
  return 0;
}

#define MMAP_DEFAULT(s) win32mmap(s)
#define MUNMAP_DEFAULT(a, s) win32munmap((a), (s))
#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)
#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))

#define CALL_MORECORE(S) MORECORE(S)
#define CALL_MORECORE(S) MORECORE_DEFAULT(S)
#define CALL_MORECORE(S) MFAIL

#define USE_MMAP_BIT (SIZE_T_ONE)
#define CALL_MMAP(s) MMAP(s)
#define CALL_MMAP(s) MMAP_DEFAULT(s)
#define CALL_MUNMAP(a, s) MUNMAP((a), (s))
#define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s))
#define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
#define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
#define USE_MMAP_BIT (SIZE_T_ZERO)
#define MMAP(s) MFAIL
#define MUNMAP(a, s) (-1)
#define DIRECT_MMAP(s) MFAIL
#define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
#define CALL_MMAP(s) MMAP(s)
#define CALL_MUNMAP(a, s) MUNMAP((a), (s))

#if HAVE_MMAP && HAVE_MREMAP
#define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
#define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL

#define USE_NONCONTIGUOUS_BIT (4U)
#define EXTERN_BIT (8U)
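/* Note (illustration, not original source): the CALL_MMAP/CALL_MUNMAP/
   CALL_DIRECT_MMAP macros above are the single points where system mapping
   primitives are invoked; CALL_DIRECT_MMAP serves requests at or above the
   mmap threshold, which are given their own mapping and carry
   MMAP_CHUNK_OVERHEAD rather than CHUNK_OVERHEAD. */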
#if USE_SPIN_LOCKS && SPIN_LOCKS_AVAILABLE

struct pthread_mlock_t {
  volatile unsigned int l;
  unsigned int c;
  pthread_t threadid;
};
#define MLOCK_T struct pthread_mlock_t
#define CURRENT_THREAD pthread_self()
#define INITIAL_LOCK(sl) ((sl)->threadid = 0, (sl)->l = (sl)->c = 0, 0)
#define ACQUIRE_LOCK(sl) pthread_acquire_lock(sl)
#define RELEASE_LOCK(sl) pthread_release_lock(sl)
#define TRY_LOCK(sl) pthread_try_lock(sl)
#define SPINS_PER_YIELD 63

static G4ThreadLocal MLOCK_T malloc_global_mutex = { 0, 0, 0};
static FORCEINLINE int pthread_acquire_lock (MLOCK_T *sl) {
  int spins = 0;
  volatile unsigned int* lp = &sl->l;
  for (;;) {
    if (*lp != 0) {
      if (sl->threadid == CURRENT_THREAD) {
        ++sl->c;
        return 0;
      }
    }
    else {
      /* place args to cmpxchgl in locals to evade oddities in some gccs */
      int cmp = 0, val = 1, ret;
      __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
                            : "=a" (ret)
                            : "r" (val), "m" (*(lp)), "0"(cmp)
                            : "memory", "cc");
      if (!ret) {
        sl->c = 1;
        sl->threadid = CURRENT_THREAD;
        return 0;
      }
    }
    if ((++spins & SPINS_PER_YIELD) == 0) {
#if defined (__SVR4) && defined (__sun)
      thr_yield();
#else
#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)
      sched_yield();
#endif
#endif
    }
  }
}

static FORCEINLINE void pthread_release_lock (MLOCK_T *sl) {
  volatile unsigned int* lp = &sl->l;
  assert(sl->threadid == CURRENT_THREAD);
  if (--sl->c == 0) {
    sl->threadid = 0;
    int prev = 0, ret;
    __asm__ __volatile__ ("lock; xchgl %0, %1"
                          : "=r" (ret)
                          : "m" (*(lp)), "0"(prev)
                          : "memory");
  }
}

static FORCEINLINE int pthread_try_lock (MLOCK_T *sl) {
  volatile unsigned int* lp = &sl->l;
  if (*lp != 0) {
    if (sl->threadid == CURRENT_THREAD) {
      ++sl->c;
      return 1;
    }
  }
  else {
    int cmp = 0, val = 1, ret;
    __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
                          : "=a" (ret)
                          : "r" (val), "m" (*(lp)), "0"(cmp)
                          : "memory", "cc");
    if (!ret) {
      sl->c = 1;
      sl->threadid = CURRENT_THREAD;
      return 1;
    }
  }
  return 0;
}
struct win32_mlock_t {
  volatile long l;
  unsigned int c;
  long threadid;
};
#define MLOCK_T struct win32_mlock_t
#define CURRENT_THREAD GetCurrentThreadId()
#define INITIAL_LOCK(sl) ((sl)->threadid = 0, (sl)->l = (sl)->c = 0, 0)
#define ACQUIRE_LOCK(sl) win32_acquire_lock(sl)
#define RELEASE_LOCK(sl) win32_release_lock(sl)
#define TRY_LOCK(sl) win32_try_lock(sl)
#define SPINS_PER_YIELD 63

static G4ThreadLocal MLOCK_T malloc_global_mutex = { 0, 0, 0};

static FORCEINLINE int win32_acquire_lock (MLOCK_T *sl) {
  int spins = 0;
  for (;;) {
    if (sl->l != 0) {
      if (sl->threadid == CURRENT_THREAD) {
        ++sl->c;
        return 0;
      }
    }
    else {
      if (!interlockedexchange(&sl->l, 1)) {
        sl->c = 1;
        sl->threadid = CURRENT_THREAD;
        return 0;
      }
    }
    if ((++spins & SPINS_PER_YIELD) == 0)
      SleepEx(0, FALSE);
  }
}

static FORCEINLINE void win32_release_lock (MLOCK_T *sl) {
  assert(sl->threadid == CURRENT_THREAD);
  if (--sl->c == 0) {
    sl->threadid = 0;
    interlockedexchange (&sl->l, 0);
  }
}

static FORCEINLINE int win32_try_lock (MLOCK_T *sl) {
  if (sl->l != 0) {
    if (sl->threadid == CURRENT_THREAD) {
      ++sl->c;
      return 1;
    }
  }
  else {
    if (!interlockedexchange(&sl->l, 1)){
      sl->c = 1;
      sl->threadid = CURRENT_THREAD;
      return 1;
    }
  }
  return 0;
}
#define MLOCK_T pthread_mutex_t
#define CURRENT_THREAD pthread_self()
#define INITIAL_LOCK(sl) pthread_init_lock(sl)
#define ACQUIRE_LOCK(sl) pthread_mutex_lock(sl)
#define RELEASE_LOCK(sl) pthread_mutex_unlock(sl)
#define TRY_LOCK(sl) (!pthread_mutex_trylock(sl))

static G4ThreadLocal MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;

#ifndef PTHREAD_MUTEX_RECURSIVE
extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
                                              int __kind));
#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)

static int pthread_init_lock (MLOCK_T *sl) {
  pthread_mutexattr_t attr;
  if (pthread_mutexattr_init(&attr)) return 1;
  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
  if (pthread_mutex_init(sl, &attr)) return 1;
  if (pthread_mutexattr_destroy(&attr)) return 1;
  return 0;
}
#define MLOCK_T CRITICAL_SECTION
#define CURRENT_THREAD GetCurrentThreadId()
#define INITIAL_LOCK(sl) (!InitializeCriticalSectionAndSpinCount((sl), 0x80000000|4000))
#define ACQUIRE_LOCK(sl) (EnterCriticalSection(sl), 0)
#define RELEASE_LOCK(sl) LeaveCriticalSection(sl)
#define TRY_LOCK(sl) TryEnterCriticalSection(sl)
#define NEED_GLOBAL_LOCK_INIT

static G4ThreadLocal MLOCK_T malloc_global_mutex;
static G4ThreadLocal volatile long malloc_global_mutex_status;

static void init_malloc_global_mutex() {
  for (;;) {
    long stat = malloc_global_mutex_status;
    if (stat > 0)
      return;
    if (stat == 0 &&
        interlockedcompareexchange(&malloc_global_mutex_status, -1, 0) == 0) {
      InitializeCriticalSection(&malloc_global_mutex);
      interlockedexchange(&malloc_global_mutex_status, 1);
      return;
    }
    SleepEx(0, FALSE);
  }
}
#define USE_LOCK_BIT (2U)
#define USE_LOCK_BIT (0U)
#define INITIAL_LOCK(l)

#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
#ifndef RELEASE_MALLOC_GLOBAL_LOCK
#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);
#define ACQUIRE_MALLOC_GLOBAL_LOCK()
#define RELEASE_MALLOC_GLOBAL_LOCK()
#define MCHUNK_SIZE (sizeof(mchunk))
#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
#define CHUNK_OVERHEAD (SIZE_T_SIZE)
#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)

#define MIN_CHUNK_SIZE\
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))

#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)

#define pad_request(req) \
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

#define request2size(req) \
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
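/* Worked example (illustration, not original source): on a typical LP64
   build without FOOTERS, SIZE_T_SIZE == 8, CHUNK_OVERHEAD == 8,
   MALLOC_ALIGNMENT == 8, MIN_CHUNK_SIZE == 32 and MIN_REQUEST == 23.
   Then pad_request(100) == (100 + 8 + 7) & ~7 == 112, while a tiny
   request falls through to the minimum: request2size(1) == 32. */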
#define PINUSE_BIT (SIZE_T_ONE)
#define CINUSE_BIT (SIZE_T_TWO)
#define FLAG4_BIT (SIZE_T_FOUR)
#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)

#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)

#define cinuse(p) ((p)->head & CINUSE_BIT)
#define pinuse(p) ((p)->head & PINUSE_BIT)
#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)

#define chunksize(p) ((p)->head & ~(FLAG_BITS))

#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)

#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))

#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))

#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)

#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))

#define set_size_and_pinuse_of_free_chunk(p, s)\
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))

#define set_free_with_pinuse(p, s, n)\
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))

#define overhead_for(p)\
  (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)

#define calloc_must_clear(p) (!is_mmapped(p))
#define calloc_must_clear(p) (1)
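/* Worked example (illustration, not original source): a chunk's head word
   is its size (a multiple of the alignment, so the low bits are free)
   OR'ed with the flag bits above. A free 48-byte chunk whose predecessor
   is in use has head == 48 | PINUSE_BIT == 49: cinuse(p) == 0,
   pinuse(p) != 0, and chunksize(p) == 48. An mmapped chunk has both inuse
   bits clear, which is exactly what is_mmapped tests. */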
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])

#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)

#define NSMALLBINS (32U)
#define NTREEBINS (32U)
#define SMALLBIN_SHIFT (3U)
#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT (8U)
#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)

#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())

#define is_global(M) ((M) == &_gm_)
#define is_initialized(M) ((M)->top != 0)

#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)

#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)

#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)

#define set_lock(M,L)\
 ((M)->mflags = (L)?\
  ((M)->mflags | USE_LOCK_BIT) :\
  ((M)->mflags & ~USE_LOCK_BIT))

#define page_align(S)\
 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))

#define granularity_align(S)\
 (((S) + (mparams.granularity - SIZE_T_ONE))\
  & ~(mparams.granularity - SIZE_T_ONE))

#define mmap_align(S) granularity_align(S)
#define mmap_align(S) page_align(S)

#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)

#define is_page_aligned(S)\
  (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
#define is_granularity_aligned(S)\
  (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)

#define segment_holds(S, A)\
  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
static msegmentptr segment_holding(mstate m, char* addr) {
  msegmentptr sp = &m->seg;
  for (;;) {
    if (addr >= sp->base && addr < sp->base + sp->size)
      return sp;
    if ((sp = sp->next) == 0)
      return 0;
  }
}

static int has_segment_link(mstate m, msegmentptr ss) {
  msegmentptr sp = &m->seg;
  for (;;) {
    if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
      return 1;
    if ((sp = sp->next) == 0)
      return 0;
  }
}
#ifndef MORECORE_CANNOT_TRIM
#define should_trim(M,s) ((s) > (M)->trim_check)
#define should_trim(M,s) (0)

#define TOP_FOOT_SIZE\
  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)

#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
#define PREACTION(M) (0)
#define POSTACTION(M)

#if PROCEED_ON_ERROR
int malloc_corruption_error_count;
static void reset_on_error(mstate m);
#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
#define USAGE_ERROR_ACTION(m, p)

#ifndef CORRUPTION_ERROR_ACTION
#define CORRUPTION_ERROR_ACTION(m) ABORT
#ifndef USAGE_ERROR_ACTION
#define USAGE_ERROR_ACTION(m,p) ABORT

#define check_free_chunk(M,P)
#define check_inuse_chunk(M,P)
#define check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P)
#define check_malloc_state(M)
#define check_top_chunk(M,P)
#define check_free_chunk(M,P) do_check_free_chunk(M,P)
#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
#define check_top_chunk(M,P) do_check_top_chunk(M,P)
#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
#define check_malloc_state(M) do_check_malloc_state(M)
static void do_check_any_chunk(mstate m, mchunkptr p);
static void do_check_top_chunk(mstate m, mchunkptr p);
static void do_check_mmapped_chunk(mstate m, mchunkptr p);
static void do_check_inuse_chunk(mstate m, mchunkptr p);
static void do_check_free_chunk(mstate m, mchunkptr p);
static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
static void do_check_tree(mstate m, tchunkptr t);
static void do_check_treebin(mstate m, bindex_t i);
static void do_check_smallbin(mstate m, bindex_t i);
static void do_check_malloc_state(mstate m);
static int bin_find(mstate m, mchunkptr x);
static size_t traverse_and_check(mstate m);
#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define small_index(s) ((s) >> SMALLBIN_SHIFT)
#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
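/* Worked example (illustration, not original source): with
   SMALLBIN_SHIFT == 3, a 40-byte chunk size maps to bin
   small_index(40) == 5, and small_index2size(5) == 40; every size below
   MIN_LARGE_SIZE (256) satisfies is_small. */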
#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i) (&((M)->treebins[i]))

#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define compute_tree_index(S, I)\
{\
  unsigned int X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int K;\
    __asm__("bsrl\t%1, %0\n\t" : "=r" (K) : "g" (X));\
    I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  }\
}

#elif defined (__INTEL_COMPILER)
#define compute_tree_index(S, I)\
{\
  size_t X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int K = _bit_scan_reverse (X); \
    I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  }\
}

#elif defined(_MSC_VER) && _MSC_VER>=1300
#define compute_tree_index(S, I)\
{\
  size_t X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int K;\
    _BitScanReverse((DWORD *) &K, X);\
    I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  }\
}

#else
#define compute_tree_index(S, I)\
{\
  size_t X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int Y = (unsigned int)X;\
    unsigned int N = ((Y - 0x100) >> 16) & 8;\
    unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
    N += K;\
    N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
    K = 14 - N + ((Y <<= K) >> 15);\
    I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
  }\
}
#endif

#define bit_for_tree_index(i) \
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)

#define leftshift_for_tree_index(i) \
   ((i == NTREEBINS-1)? 0 : \
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

#define minsize_for_tree_index(i) \
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
    (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
#define idx2bit(i) ((binmap_t)(1) << (i))

#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))

#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))

#define least_bit(x) ((x) & -(x))
#define left_bits(x) ((x<<1) | -(x<<1))
#define same_or_left_bits(x) ((x) | -(x))
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  __asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "g" (X));\
  I = (bindex_t)J;\
}

#elif defined (__INTEL_COMPILER)
#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  J = _bit_scan_forward (X); \
  I = (bindex_t)J;\
}

#elif defined(_MSC_VER) && _MSC_VER>=1300
#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  _BitScanForward((DWORD *) &J, X);\
  I = (bindex_t)J;\
}

#elif USE_BUILTIN_FFS
#define compute_bit2idx(X, I) I = ffs(X)-1

#else
#define compute_bit2idx(X, I)\
{\
  unsigned int Y = X - 1;\
  unsigned int K = Y >> (16-4) & 16;\
  unsigned int N = K;        Y >>= K;\
  N += K = Y >> (8-3) &  8;  Y >>= K;\
  N += K = Y >> (4-2) &  4;  Y >>= K;\
  N += K = Y >> (2-1) &  2;  Y >>= K;\
  N += K = Y >> (1-0) &  1;  Y >>= K;\
  I = (bindex_t)(N + Y);\
}
#endif
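/* Worked example for the portable branch above (illustration, not original
   source): for X == 0x8, a single set bit, Y == 7; the shift cascade
   accumulates N == 2 and leaves Y == 1, so I == N + Y == 3, the index of
   the set bit. */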
#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
#define ok_next(p, n) ((char*)(p) < (char*)(n))
#define ok_inuse(p) is_inuse(p)
#define ok_pinuse(p) pinuse(p)
#define ok_address(M, a) (1)
#define ok_next(b, n) (1)
#define ok_inuse(p) (1)
#define ok_pinuse(p) (1)

#if (FOOTERS && !INSECURE)
#define ok_magic(M) ((M)->magic == mparams.magic)
#define ok_magic(M) (1)

#if defined(__GNUC__) && __GNUC__ >= 3
#define RTCHECK(e) __builtin_expect(e, 1)
#define RTCHECK(e) (e)
#define RTCHECK(e) (1)

#define mark_inuse_foot(M,p,s)

#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))

#define mark_inuse_foot(M,p,s)\
  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))

#define get_mstate_for(p)\
  ((mstate)(((mchunkptr)((char*)(p) +\
    (chunksize(p))))->prev_foot ^ mparams.magic))

#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
  mark_inuse_foot(M,p,s))

#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
  mark_inuse_foot(M,p,s))

#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  mark_inuse_foot(M, p, s))
#ifdef NEED_GLOBAL_LOCK_INIT
  if (malloc_global_mutex_status <= 0)
    init_malloc_global_mutex();

  SYSTEM_INFO system_info;
  GetSystemInfo(&system_info);
  psize = system_info.dwPageSize;

  if ((sizeof(size_t) != sizeof(char*)) ||
      (sizeof(int) < 4) ||

#if MORECORE_CONTIGUOUS

  unsigned char buf[sizeof(size_t)];
  if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
      read(fd, buf, sizeof(buf)) == sizeof(buf)) {
    magic = *((size_t *) buf);

  magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
  magic = (size_t)(time(0) ^ (size_t)0x55555555U);
  magic |= (size_t)8U;
  magic &= ~(size_t)7U;

  val = (value == -1)? MAX_SIZE_T : (size_t)value;
  switch(param_number) {
static void do_check_any_chunk(mstate m, mchunkptr p) {

static void do_check_top_chunk(mstate m, mchunkptr p) {

static void do_check_mmapped_chunk(mstate m, mchunkptr p) {

static void do_check_inuse_chunk(mstate m, mchunkptr p) {
  do_check_any_chunk(m, p);
  do_check_mmapped_chunk(m, p);

static void do_check_free_chunk(mstate m, mchunkptr p) {
  do_check_any_chunk(m, p);
  if (p != m->dv && p != m->top) {

static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
  do_check_inuse_chunk(m, p);

static void do_check_tree(mstate m, tchunkptr t) {
  bindex_t tindex = t->index;
    do_check_any_chunk(m, ((mchunkptr)u));
    assert(u->parent->child[0] == u || u->parent->child[1] == u ||
           *((tbinptr*)(u->parent)) == u);
    if (u->child[0] != 0) {
      do_check_tree(m, u->child[0]);
    if (u->child[1] != 0) {
      do_check_tree(m, u->child[1]);

static void do_check_treebin(mstate m, bindex_t i) {
  int empty = (m->treemap & (1U << i)) == 0;
    do_check_tree(m, t);

static void do_check_smallbin(mstate m, bindex_t i) {
  mchunkptr p = b->bk;
  unsigned int empty = (m->smallmap & (1U << i)) == 0;
  for (; p != b; p = p->bk) {
    do_check_free_chunk(m, p);
      do_check_inuse_chunk(m, q);

static int bin_find(mstate m, mchunkptr x) {
    } while ((p = p->fd) != b);
    while (t != 0 && chunksize(t) != size) {
        if (u == (tchunkptr)x)
      } while ((u = u->fd) != t);

static size_t traverse_and_check(mstate m) {
    msegmentptr s = &m->seg;
      mchunkptr lastq = 0;
          do_check_inuse_chunk(m, q);
          assert(q == m->dv || bin_find(m, q));
          do_check_free_chunk(m, q);

static void do_check_malloc_state(mstate m) {
    do_check_smallbin(m, i);
    do_check_treebin(m, i);
    do_check_any_chunk(m, m->dv);
    do_check_top_chunk(m, m->top);
  total = traverse_and_check(m);
  struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  msegmentptr s = &m->seg;

  msegmentptr s = &m->seg;

  std::cerr << "max system bytes = " << (unsigned long)(maxfp) << std::endl
            << "system bytes = " << (unsigned long)(fp) << std::endl
            << "in use bytes = " << (unsigned long)(used) << std::endl;
#define insert_small_chunk(M, P, S) {\
  bindex_t I = small_index(S);\
  mchunkptr B = smallbin_at(M, I);\
  mchunkptr F = B;\
  assert(S >= MIN_CHUNK_SIZE);\
  if (!smallmap_is_marked(M, I))\
    mark_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, B->fd)))\
    F = B->fd;\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
  B->fd = P;\
  F->bk = P;\
  P->fd = F;\
  P->bk = B;\
}

#define unlink_small_chunk(M, P, S) {\
  mchunkptr F = P->fd;\
  mchunkptr B = P->bk;\
  bindex_t I = small_index(S);\
  assert(P != B);\
  assert(P != F);\
  assert(chunksize(P) == small_index2size(I));\
  if (F == B)\
    clear_smallmap(M, I);\
  else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
                   (B == smallbin_at(M,I) || ok_address(M, B)))) {\
    F->bk = B;\
    B->fd = F;\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}

#define unlink_first_small_chunk(M, B, P, I) {\
  mchunkptr F = P->fd;\
  assert(P != B);\
  assert(P != F);\
  assert(chunksize(P) == small_index2size(I));\
  if (B == F)\
    clear_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, F))) {\
    B->fd = F;\
    F->bk = B;\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}

#define replace_dv(M, P, S) {\
  size_t DVS = M->dvsize;\
  if (DVS != 0) {\
    mchunkptr DV = M->dv;\
    assert(is_small(DVS));\
    insert_small_chunk(M, DV, DVS);\
  }\
  M->dvsize = S;\
  M->dv = P;\
}

#define insert_large_chunk(M, X, S) {\
  tbinptr* H;\
  bindex_t I;\
  compute_tree_index(S, I);\
  H = treebin_at(M, I);\
  X->index = I;\
  X->child[0] = X->child[1] = 0;\
  if (!treemap_is_marked(M, I)) {\
    mark_treemap(M, I);\
    *H = X;\
    X->parent = (tchunkptr)H;\
    X->fd = X->bk = X;\
  }\
  else {\
    tchunkptr T = *H;\
    size_t K = S << leftshift_for_tree_index(I);\
    for (;;) {\
      if (chunksize(T) != S) {\
        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
        K <<= 1;\
        if (*C != 0)\
          T = *C;\
        else if (RTCHECK(ok_address(M, C))) {\
          *C = X;\
          X->parent = T;\
          X->fd = X->bk = X;\
          break;\
        }\
        else {\
          CORRUPTION_ERROR_ACTION(M);\
          break;\
        }\
      }\
      else {\
        tchunkptr F = T->fd;\
        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
          T->fd = F->bk = X;\
          X->fd = F;\
          X->bk = T;\
          X->parent = 0;\
          break;\
        }\
        else {\
          CORRUPTION_ERROR_ACTION(M);\
          break;\
        }\
      }\
    }\
  }\
}

#define unlink_large_chunk(M, X) {\
  tchunkptr XP = X->parent;\
  tchunkptr R;\
  if (X->bk != X) {\
    tchunkptr F = X->fd;\
    R = X->bk;\
    if (RTCHECK(ok_address(M, F))) {\
      F->bk = R;\
      R->fd = F;\
    }\
    else {\
      CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
  else {\
    tchunkptr* RP;\
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
        ((R = *(RP = &(X->child[0]))) != 0)) {\
      tchunkptr* CP;\
      while ((*(CP = &(R->child[1])) != 0) ||\
             (*(CP = &(R->child[0])) != 0)) {\
        R = *(RP = CP);\
      }\
      if (RTCHECK(ok_address(M, RP)))\
        *RP = 0;\
      else {\
        CORRUPTION_ERROR_ACTION(M);\
      }\
    }\
  }\
  if (XP != 0) {\
    tbinptr* H = treebin_at(M, X->index);\
    if (X == *H) {\
      if ((*H = R) == 0) \
        clear_treemap(M, X->index);\
    }\
    else if (RTCHECK(ok_address(M, XP))) {\
      if (XP->child[0] == X) \
        XP->child[0] = R;\
      else \
        XP->child[1] = R;\
    }\
    else\
      CORRUPTION_ERROR_ACTION(M);\
    if (R != 0) {\
      if (RTCHECK(ok_address(M, R))) {\
        tchunkptr C0, C1;\
        R->parent = XP;\
        if ((C0 = X->child[0]) != 0) {\
          if (RTCHECK(ok_address(M, C0))) {\
            R->child[0] = C0;\
            C0->parent = R;\
          }\
          else\
            CORRUPTION_ERROR_ACTION(M);\
        }\
        if ((C1 = X->child[1]) != 0) {\
          if (RTCHECK(ok_address(M, C1))) {\
            R->child[1] = C1;\
            C1->parent = R;\
          }\
          else\
            CORRUPTION_ERROR_ACTION(M);\
        }\
      }\
      else\
        CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
}

#define insert_chunk(M, P, S)\
  if (is_small(S)) insert_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }

#define unlink_chunk(M, P, S)\
  if (is_small(S)) unlink_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
#define internal_malloc(m, b) mspace_malloc(m, b)
#define internal_free(m, mem) mspace_free(m,mem);
#define internal_malloc(m, b)\
  (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
#define internal_free(m, mem)\
  if (m == gm) dlfree(mem); else mspace_free(m,mem);
#define internal_malloc(m, b) dlmalloc(b)
#define internal_free(m, mem) dlfree(mem)
  if (m->least_addr == 0 || mm < m->least_addr)

  char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
                                oldmmsize, newmmsize, 1);
  mchunkptr newp = (mchunkptr)(cp + offset);
  if (cp < m->least_addr)

static void init_top(mstate m, mchunkptr p, size_t psize) {

  bin->fd = bin->bk = bin;

#if PROCEED_ON_ERROR
static void reset_on_error(mstate m) {
  ++malloc_corruption_error_count;
  size_t psize = (char*)oldfirst - (char*)p;
  size_t qsize = psize - nb;
  assert((char*)oldfirst > (char*)q);
  if (oldfirst == m->top) {
    size_t tsize = m->topsize += qsize;
  else if (oldfirst == m->dv) {
    size_t dsize = m->dvsize += qsize;
static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
  char* old_top = (char*)m->top;
  char* old_end = oldsp->base + oldsp->size;
  char* asp = rawsp + offset;
  mchunkptr p = tnext;
  if ((char*)(&(nextp->head)) < old_end)
  if (csp != old_top) {
    size_t psize = csp - old_top;

  flag_t mmap_flag = 0;
  asize += (page_align((size_t)base) - (size_t)base);
  size_t ssize = end - br;
  if (m->least_addr == 0 || tbase < m->least_addr)
  msegmentptr sp = &m->seg;
  while (sp != 0 && tbase != sp->base + sp->size)
  if (tbase < m->least_addr)
  while (sp != 0 && sp->base != tbase + tsize)
  char* oldbase = sp->base;
  if (nb < m->topsize) {
    size_t rsize = m->topsize -= nb;
    mchunkptr p = m->top;

  size_t released = 0;
  msegmentptr pred = &m->seg;
  msegmentptr sp = pred->next;
  char* base = sp->base;
  size_t size = sp->size;
  msegmentptr next = sp->next;

  size_t released = 0;
      sp->size >= extra &&
  size_t newsize = sp->size - extra;
  if (old_br == sp->base + sp->size) {
    if (rel_br != CMFAIL && new_br < old_br)
      released = old_br - new_br;
  if (released != 0) {
    sp->size -= released;
  return (released != 0)? 1 : 0;
  if ((rsize = trem) == 0)
  if (rt != 0 && rt != t)
  if (t == 0 && v == 0) {
    if (leftbits != 0) {
      binmap_t leastbit = least_bit(leftbits);
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
  else if (oldsize >= nb) {
    size_t rsize = oldsize - nb;
  else if (next == m->top && oldsize + m->topsize > nb) {
    size_t newsize = oldsize + m->topsize;
    size_t newtopsize = newsize - nb;
  memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
  if ((alignment & (alignment-SIZE_T_ONE)) != 0) {
    while (a < alignment) a <<= 1;
  if ((((size_t)(mem)) % alignment) != 0) {
    char* br = (char*)mem2chunk((size_t)(((size_t)((size_t)mem +
                                alignment - SIZE_T_ONE)) & -alignment));
    size_t leadsize = pos - (char*)(p);
    size_t newsize = chunksize(p) - leadsize;
    newp->head = newsize;
  size_t remainder_size = size - nb;
  set_inuse(m, remainder, remainder_size);
  size_t element_size;
  size_t contents_size;
  size_t remainder_size;
  mchunkptr array_chunk;
  if (n_elements == 0)
  if (n_elements == 0)
    return (void**)NULL;
  array_size = request2size(n_elements * (sizeof(void*)));
  contents_size = n_elements * element_size;
  for (i = 0; i != n_elements; ++i)
  size = contents_size + array_size;
  memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
  if (marray == NULL) {
    size_t array_chunk_size;
    array_chunk_size = remainder_size - contents_size;
    marray = (void**) (chunk2mem(array_chunk));
    remainder_size = contents_size;
  for (i = 0; ; ++i) {
    if (i != n_elements-1) {
      if (element_size != 0)
        size = element_size;
      remainder_size -= size;
  if (marray != chunks) {
    if (element_size != 0) {
      assert(remainder_size == element_size);
  for (i = 0; i != n_elements; ++i)
  smallbits = gm->smallmap >> idx;
  if ((smallbits & 0x3U) != 0) {
    idx += ~smallbits & 1;
  else if (nb > gm->dvsize) {
    if (smallbits != 0) {
      binmap_t leastbit = least_bit(leftbits);
  if (nb <= gm->dvsize) {
    size_t rsize = gm->dvsize - nb;
    mchunkptr p = gm->dv;
    size_t dvs = gm->dvsize;
  else if (nb < gm->topsize) {
    size_t rsize = gm->topsize -= nb;
    mchunkptr p = gm->top;

  mstate fm = get_mstate_for(p);
  if (next == fm->top) {
    size_t tsize = fm->topsize += psize;
  else if (next == fm->dv) {
    size_t dsize = fm->dvsize += psize;
  if (n_elements != 0) {
    req = n_elements * elem_size;
    if (((n_elements | elem_size) & ~(size_t)0xffff) &&
        (req / n_elements != elem_size))
  memset(mem, 0, req);

#ifdef REALLOC_ZERO_BYTES_FREES
  mstate m = get_mstate_for(mem2chunk(oldmem));

  size_t sz = elem_size;
  return ialloc(gm, n_elements, &sz, 3, chunks);
  return ialloc(gm, n_elements, sizes, 0, chunks);
  return gm->footprint;
  return gm->max_footprint;
  memset(m, 0, msize);
     << "MSPACES: " << MSPACES << ", "
     << " USE_LOCKS: " << USE_LOCKS << std::endl;

  std::cout << "handle capacity parameter: " << capacity << ", "

  char* tbase = (char*)(CALL_MMAP(tsize));
  msegmentptr sp = &ms->seg;
  char* base = sp->base;
  size_t size = sp->size;
  flag_t flag = sp->sflags;

  if ((smallbits & 0x3U) != 0) {
    idx += ~smallbits & 1;
  else if (nb > ms->dvsize) {
    if (smallbits != 0) {
      binmap_t leastbit = least_bit(leftbits);
  if (nb <= ms->dvsize) {
    size_t rsize = ms->dvsize - nb;
    mchunkptr p = ms->dv;
  else if (nb < ms->topsize) {
    size_t rsize = ms->topsize -= nb;
    mchunkptr p = ms->top;

  mstate fm = get_mstate_for(p);
  if (next == fm->top) {
    size_t tsize = fm->topsize += psize;
  else if (next == fm->dv) {
    size_t dsize = fm->dvsize += psize;

  if (n_elements != 0) {
    req = n_elements * elem_size;
    if (((n_elements | elem_size) & ~(size_t)0xffff) &&
        (req / n_elements != elem_size))
  memset(mem, 0, req);

#ifdef REALLOC_ZERO_BYTES_FREES
  mstate ms = get_mstate_for(p);
void** mspace_independent_calloc(mspace msp, size_t n_elements,
                                 size_t elem_size, void* chunks[]) {
  size_t sz = elem_size;
  mstate ms = (mstate)msp;
  return ialloc(ms, n_elements, &sz, 3, chunks);
}

void** mspace_independent_comalloc(mspace msp, size_t n_elements,
                                   size_t sizes[], void* chunks[]) {
  mstate ms = (mstate)msp;
  return ialloc(ms, n_elements, sizes, 0, chunks);
}
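/* Illustration (not part of the original source): a minimal sketch of
   mspace_independent_calloc. It returns an array of n_elements pointers,
   each to a zeroed elem_size-byte chunk; passing chunks == 0 asks the
   allocator to also allocate the pointer array, which must then be freed
   as well. Kept under "#if 0" so it does not affect compilation. */
#if 0
static void example_independent_calloc(mspace msp) {
  void** elems = mspace_independent_calloc(msp, 10, sizeof(double), 0);
  if (elems != 0) {
    /* use elems[0] .. elems[9]; each points to a zeroed block */
    for (size_t i = 0; i != 10; ++i)
      mspace_free(msp, elems[i]);
    mspace_free(msp, elems);  /* the pointer array itself was allocated too */
  }
}
#endif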