/*
 * src/3rdparty/ptmalloc/malloc-private.h
 * (mirrored from https://bitbucket.org/ultra_iter/qt-vtl)
 */
/*
  $Id: malloc-private.h,v 1.4 2006/03/31 12:56:52 wg Exp $

  Private header file for ptmalloc3, created by Wolfram Gloger
  and released to the public domain, as explained at
  http://creativecommons.org/licenses/publicdomain.
*/

/* The following file is replicated from malloc.c */
  8. #ifndef MALLOC_PRIVATE_H
  9. #define MALLOC_PRIVATE_H
  10. #ifndef MALLOC_ALIGNMENT
  11. # define MALLOC_ALIGNMENT ((size_t)8U)
  12. #endif
  13. #ifndef USE_LOCKS
  14. # define USE_LOCKS 0
  15. #endif
  16. /* The bit mask value corresponding to MALLOC_ALIGNMENT */
  17. #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
  18. /* the number of bytes to offset an address to align it */
  19. #define align_offset(A)\
  20. ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  21. ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
  22. #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
  23. #define MAP_ANONYMOUS MAP_ANON
  24. #endif /* MAP_ANON */
  25. #ifdef MAP_ANONYMOUS
  26. #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
  27. #define CALL_MMAP(s) mmap(0, (s), PROT_READ|PROT_WRITE, MMAP_FLAGS, -1, 0)
  28. #else /* MAP_ANONYMOUS */
  29. /*
  30. Nearly all versions of mmap support MAP_ANONYMOUS, so the following
  31. is unlikely to be needed, but is supplied just in case.
  32. */
  33. #include <fcntl.h> /* for O_RDWR */
  34. #define MMAP_FLAGS (MAP_PRIVATE)
  35. static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
  36. #define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
  37. (dev_zero_fd = open("/dev/zero", O_RDWR), \
  38. mmap(0, (s), PROT_READ|PROT_WRITE, MMAP_FLAGS, dev_zero_fd, 0)) : \
  39. mmap(0, (s), PROT_READ|PROT_WRITE, MMAP_FLAGS, dev_zero_fd, 0))
  40. #endif /* MAP_ANONYMOUS */
  41. #define CALL_MUNMAP(a, s) munmap((a), (s))
  42. struct malloc_chunk {
  43. size_t prev_foot; /* Size of previous chunk (if free). */
  44. size_t head; /* Size and inuse bits. */
  45. struct malloc_chunk* fd; /* double links -- used only if free. */
  46. struct malloc_chunk* bk;
  47. };
  48. typedef struct malloc_chunk mchunk;
  49. typedef struct malloc_chunk* mchunkptr;
  50. typedef unsigned int binmap_t;
  51. typedef unsigned int flag_t;
  52. struct malloc_tree_chunk;
  53. typedef struct malloc_tree_chunk* tbinptr;
  54. struct malloc_segment {
  55. char* base; /* base address */
  56. size_t size; /* allocated size */
  57. struct malloc_segment* next; /* ptr to next segment */
  58. flag_t sflags; /* mmap and extern flag */
  59. };
  60. typedef struct malloc_segment msegment;
  61. #define NSMALLBINS (32U)
  62. #define NTREEBINS (32U)
  63. struct malloc_state {
  64. binmap_t smallmap;
  65. binmap_t treemap;
  66. size_t dvsize;
  67. size_t topsize;
  68. char* least_addr;
  69. mchunkptr dv;
  70. mchunkptr top;
  71. size_t trim_check;
  72. size_t release_checks;
  73. size_t magic;
  74. mchunkptr smallbins[(NSMALLBINS+1)*2];
  75. tbinptr treebins[NTREEBINS];
  76. size_t footprint;
  77. size_t max_footprint;
  78. flag_t mflags;
  79. #if USE_LOCKS
  80. MLOCK_T mutex;
  81. #endif /* USE_LOCKS */
  82. msegment seg;
  83. void* extp;
  84. size_t exts;
  85. };
  86. /*
  87. TOP_FOOT_SIZE is padding at the end of a segment, including space
  88. that may be needed to place segment records and fenceposts when new
  89. noncontiguous segments are added.
  90. */
  91. #define TOP_FOOT_SIZE\
  92. (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
  93. /* ------------------- Chunks sizes and alignments ----------------------- */
  94. #define MCHUNK_SIZE (sizeof(mchunk))
  95. #define CHUNK_OVERHEAD (SIZE_T_SIZE)
  96. /* MMapped chunks need a second word of overhead ... */
  97. #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
  98. /* ... and additional padding for fake next-chunk at foot */
  99. #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
  100. /* The smallest size we can malloc is an aligned minimal chunk */
  101. #define MIN_CHUNK_SIZE\
  102. ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
  103. /* conversion from malloc headers to user pointers, and back */
  104. #define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
  105. #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
  106. /* chunk associated with aligned address A */
  107. #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
  108. /* pad request bytes into a usable size */
  109. #define pad_request(req) \
  110. (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
  111. /* The byte and bit size of a size_t */
  112. #define SIZE_T_SIZE (sizeof(size_t))
  113. #define SIZE_T_BITSIZE (sizeof(size_t) << 3)
  114. /* Some constants coerced to size_t */
  115. /* Annoying but necessary to avoid errors on some platforms */
  116. #define SIZE_T_ZERO ((size_t)0)
  117. #define SIZE_T_ONE ((size_t)1)
  118. #define SIZE_T_TWO ((size_t)2)
  119. #define SIZE_T_FOUR ((size_t)4)
  120. #define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
  121. #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
  122. #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
  123. #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
  124. #define IS_MMAPPED_BIT (SIZE_T_ONE)
  125. #define PINUSE_BIT (SIZE_T_ONE)
  126. #define CINUSE_BIT (SIZE_T_TWO)
  127. #define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|SIZE_T_FOUR)
  128. /* head field is or'ed with NON_MAIN_ARENA if the chunk was obtained
  129. from a non-main arena. This is only set immediately before handing
  130. the chunk to the user, if necessary. */
  131. #define NON_MAIN_ARENA (SIZE_T_FOUR)
  132. #define cinuse(p) ((p)->head & CINUSE_BIT)
  133. #define pinuse(p) ((p)->head & PINUSE_BIT)
  134. #define chunksize(p) ((p)->head & ~(FLAG_BITS))
  135. #define is_mmapped(p)\
  136. (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
  137. /* Get the internal overhead associated with chunk p */
  138. #define overhead_for(p)\
  139. (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
  140. #endif /* MALLOC_PRIVATE_H */