
/Zend/zend_alloc.c

http://github.com/infusion/PHP
C | 2659 lines | 2208 code | 366 blank | 85 comment | 457 complexity | 17886f0447980a3fe71ed7176502f53f MD5
Possible License(s): MPL-2.0-no-copyleft-exception, LGPL-2.1, BSD-3-Clause
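
zend_alloc.c implements the Zend Memory Manager that backs PHP's per-request allocations. It is built in two layers: a pluggable storage layer that obtains whole segments from the OS (Win32 heap, plain malloc(), anonymous mmap(), or /dev/zero mmap(), selectable via the ZEND_MM_MEM_TYPE environment variable), and a heap layer that carves those segments into blocks using exact-size buckets for small requests and size-keyed trees for large ones, with optional debug checking, heap protection, and cookies. The segment size and compaction threshold can be tuned with ZEND_MM_SEG_SIZE and ZEND_MM_COMPACT, as read in zend_mm_startup() below.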
  1. /*
  2. +----------------------------------------------------------------------+
  3. | Zend Engine |
  4. +----------------------------------------------------------------------+
  5. | Copyright (c) 1998-2011 Zend Technologies Ltd. (http://www.zend.com) |
  6. +----------------------------------------------------------------------+
  7. | This source file is subject to version 2.00 of the Zend license, |
  8. | that is bundled with this package in the file LICENSE, and is |
  9. | available through the world-wide-web at the following url: |
  10. | http://www.zend.com/license/2_00.txt. |
  11. | If you did not receive a copy of the Zend license and are unable to |
  12. | obtain it through the world-wide-web, please send a note to |
  13. | license@zend.com so we can mail you a copy immediately. |
  14. +----------------------------------------------------------------------+
  15. | Authors: Andi Gutmans <andi@zend.com> |
  16. | Zeev Suraski <zeev@zend.com> |
  17. | Dmitry Stogov <dmitry@zend.com> |
  18. +----------------------------------------------------------------------+
  19. */
  20. /* $Id: zend_alloc.c 308090 2011-02-07 10:25:34Z pajoye $ */
  21. #include "zend.h"
  22. #include "zend_alloc.h"
  23. #include "zend_globals.h"
  24. #include "zend_operators.h"
  25. #ifdef HAVE_SIGNAL_H
  26. # include <signal.h>
  27. #endif
  28. #ifdef HAVE_UNISTD_H
  29. # include <unistd.h>
  30. #endif
  31. #ifdef ZEND_WIN32
  32. # include <wincrypt.h>
  33. # include <process.h>
  34. #endif
  35. #ifndef ZEND_MM_HEAP_PROTECTION
  36. # define ZEND_MM_HEAP_PROTECTION ZEND_DEBUG
  37. #endif
  38. #ifndef ZEND_MM_SAFE_UNLINKING
  39. # define ZEND_MM_SAFE_UNLINKING 1
  40. #endif
  41. #ifndef ZEND_MM_COOKIES
  42. # define ZEND_MM_COOKIES ZEND_DEBUG
  43. #endif
  44. #ifdef _WIN64
  45. # define PTR_FMT "0x%0.16I64x"
  46. /*
  47. #elif sizeof(long) == 8
  48. # define PTR_FMT "0x%0.16lx"
  49. */
  50. #else
  51. # define PTR_FMT "0x%0.8lx"
  52. #endif
  53. #if ZEND_DEBUG
  54. void zend_debug_alloc_output(char *format, ...)
  55. {
  56. char output_buf[256];
  57. va_list args;
  58. va_start(args, format);
  59. vsprintf(output_buf, format, args);
  60. va_end(args);
  61. #ifdef ZEND_WIN32
  62. OutputDebugString(output_buf);
  63. #else
  64. fprintf(stderr, "%s", output_buf);
  65. #endif
  66. }
  67. #endif
  68. #if (defined (__GNUC__) && __GNUC__ > 2 ) && !defined(__INTEL_COMPILER) && !defined(DARWIN) && !defined(__hpux) && !defined(_AIX)
  69. static void zend_mm_panic(const char *message) __attribute__ ((noreturn));
  70. #endif
  71. static void zend_mm_panic(const char *message)
  72. {
  73. fprintf(stderr, "%s\n", message);
  74. /* See http://support.microsoft.com/kb/190351 */
  75. #ifdef PHP_WIN32
  76. fflush(stderr);
  77. #endif
  78. #if ZEND_DEBUG && defined(HAVE_KILL) && defined(HAVE_GETPID)
  79. kill(getpid(), SIGSEGV);
  80. #endif
  81. exit(1);
  82. }
  83. /*******************/
  84. /* Storage Manager */
  85. /*******************/
  86. #ifdef ZEND_WIN32
  87. # define HAVE_MEM_WIN32 /* use VirtualAlloc() to allocate memory */
  88. #endif
  89. #define HAVE_MEM_MALLOC /* use malloc() to allocate segments */
  90. #include <sys/types.h>
  91. #include <sys/stat.h>
  92. #if HAVE_LIMITS_H
  93. #include <limits.h>
  94. #endif
  95. #include <fcntl.h>
  96. #include <errno.h>
  97. #if defined(HAVE_MEM_MMAP_ANON) || defined(HAVE_MEM_MMAP_ZERO)
  98. # ifdef HAVE_MREMAP
  99. # ifndef _GNU_SOURCE
  100. # define _GNU_SOURCE
  101. # endif
  102. # ifndef __USE_GNU
  103. # define __USE_GNU
  104. # endif
  105. # endif
  106. # include <sys/mman.h>
  107. # ifndef MAP_ANON
  108. # ifdef MAP_ANONYMOUS
  109. # define MAP_ANON MAP_ANONYMOUS
  110. # endif
  111. # endif
  112. # ifndef MREMAP_MAYMOVE
  113. # define MREMAP_MAYMOVE 0
  114. # endif
  115. # ifndef MAP_FAILED
  116. # define MAP_FAILED ((void*)-1)
  117. # endif
  118. #endif
  119. static zend_mm_storage* zend_mm_mem_dummy_init(void *params)
  120. {
  121. return malloc(sizeof(zend_mm_storage));
  122. }
  123. static void zend_mm_mem_dummy_dtor(zend_mm_storage *storage)
  124. {
  125. free(storage);
  126. }
  127. static void zend_mm_mem_dummy_compact(zend_mm_storage *storage)
  128. {
  129. }
  130. #if defined(HAVE_MEM_MMAP_ANON) || defined(HAVE_MEM_MMAP_ZERO)
  131. static zend_mm_segment* zend_mm_mem_mmap_realloc(zend_mm_storage *storage, zend_mm_segment* segment, size_t size)
  132. {
  133. zend_mm_segment *ret;
  134. #ifdef HAVE_MREMAP
  135. #if defined(__NetBSD__)
  136. /* NetBSD 5 supports mremap but takes an extra newp argument */
  137. ret = (zend_mm_segment*)mremap(segment, segment->size, segment, size, MREMAP_MAYMOVE);
  138. #else
  139. ret = (zend_mm_segment*)mremap(segment, segment->size, size, MREMAP_MAYMOVE);
  140. #endif
  141. if (ret == MAP_FAILED) {
  142. #endif
  143. ret = storage->handlers->_alloc(storage, size);
  144. if (ret) {
  145. memcpy(ret, segment, size > segment->size ? segment->size : size);
  146. storage->handlers->_free(storage, segment);
  147. }
  148. #ifdef HAVE_MREMAP
  149. }
  150. #endif
  151. return ret;
  152. }
  153. static void zend_mm_mem_mmap_free(zend_mm_storage *storage, zend_mm_segment* segment)
  154. {
  155. munmap((void*)segment, segment->size);
  156. }
  157. #endif
  158. #ifdef HAVE_MEM_MMAP_ANON
  159. static zend_mm_segment* zend_mm_mem_mmap_anon_alloc(zend_mm_storage *storage, size_t size)
  160. {
  161. zend_mm_segment *ret = (zend_mm_segment*)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
  162. if (ret == MAP_FAILED) {
  163. ret = NULL;
  164. }
  165. return ret;
  166. }
  167. # define ZEND_MM_MEM_MMAP_ANON_DSC {"mmap_anon", zend_mm_mem_dummy_init, zend_mm_mem_dummy_dtor, zend_mm_mem_dummy_compact, zend_mm_mem_mmap_anon_alloc, zend_mm_mem_mmap_realloc, zend_mm_mem_mmap_free}
  168. #endif
  169. #ifdef HAVE_MEM_MMAP_ZERO
  170. static int zend_mm_dev_zero_fd = -1;
  171. static zend_mm_storage* zend_mm_mem_mmap_zero_init(void *params)
  172. {
  173. if (zend_mm_dev_zero_fd != -1) {
  174. zend_mm_dev_zero_fd = open("/dev/zero", O_RDWR, S_IRUSR | S_IWUSR);
  175. }
  176. if (zend_mm_dev_zero_fd >= 0) {
  177. return malloc(sizeof(zend_mm_storage));
  178. } else {
  179. return NULL;
  180. }
  181. }
  182. static void zend_mm_mem_mmap_zero_dtor(zend_mm_storage *storage)
  183. {
  184. close(zend_mm_dev_zero_fd);
  185. free(storage);
  186. }
  187. static zend_mm_segment* zend_mm_mem_mmap_zero_alloc(zend_mm_storage *storage, size_t size)
  188. {
  189. zend_mm_segment *ret = (zend_mm_segment*)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, zend_mm_dev_zero_fd, 0);
  190. if (ret == MAP_FAILED) {
  191. ret = NULL;
  192. }
  193. return ret;
  194. }
  195. # define ZEND_MM_MEM_MMAP_ZERO_DSC {"mmap_zero", zend_mm_mem_mmap_zero_init, zend_mm_mem_mmap_zero_dtor, zend_mm_mem_dummy_compact, zend_mm_mem_mmap_zero_alloc, zend_mm_mem_mmap_realloc, zend_mm_mem_mmap_free}
  196. #endif
  197. #ifdef HAVE_MEM_WIN32
  198. static zend_mm_storage* zend_mm_mem_win32_init(void *params)
  199. {
  200. HANDLE heap = HeapCreate(HEAP_NO_SERIALIZE, 0, 0);
  201. zend_mm_storage* storage;
  202. if (heap == NULL) {
  203. return NULL;
  204. }
  205. storage = (zend_mm_storage*)malloc(sizeof(zend_mm_storage));
  206. if (storage == NULL) {
  207. HeapDestroy(heap);
  208. return NULL;
  209. }
  210. storage->data = (void*) heap;
  211. return storage;
  212. }
  213. static void zend_mm_mem_win32_dtor(zend_mm_storage *storage)
  214. {
  215. HeapDestroy((HANDLE)storage->data);
  216. free(storage);
  217. }
  218. static void zend_mm_mem_win32_compact(zend_mm_storage *storage)
  219. {
  220. HeapDestroy((HANDLE)storage->data);
  221. storage->data = (void*)HeapCreate(HEAP_NO_SERIALIZE, 0, 0);
  222. }
  223. static zend_mm_segment* zend_mm_mem_win32_alloc(zend_mm_storage *storage, size_t size)
  224. {
  225. return (zend_mm_segment*) HeapAlloc((HANDLE)storage->data, HEAP_NO_SERIALIZE, size);
  226. }
  227. static void zend_mm_mem_win32_free(zend_mm_storage *storage, zend_mm_segment* segment)
  228. {
  229. HeapFree((HANDLE)storage->data, HEAP_NO_SERIALIZE, segment);
  230. }
  231. static zend_mm_segment* zend_mm_mem_win32_realloc(zend_mm_storage *storage, zend_mm_segment* segment, size_t size)
  232. {
  233. return (zend_mm_segment*) HeapReAlloc((HANDLE)storage->data, HEAP_NO_SERIALIZE, segment, size);
  234. }
  235. # define ZEND_MM_MEM_WIN32_DSC {"win32", zend_mm_mem_win32_init, zend_mm_mem_win32_dtor, zend_mm_mem_win32_compact, zend_mm_mem_win32_alloc, zend_mm_mem_win32_realloc, zend_mm_mem_win32_free}
  236. #endif
  237. #ifdef HAVE_MEM_MALLOC
  238. static zend_mm_segment* zend_mm_mem_malloc_alloc(zend_mm_storage *storage, size_t size)
  239. {
  240. return (zend_mm_segment*)malloc(size);
  241. }
  242. static zend_mm_segment* zend_mm_mem_malloc_realloc(zend_mm_storage *storage, zend_mm_segment *ptr, size_t size)
  243. {
  244. return (zend_mm_segment*)realloc(ptr, size);
  245. }
  246. static void zend_mm_mem_malloc_free(zend_mm_storage *storage, zend_mm_segment *ptr)
  247. {
  248. free(ptr);
  249. }
  250. # define ZEND_MM_MEM_MALLOC_DSC {"malloc", zend_mm_mem_dummy_init, zend_mm_mem_dummy_dtor, zend_mm_mem_dummy_compact, zend_mm_mem_malloc_alloc, zend_mm_mem_malloc_realloc, zend_mm_mem_malloc_free}
  251. #endif
  252. static const zend_mm_mem_handlers mem_handlers[] = {
  253. #ifdef HAVE_MEM_WIN32
  254. ZEND_MM_MEM_WIN32_DSC,
  255. #endif
  256. #ifdef HAVE_MEM_MALLOC
  257. ZEND_MM_MEM_MALLOC_DSC,
  258. #endif
  259. #ifdef HAVE_MEM_MMAP_ANON
  260. ZEND_MM_MEM_MMAP_ANON_DSC,
  261. #endif
  262. #ifdef HAVE_MEM_MMAP_ZERO
  263. ZEND_MM_MEM_MMAP_ZERO_DSC,
  264. #endif
  265. {NULL, NULL, NULL, NULL, NULL, NULL}
  266. };
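/* Each entry above bundles one storage backend: its name plus the
   init/dtor/compact/_alloc/_realloc/_free callbacks declared in the
   ZEND_MM_MEM_*_DSC macros. zend_mm_startup() uses the first entry by
   default, or the one whose name matches the ZEND_MM_MEM_TYPE environment
   variable; the ZEND_MM_STORAGE_* macros below route all segment
   operations through the selected handlers. */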
  267. # define ZEND_MM_STORAGE_DTOR() heap->storage->handlers->dtor(heap->storage)
  268. # define ZEND_MM_STORAGE_ALLOC(size) heap->storage->handlers->_alloc(heap->storage, size)
  269. # define ZEND_MM_STORAGE_REALLOC(ptr, size) heap->storage->handlers->_realloc(heap->storage, ptr, size)
  270. # define ZEND_MM_STORAGE_FREE(ptr) heap->storage->handlers->_free(heap->storage, ptr)
  271. /****************/
  272. /* Heap Manager */
  273. /****************/
  274. #define MEM_BLOCK_VALID 0x7312F8DC
  275. #define MEM_BLOCK_FREED 0x99954317
  276. #define MEM_BLOCK_CACHED 0xFB8277DC
  277. #define MEM_BLOCK_GUARD 0x2A8FCC84
  278. #define MEM_BLOCK_LEAK 0x6C5E8F2D
  279. /* mm block type */
  280. typedef struct _zend_mm_block_info {
  281. #if ZEND_MM_COOKIES
  282. size_t _cookie;
  283. #endif
  284. size_t _size;
  285. size_t _prev;
  286. } zend_mm_block_info;
  287. #if ZEND_DEBUG
  288. typedef struct _zend_mm_debug_info {
  289. char *filename;
  290. uint lineno;
  291. char *orig_filename;
  292. uint orig_lineno;
  293. size_t size;
  294. #if ZEND_MM_HEAP_PROTECTION
  295. unsigned int start_magic;
  296. #endif
  297. } zend_mm_debug_info;
  298. #elif ZEND_MM_HEAP_PROTECTION
  299. typedef struct _zend_mm_debug_info {
  300. size_t size;
  301. unsigned int start_magic;
  302. } zend_mm_debug_info;
  303. #endif
  304. typedef struct _zend_mm_block {
  305. zend_mm_block_info info;
  306. #if ZEND_DEBUG
  307. unsigned int magic;
  308. # ifdef ZTS
  309. THREAD_T thread_id;
  310. # endif
  311. zend_mm_debug_info debug;
  312. #elif ZEND_MM_HEAP_PROTECTION
  313. zend_mm_debug_info debug;
  314. #endif
  315. } zend_mm_block;
  316. typedef struct _zend_mm_small_free_block {
  317. zend_mm_block_info info;
  318. #if ZEND_DEBUG
  319. unsigned int magic;
  320. # ifdef ZTS
  321. THREAD_T thread_id;
  322. # endif
  323. #endif
  324. struct _zend_mm_free_block *prev_free_block;
  325. struct _zend_mm_free_block *next_free_block;
  326. } zend_mm_small_free_block;
  327. typedef struct _zend_mm_free_block {
  328. zend_mm_block_info info;
  329. #if ZEND_DEBUG
  330. unsigned int magic;
  331. # ifdef ZTS
  332. THREAD_T thread_id;
  333. # endif
  334. #endif
  335. struct _zend_mm_free_block *prev_free_block;
  336. struct _zend_mm_free_block *next_free_block;
  337. struct _zend_mm_free_block **parent;
  338. struct _zend_mm_free_block *child[2];
  339. } zend_mm_free_block;
  340. #define ZEND_MM_NUM_BUCKETS (sizeof(size_t) << 3)
  341. #define ZEND_MM_CACHE 1
  342. #define ZEND_MM_CACHE_SIZE (ZEND_MM_NUM_BUCKETS * 4 * 1024)
  343. #ifndef ZEND_MM_CACHE_STAT
  344. # define ZEND_MM_CACHE_STAT 0
  345. #endif
  346. struct _zend_mm_heap {
  347. int use_zend_alloc;
  348. void *(*_malloc)(size_t);
  349. void (*_free)(void*);
  350. void *(*_realloc)(void*, size_t);
  351. size_t free_bitmap;
  352. size_t large_free_bitmap;
  353. size_t block_size;
  354. size_t compact_size;
  355. zend_mm_segment *segments_list;
  356. zend_mm_storage *storage;
  357. size_t real_size;
  358. size_t real_peak;
  359. size_t limit;
  360. size_t size;
  361. size_t peak;
  362. size_t reserve_size;
  363. void *reserve;
  364. int overflow;
  365. int internal;
  366. #if ZEND_MM_CACHE
  367. unsigned int cached;
  368. zend_mm_free_block *cache[ZEND_MM_NUM_BUCKETS];
  369. #endif
  370. zend_mm_free_block *free_buckets[ZEND_MM_NUM_BUCKETS*2];
  371. zend_mm_free_block *large_free_buckets[ZEND_MM_NUM_BUCKETS];
  372. zend_mm_free_block *rest_buckets[2];
  373. #if ZEND_MM_CACHE_STAT
  374. struct {
  375. int count;
  376. int max_count;
  377. int hit;
  378. int miss;
  379. } cache_stat[ZEND_MM_NUM_BUCKETS+1];
  380. #endif
  381. };
  382. #define ZEND_MM_SMALL_FREE_BUCKET(heap, index) \
  383. (zend_mm_free_block*) ((char*)&heap->free_buckets[index * 2] + \
  384. sizeof(zend_mm_free_block*) * 2 - \
  385. sizeof(zend_mm_small_free_block))
  386. #define ZEND_MM_REST_BUCKET(heap) \
  387. (zend_mm_free_block*)((char*)&heap->rest_buckets[0] + \
  388. sizeof(zend_mm_free_block*) * 2 - \
  389. sizeof(zend_mm_small_free_block))
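/* Heap layout: free_buckets holds ZEND_MM_NUM_BUCKETS doubly linked lists
   of small free blocks (one list per aligned size) as bare pointer pairs;
   ZEND_MM_SMALL_FREE_BUCKET and ZEND_MM_REST_BUCKET bias the address of
   such a pair so that it overlays the prev/next fields of a fake
   zend_mm_small_free_block, letting list heads avoid full block headers.
   large_free_buckets holds one binary tree per power-of-two size class,
   and rest_buckets collects leftover pieces. free_bitmap and
   large_free_bitmap mark which buckets are non-empty, and, when
   ZEND_MM_CACHE is enabled, heap->cache keeps per-bucket stacks of
   recently freed small blocks. */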
  390. #if ZEND_MM_COOKIES
  391. static unsigned int _zend_mm_cookie = 0;
  392. # define ZEND_MM_COOKIE(block) \
  393. (((size_t)(block)) ^ _zend_mm_cookie)
  394. # define ZEND_MM_SET_COOKIE(block) \
  395. (block)->info._cookie = ZEND_MM_COOKIE(block)
  396. # define ZEND_MM_CHECK_COOKIE(block) \
  397. if (UNEXPECTED((block)->info._cookie != ZEND_MM_COOKIE(block))) { \
  398. zend_mm_panic("zend_mm_heap corrupted"); \
  399. }
  400. #else
  401. # define ZEND_MM_SET_COOKIE(block)
  402. # define ZEND_MM_CHECK_COOKIE(block)
  403. #endif
  404. /* Default memory segment size */
  405. #define ZEND_MM_SEG_SIZE (256 * 1024)
  406. /* Reserved space for error reporting in case of memory overflow */
  407. #define ZEND_MM_RESERVE_SIZE (8*1024)
  408. #ifdef _WIN64
  409. # define ZEND_MM_LONG_CONST(x) (x##i64)
  410. #else
  411. # define ZEND_MM_LONG_CONST(x) (x##L)
  412. #endif
  413. #define ZEND_MM_TYPE_MASK ZEND_MM_LONG_CONST(0x3)
  414. #define ZEND_MM_FREE_BLOCK ZEND_MM_LONG_CONST(0x0)
  415. #define ZEND_MM_USED_BLOCK ZEND_MM_LONG_CONST(0x1)
  416. #define ZEND_MM_GUARD_BLOCK ZEND_MM_LONG_CONST(0x3)
  417. #define ZEND_MM_BLOCK(b, type, size) do { \
  418. size_t _size = (size); \
  419. (b)->info._size = (type) | _size; \
  420. ZEND_MM_BLOCK_AT(b, _size)->info._prev = (type) | _size; \
  421. ZEND_MM_SET_COOKIE(b); \
  422. } while (0);
  423. #define ZEND_MM_LAST_BLOCK(b) do { \
  424. (b)->info._size = ZEND_MM_GUARD_BLOCK | ZEND_MM_ALIGNED_HEADER_SIZE; \
  425. ZEND_MM_SET_MAGIC(b, MEM_BLOCK_GUARD); \
  426. } while (0);
  427. #define ZEND_MM_BLOCK_SIZE(b) ((b)->info._size & ~ZEND_MM_TYPE_MASK)
  428. #define ZEND_MM_IS_FREE_BLOCK(b) (!((b)->info._size & ZEND_MM_USED_BLOCK))
  429. #define ZEND_MM_IS_USED_BLOCK(b) ((b)->info._size & ZEND_MM_USED_BLOCK)
  430. #define ZEND_MM_IS_GUARD_BLOCK(b) (((b)->info._size & ZEND_MM_TYPE_MASK) == ZEND_MM_GUARD_BLOCK)
  431. #define ZEND_MM_NEXT_BLOCK(b) ZEND_MM_BLOCK_AT(b, ZEND_MM_BLOCK_SIZE(b))
  432. #define ZEND_MM_PREV_BLOCK(b) ZEND_MM_BLOCK_AT(b, -(int)((b)->info._prev & ~ZEND_MM_TYPE_MASK))
  433. #define ZEND_MM_PREV_BLOCK_IS_FREE(b) (!((b)->info._prev & ZEND_MM_USED_BLOCK))
  434. #define ZEND_MM_MARK_FIRST_BLOCK(b) ((b)->info._prev = ZEND_MM_GUARD_BLOCK)
  435. #define ZEND_MM_IS_FIRST_BLOCK(b) ((b)->info._prev == ZEND_MM_GUARD_BLOCK)
  436. /* optimized access */
  437. #define ZEND_MM_FREE_BLOCK_SIZE(b) (b)->info._size
  438. /* Aligned header size */
  439. #define ZEND_MM_ALIGNED_HEADER_SIZE ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_block))
  440. #define ZEND_MM_ALIGNED_FREE_HEADER_SIZE ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_small_free_block))
  441. #define ZEND_MM_MIN_ALLOC_BLOCK_SIZE ZEND_MM_ALIGNED_SIZE(ZEND_MM_ALIGNED_HEADER_SIZE + END_MAGIC_SIZE)
  442. #define ZEND_MM_ALIGNED_MIN_HEADER_SIZE (ZEND_MM_MIN_ALLOC_BLOCK_SIZE>ZEND_MM_ALIGNED_FREE_HEADER_SIZE?ZEND_MM_MIN_ALLOC_BLOCK_SIZE:ZEND_MM_ALIGNED_FREE_HEADER_SIZE)
  443. #define ZEND_MM_ALIGNED_SEGMENT_SIZE ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_segment))
  444. #define ZEND_MM_MIN_SIZE ((ZEND_MM_ALIGNED_MIN_HEADER_SIZE>(ZEND_MM_ALIGNED_HEADER_SIZE+END_MAGIC_SIZE))?(ZEND_MM_ALIGNED_MIN_HEADER_SIZE-(ZEND_MM_ALIGNED_HEADER_SIZE+END_MAGIC_SIZE)):0)
  445. #define ZEND_MM_MAX_SMALL_SIZE ((ZEND_MM_NUM_BUCKETS<<ZEND_MM_ALIGNMENT_LOG2)+ZEND_MM_ALIGNED_MIN_HEADER_SIZE)
  446. #define ZEND_MM_TRUE_SIZE(size) ((size<ZEND_MM_MIN_SIZE)?(ZEND_MM_ALIGNED_MIN_HEADER_SIZE):(ZEND_MM_ALIGNED_SIZE(size+ZEND_MM_ALIGNED_HEADER_SIZE+END_MAGIC_SIZE)))
  447. #define ZEND_MM_BUCKET_INDEX(true_size) ((true_size>>ZEND_MM_ALIGNMENT_LOG2)-(ZEND_MM_ALIGNED_MIN_HEADER_SIZE>>ZEND_MM_ALIGNMENT_LOG2))
  448. #define ZEND_MM_SMALL_SIZE(true_size) (true_size < ZEND_MM_MAX_SMALL_SIZE)
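/* Worked example (illustrative; assumes a 64-bit build with 8-byte
   alignment and ZEND_DEBUG, ZEND_MM_COOKIES and ZEND_MM_HEAP_PROTECTION
   all disabled, so the block header is 16 bytes and END_MAGIC_SIZE is 0):
   a 100-byte request gives ZEND_MM_TRUE_SIZE(100) =
   ZEND_MM_ALIGNED_SIZE(100 + 16) = 120 bytes, which is below
   ZEND_MM_MAX_SMALL_SIZE (544 under these assumptions) and falls into
   small bucket ZEND_MM_BUCKET_INDEX(120) = 120/8 - 32/8 = 11. Because
   every size is a multiple of the alignment, the two low bits of
   info._size/_prev are free to carry the block-type tag
   (ZEND_MM_FREE/USED/GUARD_BLOCK). */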
  449. /* Memory calculations */
  450. #define ZEND_MM_BLOCK_AT(blk, offset) ((zend_mm_block *) (((char *) (blk))+(offset)))
  451. #define ZEND_MM_DATA_OF(p) ((void *) (((char *) (p))+ZEND_MM_ALIGNED_HEADER_SIZE))
  452. #define ZEND_MM_HEADER_OF(blk) ZEND_MM_BLOCK_AT(blk, -(int)ZEND_MM_ALIGNED_HEADER_SIZE)
  453. /* Debug output */
  454. #if ZEND_DEBUG
  455. # ifdef ZTS
  456. # define ZEND_MM_SET_THREAD_ID(block) \
  457. ((zend_mm_block*)(block))->thread_id = tsrm_thread_id()
  458. # define ZEND_MM_BAD_THREAD_ID(block) ((block)->thread_id != tsrm_thread_id())
  459. # else
  460. # define ZEND_MM_SET_THREAD_ID(block)
  461. # define ZEND_MM_BAD_THREAD_ID(block) 0
  462. # endif
  463. # define ZEND_MM_VALID_PTR(block) \
  464. zend_mm_check_ptr(heap, block, 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC)
  465. # define ZEND_MM_SET_MAGIC(block, val) do { \
  466. (block)->magic = (val); \
  467. } while (0)
  468. # define ZEND_MM_CHECK_MAGIC(block, val) do { \
  469. if ((block)->magic != (val)) { \
  470. zend_mm_panic("zend_mm_heap corrupted"); \
  471. } \
  472. } while (0)
  473. # define ZEND_MM_SET_DEBUG_INFO(block, __size, set_valid, set_thread) do { \
  474. ((zend_mm_block*)(block))->debug.filename = __zend_filename; \
  475. ((zend_mm_block*)(block))->debug.lineno = __zend_lineno; \
  476. ((zend_mm_block*)(block))->debug.orig_filename = __zend_orig_filename; \
  477. ((zend_mm_block*)(block))->debug.orig_lineno = __zend_orig_lineno; \
  478. ZEND_MM_SET_BLOCK_SIZE(block, __size); \
  479. if (set_valid) { \
  480. ZEND_MM_SET_MAGIC(block, MEM_BLOCK_VALID); \
  481. } \
  482. if (set_thread) { \
  483. ZEND_MM_SET_THREAD_ID(block); \
  484. } \
  485. } while (0)
  486. #else
  487. # define ZEND_MM_VALID_PTR(ptr) EXPECTED(ptr != NULL)
  488. # define ZEND_MM_SET_MAGIC(block, val)
  489. # define ZEND_MM_CHECK_MAGIC(block, val)
  490. # define ZEND_MM_SET_DEBUG_INFO(block, __size, set_valid, set_thread) ZEND_MM_SET_BLOCK_SIZE(block, __size)
  491. #endif
  492. #if ZEND_MM_HEAP_PROTECTION
  493. # define ZEND_MM_CHECK_PROTECTION(block) \
  494. do { \
  495. if ((block)->debug.start_magic != _mem_block_start_magic || \
  496. memcmp(ZEND_MM_END_MAGIC_PTR(block), &_mem_block_end_magic, END_MAGIC_SIZE) != 0) { \
  497. zend_mm_panic("zend_mm_heap corrupted"); \
  498. } \
  499. } while (0)
  500. # define ZEND_MM_END_MAGIC_PTR(block) \
  501. (((char*)(ZEND_MM_DATA_OF(block))) + ((zend_mm_block*)(block))->debug.size)
  502. # define END_MAGIC_SIZE sizeof(unsigned int)
  503. # define ZEND_MM_SET_BLOCK_SIZE(block, __size) do { \
  504. char *p; \
  505. ((zend_mm_block*)(block))->debug.size = (__size); \
  506. p = ZEND_MM_END_MAGIC_PTR(block); \
  507. ((zend_mm_block*)(block))->debug.start_magic = _mem_block_start_magic; \
  508. memcpy(p, &_mem_block_end_magic, END_MAGIC_SIZE); \
  509. } while (0)
  510. static unsigned int _mem_block_start_magic = 0;
  511. static unsigned int _mem_block_end_magic = 0;
  512. #else
  513. # if ZEND_DEBUG
  514. # define ZEND_MM_SET_BLOCK_SIZE(block, _size) \
  515. ((zend_mm_block*)(block))->debug.size = (_size)
  516. # else
  517. # define ZEND_MM_SET_BLOCK_SIZE(block, _size)
  518. # endif
  519. # define ZEND_MM_CHECK_PROTECTION(block)
  520. # define END_MAGIC_SIZE 0
  521. #endif
  522. #if ZEND_MM_SAFE_UNLINKING
  523. # define ZEND_MM_CHECK_BLOCK_LINKAGE(block) \
  524. if (UNEXPECTED((block)->info._size != ZEND_MM_BLOCK_AT(block, ZEND_MM_FREE_BLOCK_SIZE(block))->info._prev) || \
  525. UNEXPECTED(!UNEXPECTED(ZEND_MM_IS_FIRST_BLOCK(block)) && \
  526. UNEXPECTED(ZEND_MM_PREV_BLOCK(block)->info._size != (block)->info._prev))) { \
  527. zend_mm_panic("zend_mm_heap corrupted"); \
  528. }
  529. #define ZEND_MM_CHECK_TREE(block) \
  530. if (UNEXPECTED(*((block)->parent) != (block))) { \
  531. zend_mm_panic("zend_mm_heap corrupted"); \
  532. }
  533. #else
  534. # define ZEND_MM_CHECK_BLOCK_LINKAGE(block)
  535. # define ZEND_MM_CHECK_TREE(block)
  536. #endif
  537. #define ZEND_MM_LARGE_BUCKET_INDEX(S) zend_mm_high_bit(S)
  538. static void *_zend_mm_alloc_int(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) ZEND_ATTRIBUTE_MALLOC;
  539. static void _zend_mm_free_int(zend_mm_heap *heap, void *p ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
  540. static void *_zend_mm_realloc_int(zend_mm_heap *heap, void *p, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
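/* zend_mm_high_bit()/zend_mm_low_bit() return the index of the most/least
   significant set bit (bsr/bsf), via inline assembly on x86/x86-64 and a
   portable fallback elsewhere. high_bit(size), i.e. floor(log2(size)),
   selects the large-block bucket (ZEND_MM_LARGE_BUCKET_INDEX); low_bit()
   scans the free bitmaps for the first non-empty bucket, and
   low_bit(x) == high_bit(x) serves as the power-of-two test at startup. */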
  541. static inline unsigned int zend_mm_high_bit(size_t _size)
  542. {
  543. #if defined(__GNUC__) && defined(i386)
  544. unsigned int n;
  545. __asm__("bsrl %1,%0\n\t" : "=r" (n) : "rm" (_size));
  546. return n;
  547. #elif defined(__GNUC__) && defined(__x86_64__)
  548. unsigned long n;
  549. __asm__("bsrq %1,%0\n\t" : "=r" (n) : "rm" (_size));
  550. return (unsigned int)n;
  551. #elif defined(_MSC_VER) && defined(_M_IX86)
  552. __asm {
  553. bsr eax, _size
  554. }
  555. #else
  556. unsigned int n = 0;
  557. while (_size != 0) {
  558. _size = _size >> 1;
  559. n++;
  560. }
  561. return n-1;
  562. #endif
  563. }
  564. static inline unsigned int zend_mm_low_bit(size_t _size)
  565. {
  566. #if defined(__GNUC__) && defined(i386)
  567. unsigned int n;
  568. __asm__("bsfl %1,%0\n\t" : "=r" (n) : "rm" (_size));
  569. return n;
  570. #elif defined(__GNUC__) && defined(__x86_64__)
  571. unsigned long n;
  572. __asm__("bsfq %1,%0\n\t" : "=r" (n) : "rm" (_size));
  573. return (unsigned int)n;
  574. #elif defined(_MSC_VER) && defined(_M_IX86)
  575. __asm {
  576. bsf eax, _size
  577. }
  578. #else
  579. static const int offset[16] = {4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0};
  580. unsigned int n;
  581. unsigned int index = 0;
  582. n = offset[_size & 15];
  583. while (n == 4) {
  584. _size >>= 4;
  585. index += n;
  586. n = offset[_size & 15];
  587. }
  588. return index + n;
  589. #endif
  590. }
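/* Free-list maintenance: zend_mm_add_to_rest_list() parks a block on the
   unsorted "rest" list; zend_mm_add_to_free_list() files it either in its
   exact-size small bucket or in the large-block tree, where the descent is
   driven by the bits of the size below its highest set bit and equal-size
   blocks share a circular doubly linked list; zend_mm_remove_from_free_list()
   undoes either case and, with ZEND_MM_SAFE_UNLINKING, panics if the
   neighbouring links do not point back at the block being unlinked. */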
  591. static inline void zend_mm_add_to_rest_list(zend_mm_heap *heap, zend_mm_free_block *mm_block)
  592. {
  593. zend_mm_free_block *prev, *next;
  594. ZEND_MM_SET_MAGIC(mm_block, MEM_BLOCK_FREED);
  595. if (!ZEND_MM_SMALL_SIZE(ZEND_MM_FREE_BLOCK_SIZE(mm_block))) {
  596. mm_block->parent = NULL;
  597. }
  598. prev = heap->rest_buckets[0];
  599. next = prev->next_free_block;
  600. mm_block->prev_free_block = prev;
  601. mm_block->next_free_block = next;
  602. prev->next_free_block = next->prev_free_block = mm_block;
  603. }
  604. static inline void zend_mm_add_to_free_list(zend_mm_heap *heap, zend_mm_free_block *mm_block)
  605. {
  606. size_t size;
  607. size_t index;
  608. ZEND_MM_SET_MAGIC(mm_block, MEM_BLOCK_FREED);
  609. size = ZEND_MM_FREE_BLOCK_SIZE(mm_block);
  610. if (EXPECTED(!ZEND_MM_SMALL_SIZE(size))) {
  611. zend_mm_free_block **p;
  612. index = ZEND_MM_LARGE_BUCKET_INDEX(size);
  613. p = &heap->large_free_buckets[index];
  614. mm_block->child[0] = mm_block->child[1] = NULL;
  615. if (!*p) {
  616. *p = mm_block;
  617. mm_block->parent = p;
  618. mm_block->prev_free_block = mm_block->next_free_block = mm_block;
  619. heap->large_free_bitmap |= (ZEND_MM_LONG_CONST(1) << index);
  620. } else {
  621. size_t m;
  622. for (m = size << (ZEND_MM_NUM_BUCKETS - index); ; m <<= 1) {
  623. zend_mm_free_block *prev = *p;
  624. if (ZEND_MM_FREE_BLOCK_SIZE(prev) != size) {
  625. p = &prev->child[(m >> (ZEND_MM_NUM_BUCKETS-1)) & 1];
  626. if (!*p) {
  627. *p = mm_block;
  628. mm_block->parent = p;
  629. mm_block->prev_free_block = mm_block->next_free_block = mm_block;
  630. break;
  631. }
  632. } else {
  633. zend_mm_free_block *next = prev->next_free_block;
  634. prev->next_free_block = next->prev_free_block = mm_block;
  635. mm_block->next_free_block = next;
  636. mm_block->prev_free_block = prev;
  637. mm_block->parent = NULL;
  638. break;
  639. }
  640. }
  641. }
  642. } else {
  643. zend_mm_free_block *prev, *next;
  644. index = ZEND_MM_BUCKET_INDEX(size);
  645. prev = ZEND_MM_SMALL_FREE_BUCKET(heap, index);
  646. if (prev->prev_free_block == prev) {
  647. heap->free_bitmap |= (ZEND_MM_LONG_CONST(1) << index);
  648. }
  649. next = prev->next_free_block;
  650. mm_block->prev_free_block = prev;
  651. mm_block->next_free_block = next;
  652. prev->next_free_block = next->prev_free_block = mm_block;
  653. }
  654. }
  655. static inline void zend_mm_remove_from_free_list(zend_mm_heap *heap, zend_mm_free_block *mm_block)
  656. {
  657. zend_mm_free_block *prev = mm_block->prev_free_block;
  658. zend_mm_free_block *next = mm_block->next_free_block;
  659. ZEND_MM_CHECK_MAGIC(mm_block, MEM_BLOCK_FREED);
  660. if (EXPECTED(prev == mm_block)) {
  661. zend_mm_free_block **rp, **cp;
  662. #if ZEND_MM_SAFE_UNLINKING
  663. if (UNEXPECTED(next != mm_block)) {
  664. zend_mm_panic("zend_mm_heap corrupted");
  665. }
  666. #endif
  667. rp = &mm_block->child[mm_block->child[1] != NULL];
  668. prev = *rp;
  669. if (EXPECTED(prev == NULL)) {
  670. size_t index = ZEND_MM_LARGE_BUCKET_INDEX(ZEND_MM_FREE_BLOCK_SIZE(mm_block));
  671. ZEND_MM_CHECK_TREE(mm_block);
  672. *mm_block->parent = NULL;
  673. if (mm_block->parent == &heap->large_free_buckets[index]) {
  674. heap->large_free_bitmap &= ~(ZEND_MM_LONG_CONST(1) << index);
  675. }
  676. } else {
  677. while (*(cp = &(prev->child[prev->child[1] != NULL])) != NULL) {
  678. prev = *cp;
  679. rp = cp;
  680. }
  681. *rp = NULL;
  682. subst_block:
  683. ZEND_MM_CHECK_TREE(mm_block);
  684. *mm_block->parent = prev;
  685. prev->parent = mm_block->parent;
  686. if ((prev->child[0] = mm_block->child[0])) {
  687. ZEND_MM_CHECK_TREE(prev->child[0]);
  688. prev->child[0]->parent = &prev->child[0];
  689. }
  690. if ((prev->child[1] = mm_block->child[1])) {
  691. ZEND_MM_CHECK_TREE(prev->child[1]);
  692. prev->child[1]->parent = &prev->child[1];
  693. }
  694. }
  695. } else {
  696. #if ZEND_MM_SAFE_UNLINKING
  697. if (UNEXPECTED(prev->next_free_block != mm_block) || UNEXPECTED(next->prev_free_block != mm_block)) {
  698. zend_mm_panic("zend_mm_heap corrupted");
  699. }
  700. #endif
  701. prev->next_free_block = next;
  702. next->prev_free_block = prev;
  703. if (EXPECTED(ZEND_MM_SMALL_SIZE(ZEND_MM_FREE_BLOCK_SIZE(mm_block)))) {
  704. if (EXPECTED(prev == next)) {
  705. size_t index = ZEND_MM_BUCKET_INDEX(ZEND_MM_FREE_BLOCK_SIZE(mm_block));
  706. if (EXPECTED(heap->free_buckets[index*2] == heap->free_buckets[index*2+1])) {
  707. heap->free_bitmap &= ~(ZEND_MM_LONG_CONST(1) << index);
  708. }
  709. }
  710. } else if (UNEXPECTED(mm_block->parent != NULL)) {
  711. goto subst_block;
  712. }
  713. }
  714. }
  715. static inline void zend_mm_init(zend_mm_heap *heap)
  716. {
  717. zend_mm_free_block* p;
  718. int i;
  719. heap->free_bitmap = 0;
  720. heap->large_free_bitmap = 0;
  721. #if ZEND_MM_CACHE
  722. heap->cached = 0;
  723. memset(heap->cache, 0, sizeof(heap->cache));
  724. #endif
  725. #if ZEND_MM_CACHE_STAT
  726. for (i = 0; i < ZEND_MM_NUM_BUCKETS; i++) {
  727. heap->cache_stat[i].count = 0;
  728. }
  729. #endif
  730. p = ZEND_MM_SMALL_FREE_BUCKET(heap, 0);
  731. for (i = 0; i < ZEND_MM_NUM_BUCKETS; i++) {
  732. p->next_free_block = p;
  733. p->prev_free_block = p;
  734. p = (zend_mm_free_block*)((char*)p + sizeof(zend_mm_free_block*) * 2);
  735. heap->large_free_buckets[i] = NULL;
  736. }
  737. heap->rest_buckets[0] = heap->rest_buckets[1] = ZEND_MM_REST_BUCKET(heap);
  738. }
  739. static void zend_mm_del_segment(zend_mm_heap *heap, zend_mm_segment *segment)
  740. {
  741. zend_mm_segment **p = &heap->segments_list;
  742. while (*p != segment) {
  743. p = &(*p)->next_segment;
  744. }
  745. *p = segment->next_segment;
  746. heap->real_size -= segment->size;
  747. ZEND_MM_STORAGE_FREE(segment);
  748. }
  749. #if ZEND_MM_CACHE
  750. static void zend_mm_free_cache(zend_mm_heap *heap)
  751. {
  752. int i;
  753. for (i = 0; i < ZEND_MM_NUM_BUCKETS; i++) {
  754. if (heap->cache[i]) {
  755. zend_mm_free_block *mm_block = heap->cache[i];
  756. while (mm_block) {
  757. size_t size = ZEND_MM_BLOCK_SIZE(mm_block);
  758. zend_mm_free_block *q = mm_block->prev_free_block;
  759. zend_mm_block *next_block = ZEND_MM_NEXT_BLOCK(mm_block);
  760. heap->cached -= size;
  761. if (ZEND_MM_PREV_BLOCK_IS_FREE(mm_block)) {
  762. mm_block = (zend_mm_free_block*)ZEND_MM_PREV_BLOCK(mm_block);
  763. size += ZEND_MM_FREE_BLOCK_SIZE(mm_block);
  764. zend_mm_remove_from_free_list(heap, (zend_mm_free_block *) mm_block);
  765. }
  766. if (ZEND_MM_IS_FREE_BLOCK(next_block)) {
  767. size += ZEND_MM_FREE_BLOCK_SIZE(next_block);
  768. zend_mm_remove_from_free_list(heap, (zend_mm_free_block *) next_block);
  769. }
  770. ZEND_MM_BLOCK(mm_block, ZEND_MM_FREE_BLOCK, size);
  771. if (ZEND_MM_IS_FIRST_BLOCK(mm_block) &&
  772. ZEND_MM_IS_GUARD_BLOCK(ZEND_MM_NEXT_BLOCK(mm_block))) {
  773. zend_mm_del_segment(heap, (zend_mm_segment *) ((char *)mm_block - ZEND_MM_ALIGNED_SEGMENT_SIZE));
  774. } else {
  775. zend_mm_add_to_free_list(heap, (zend_mm_free_block *) mm_block);
  776. }
  777. mm_block = q;
  778. }
  779. heap->cache[i] = NULL;
  780. #if ZEND_MM_CACHE_STAT
  781. heap->cache_stat[i].count = 0;
  782. #endif
  783. }
  784. }
  785. }
  786. #endif
  787. #if ZEND_MM_HEAP_PROTECTION || ZEND_MM_COOKIES
  788. static void zend_mm_random(unsigned char *buf, size_t size) /* {{{ */
  789. {
  790. size_t i = 0;
  791. unsigned char t;
  792. #ifdef ZEND_WIN32
  793. HCRYPTPROV hCryptProv;
  794. int has_context = 0;
  795. if (!CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL, 0)) {
796. /* Could mean that the key container does not exist; let's try
797. again by asking for a new one */
  798. if (GetLastError() == NTE_BAD_KEYSET) {
  799. if (CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL, CRYPT_NEWKEYSET)) {
  800. has_context = 1;
  801. }
  802. }
  803. } else {
  804. has_context = 1;
  805. }
  806. if (has_context) {
  807. do {
  808. BOOL ret = CryptGenRandom(hCryptProv, size, buf);
  809. CryptReleaseContext(hCryptProv, 0);
  810. if (ret) {
  811. while (i < size && buf[i] != 0) {
  812. i++;
  813. }
  814. if (i == size) {
  815. return;
  816. }
  817. }
  818. } while (0);
  819. }
  820. #elif defined(HAVE_DEV_URANDOM)
  821. int fd = open("/dev/urandom", 0);
  822. if (fd >= 0) {
  823. if (read(fd, buf, size) == size) {
  824. while (i < size && buf[i] != 0) {
  825. i++;
  826. }
  827. if (i == size) {
  828. close(fd);
  829. return;
  830. }
  831. }
  832. close(fd);
  833. }
  834. #endif
  835. t = (unsigned char)getpid();
  836. while (i < size) {
  837. do {
  838. buf[i] = ((unsigned char)rand()) ^ t;
  839. } while (buf[i] == 0);
  840. t = buf[i++] << 1;
  841. }
  842. }
  843. /* }}} */
  844. #endif
  845. /* Notes:
846. * - This function may alter the block_size value to match platform alignment
  847. * - This function does *not* perform sanity checks on the arguments
  848. */
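/* Illustrative only: zend_mm_startup() below ends up calling this roughly as
   heap = zend_mm_startup_ex(&mem_handlers[0], ZEND_MM_SEG_SIZE,
                             ZEND_MM_RESERVE_SIZE, 0, NULL);
   block_size must be a power of two (checked via
   zend_mm_low_bit() == zend_mm_high_bit()), and reserve_size bytes are set
   aside so zend_mm_safe_error() can still report an error after the memory
   limit has been hit. Passing internal=1 relocates the heap structure into
   memory managed by the heap itself. */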
  849. ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_mem_handlers *handlers, size_t block_size, size_t reserve_size, int internal, void *params)
  850. {
  851. zend_mm_storage *storage;
  852. zend_mm_heap *heap;
  853. #if 0
  854. int i;
  855. printf("ZEND_MM_ALIGNMENT=%d\n", ZEND_MM_ALIGNMENT);
  856. printf("ZEND_MM_ALIGNMENT_LOG2=%d\n", ZEND_MM_ALIGNMENT_LOG2);
  857. printf("ZEND_MM_MIN_SIZE=%d\n", ZEND_MM_MIN_SIZE);
  858. printf("ZEND_MM_MAX_SMALL_SIZE=%d\n", ZEND_MM_MAX_SMALL_SIZE);
  859. printf("ZEND_MM_ALIGNED_HEADER_SIZE=%d\n", ZEND_MM_ALIGNED_HEADER_SIZE);
  860. printf("ZEND_MM_ALIGNED_FREE_HEADER_SIZE=%d\n", ZEND_MM_ALIGNED_FREE_HEADER_SIZE);
  861. printf("ZEND_MM_MIN_ALLOC_BLOCK_SIZE=%d\n", ZEND_MM_MIN_ALLOC_BLOCK_SIZE);
  862. printf("ZEND_MM_ALIGNED_MIN_HEADER_SIZE=%d\n", ZEND_MM_ALIGNED_MIN_HEADER_SIZE);
  863. printf("ZEND_MM_ALIGNED_SEGMENT_SIZE=%d\n", ZEND_MM_ALIGNED_SEGMENT_SIZE);
  864. for (i = 0; i < ZEND_MM_MAX_SMALL_SIZE; i++) {
  865. printf("%3d%c: %3ld %d %2ld\n", i, (i == ZEND_MM_MIN_SIZE?'*':' '), (long)ZEND_MM_TRUE_SIZE(i), ZEND_MM_SMALL_SIZE(ZEND_MM_TRUE_SIZE(i)), (long)ZEND_MM_BUCKET_INDEX(ZEND_MM_TRUE_SIZE(i)));
  866. }
  867. exit(0);
  868. #endif
  869. #if ZEND_MM_HEAP_PROTECTION
  870. if (_mem_block_start_magic == 0) {
  871. zend_mm_random((unsigned char*)&_mem_block_start_magic, sizeof(_mem_block_start_magic));
  872. }
  873. if (_mem_block_end_magic == 0) {
  874. zend_mm_random((unsigned char*)&_mem_block_end_magic, sizeof(_mem_block_end_magic));
  875. }
  876. #endif
  877. #if ZEND_MM_COOKIES
  878. if (_zend_mm_cookie == 0) {
  879. zend_mm_random((unsigned char*)&_zend_mm_cookie, sizeof(_zend_mm_cookie));
  880. }
  881. #endif
  882. if (zend_mm_low_bit(block_size) != zend_mm_high_bit(block_size)) {
  883. fprintf(stderr, "'block_size' must be a power of two\n");
  884. /* See http://support.microsoft.com/kb/190351 */
  885. #ifdef PHP_WIN32
  886. fflush(stderr);
  887. #endif
  888. exit(255);
  889. }
  890. storage = handlers->init(params);
  891. if (!storage) {
  892. fprintf(stderr, "Cannot initialize zend_mm storage [%s]\n", handlers->name);
  893. /* See http://support.microsoft.com/kb/190351 */
  894. #ifdef PHP_WIN32
  895. fflush(stderr);
  896. #endif
  897. exit(255);
  898. }
  899. storage->handlers = handlers;
  900. heap = malloc(sizeof(struct _zend_mm_heap));
  901. if (heap == NULL) {
  902. fprintf(stderr, "Cannot allocate heap for zend_mm storage [%s]\n", handlers->name);
  903. #ifdef PHP_WIN32
  904. fflush(stderr);
  905. #endif
  906. exit(255);
  907. }
  908. heap->storage = storage;
  909. heap->block_size = block_size;
  910. heap->compact_size = 0;
  911. heap->segments_list = NULL;
  912. zend_mm_init(heap);
  913. # if ZEND_MM_CACHE_STAT
  914. memset(heap->cache_stat, 0, sizeof(heap->cache_stat));
  915. # endif
  916. heap->use_zend_alloc = 1;
  917. heap->real_size = 0;
  918. heap->overflow = 0;
  919. heap->real_peak = 0;
  920. heap->limit = ZEND_MM_LONG_CONST(1)<<(ZEND_MM_NUM_BUCKETS-2);
  921. heap->size = 0;
  922. heap->peak = 0;
  923. heap->internal = internal;
  924. heap->reserve = NULL;
  925. heap->reserve_size = reserve_size;
  926. if (reserve_size > 0) {
  927. heap->reserve = _zend_mm_alloc_int(heap, reserve_size ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
  928. }
  929. if (internal) {
  930. int i;
  931. zend_mm_free_block *p, *q, *orig;
  932. zend_mm_heap *mm_heap = _zend_mm_alloc_int(heap, sizeof(zend_mm_heap) ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
  933. *mm_heap = *heap;
  934. p = ZEND_MM_SMALL_FREE_BUCKET(mm_heap, 0);
  935. orig = ZEND_MM_SMALL_FREE_BUCKET(heap, 0);
  936. for (i = 0; i < ZEND_MM_NUM_BUCKETS; i++) {
  937. q = p;
  938. while (q->prev_free_block != orig) {
  939. q = q->prev_free_block;
  940. }
  941. q->prev_free_block = p;
  942. q = p;
  943. while (q->next_free_block != orig) {
  944. q = q->next_free_block;
  945. }
  946. q->next_free_block = p;
  947. p = (zend_mm_free_block*)((char*)p + sizeof(zend_mm_free_block*) * 2);
  948. orig = (zend_mm_free_block*)((char*)orig + sizeof(zend_mm_free_block*) * 2);
  949. if (mm_heap->large_free_buckets[i]) {
  950. mm_heap->large_free_buckets[i]->parent = &mm_heap->large_free_buckets[i];
  951. }
  952. }
  953. mm_heap->rest_buckets[0] = mm_heap->rest_buckets[1] = ZEND_MM_REST_BUCKET(mm_heap);
  954. free(heap);
  955. heap = mm_heap;
  956. }
  957. return heap;
  958. }
  959. ZEND_API zend_mm_heap *zend_mm_startup(void)
  960. {
  961. int i;
  962. size_t seg_size;
  963. char *mem_type = getenv("ZEND_MM_MEM_TYPE");
  964. char *tmp;
  965. const zend_mm_mem_handlers *handlers;
  966. zend_mm_heap *heap;
  967. if (mem_type == NULL) {
  968. i = 0;
  969. } else {
  970. for (i = 0; mem_handlers[i].name; i++) {
  971. if (strcmp(mem_handlers[i].name, mem_type) == 0) {
  972. break;
  973. }
  974. }
  975. if (!mem_handlers[i].name) {
  976. fprintf(stderr, "Wrong or unsupported zend_mm storage type '%s'\n", mem_type);
  977. fprintf(stderr, " supported types:\n");
  978. /* See http://support.microsoft.com/kb/190351 */
  979. #ifdef PHP_WIN32
  980. fflush(stderr);
  981. #endif
  982. for (i = 0; mem_handlers[i].name; i++) {
  983. fprintf(stderr, " '%s'\n", mem_handlers[i].name);
  984. }
  985. /* See http://support.microsoft.com/kb/190351 */
  986. #ifdef PHP_WIN32
  987. fflush(stderr);
  988. #endif
  989. exit(255);
  990. }
  991. }
  992. handlers = &mem_handlers[i];
  993. tmp = getenv("ZEND_MM_SEG_SIZE");
  994. if (tmp) {
  995. seg_size = zend_atoi(tmp, 0);
  996. if (zend_mm_low_bit(seg_size) != zend_mm_high_bit(seg_size)) {
  997. fprintf(stderr, "ZEND_MM_SEG_SIZE must be a power of two\n");
  998. /* See http://support.microsoft.com/kb/190351 */
  999. #ifdef PHP_WIN32
  1000. fflush(stderr);
  1001. #endif
  1002. exit(255);
  1003. } else if (seg_size < ZEND_MM_ALIGNED_SEGMENT_SIZE + ZEND_MM_ALIGNED_HEADER_SIZE) {
  1004. fprintf(stderr, "ZEND_MM_SEG_SIZE is too small\n");
  1005. /* See http://support.microsoft.com/kb/190351 */
  1006. #ifdef PHP_WIN32
  1007. fflush(stderr);
  1008. #endif
  1009. exit(255);
  1010. }
  1011. } else {
  1012. seg_size = ZEND_MM_SEG_SIZE;
  1013. }
  1014. heap = zend_mm_startup_ex(handlers, seg_size, ZEND_MM_RESERVE_SIZE, 0, NULL);
  1015. if (heap) {
  1016. tmp = getenv("ZEND_MM_COMPACT");
  1017. if (tmp) {
  1018. heap->compact_size = zend_atoi(tmp, 0);
  1019. } else {
  1020. heap->compact_size = 2 * 1024 * 1024;
  1021. }
  1022. }
  1023. return heap;
  1024. }
  1025. #if ZEND_DEBUG
  1026. static long zend_mm_find_leaks(zend_mm_segment *segment, zend_mm_block *b)
  1027. {
  1028. long leaks = 0;
  1029. zend_mm_block *p, *q;
  1030. p = ZEND_MM_NEXT_BLOCK(b);
  1031. while (1) {
  1032. if (ZEND_MM_IS_GUARD_BLOCK(p)) {
  1033. ZEND_MM_CHECK_MAGIC(p, MEM_BLOCK_GUARD);
  1034. segment = segment->next_segment;
  1035. if (!segment) {
  1036. break;
  1037. }
  1038. p = (zend_mm_block *) ((char *) segment + ZEND_MM_ALIGNED_SEGMENT_SIZE);
  1039. continue;
  1040. }
  1041. q = ZEND_MM_NEXT_BLOCK(p);
  1042. if (q <= p ||
  1043. (char*)q > (char*)segment + segment->size ||
  1044. p->info._size != q->info._prev) {
  1045. zend_mm_panic("zend_mm_heap corrupted");
  1046. }
  1047. if (!ZEND_MM_IS_FREE_BLOCK(p)) {
  1048. if (p->magic == MEM_BLOCK_VALID) {
  1049. if (p->debug.filename==b->debug.filename && p->debug.lineno==b->debug.lineno) {
  1050. ZEND_MM_SET_MAGIC(p, MEM_BLOCK_LEAK);
  1051. leaks++;
  1052. }
  1053. #if ZEND_MM_CACHE
  1054. } else if (p->magic == MEM_BLOCK_CACHED) {
  1055. /* skip it */
  1056. #endif
  1057. } else if (p->magic != MEM_BLOCK_LEAK) {
  1058. zend_mm_panic("zend_mm_heap corrupted");
  1059. }
  1060. }
  1061. p = q;
  1062. }
  1063. return leaks;
  1064. }
  1065. static void zend_mm_check_leaks(zend_mm_heap *heap TSRMLS_DC)
  1066. {
  1067. zend_mm_segment *segment = heap->segments_list;
  1068. zend_mm_block *p, *q;
  1069. zend_uint total = 0;
  1070. if (!segment) {
  1071. return;
  1072. }
  1073. p = (zend_mm_block *) ((char *) segment + ZEND_MM_ALIGNED_SEGMENT_SIZE);
  1074. while (1) {
  1075. q = ZEND_MM_NEXT_BLOCK(p);
  1076. if (q <= p ||
  1077. (char*)q > (char*)segment + segment->size ||
  1078. p->info._size != q->info._prev) {
  1079. zend_mm_panic("zend_mm_heap corrupted");
  1080. }
  1081. if (!ZEND_MM_IS_FREE_BLOCK(p)) {
  1082. if (p->magic == MEM_BLOCK_VALID) {
  1083. long repeated;
  1084. zend_leak_info leak;
  1085. ZEND_MM_SET_MAGIC(p, MEM_BLOCK_LEAK);
  1086. leak.addr = ZEND_MM_DATA_OF(p);
  1087. leak.size = p->debug.size;
  1088. leak.filename = p->debug.filename;
  1089. leak.lineno = p->debug.lineno;
  1090. leak.orig_filename = p->debug.orig_filename;
  1091. leak.orig_lineno = p->debug.orig_lineno;
  1092. zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL TSRMLS_CC);
  1093. zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak TSRMLS_CC);
  1094. repeated = zend_mm_find_leaks(segment, p);
  1095. total += 1 + repeated;
  1096. if (repeated) {
  1097. zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated TSRMLS_CC);
  1098. }
  1099. #if ZEND_MM_CACHE
  1100. } else if (p->magic == MEM_BLOCK_CACHED) {
  1101. /* skip it */
  1102. #endif
  1103. } else if (p->magic != MEM_BLOCK_LEAK) {
  1104. zend_mm_panic("zend_mm_heap corrupted");
  1105. }
  1106. }
  1107. if (ZEND_MM_IS_GUARD_BLOCK(q)) {
  1108. segment = segment->next_segment;
  1109. if (!segment) {
  1110. break;
  1111. }
  1112. q = (zend_mm_block *) ((char *) segment + ZEND_MM_ALIGNED_SEGMENT_SIZE);
  1113. }
  1114. p = q;
  1115. }
  1116. if (total) {
  1117. zend_message_dispatcher(ZMSG_MEMORY_LEAKS_GRAND_TOTAL, &total TSRMLS_CC);
  1118. }
  1119. }
  1120. static int zend_mm_check_ptr(zend_mm_heap *heap, void *ptr, int silent ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  1121. {
  1122. zend_mm_block *p;
  1123. int no_cache_notice = 0;
  1124. int had_problems = 0;
  1125. int valid_beginning = 1;
  1126. if (silent==2) {
  1127. silent = 1;
  1128. no_cache_notice = 1;
  1129. } else if (silent==3) {
  1130. silent = 0;
  1131. no_cache_notice = 1;
  1132. }
  1133. if (!silent) {
  1134. TSRMLS_FETCH();
  1135. zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL TSRMLS_CC);
  1136. zend_debug_alloc_output("---------------------------------------\n");
  1137. zend_debug_alloc_output("%s(%d) : Block "PTR_FMT" status:\n" ZEND_FILE_LINE_RELAY_CC, ptr);
  1138. if (__zend_orig_filename) {
  1139. zend_debug_alloc_output("%s(%d) : Actual location (location was relayed)\n" ZEND_FILE_LINE_ORIG_RELAY_CC);
  1140. }
  1141. if (!ptr) {
  1142. zend_debug_alloc_output("NULL\n");
  1143. zend_debug_alloc_output("---------------------------------------\n");
  1144. return 0;
  1145. }
  1146. }
  1147. if (!ptr) {
  1148. if (silent) {
  1149. return zend_mm_check_ptr(heap, ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  1150. }
  1151. }
  1152. p = ZEND_MM_HEADER_OF(ptr);
  1153. #ifdef ZTS
  1154. if (ZEND_MM_BAD_THREAD_ID(p)) {
  1155. if (!silent) {
  1156. zend_debug_alloc_output("Invalid pointer: ((thread_id=0x%0.8X) != (expected=0x%0.8X))\n", (long)p->thread_id, (long)tsrm_thread_id());
  1157. had_problems = 1;
  1158. } else {
  1159. return zend_mm_check_ptr(heap, ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  1160. }
  1161. }
  1162. #endif
  1163. if (p->info._size != ZEND_MM_NEXT_BLOCK(p)->info._prev) {
  1164. if (!silent) {
  1165. zend_debug_alloc_output("Invalid pointer: ((size="PTR_FMT") != (next.prev="PTR_FMT"))\n", p->info._size, ZEND_MM_NEXT_BLOCK(p)->info._prev);
  1166. had_problems = 1;
  1167. } else {
  1168. return zend_mm_check_ptr(heap, ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  1169. }
  1170. }
  1171. if (p->info._prev != ZEND_MM_GUARD_BLOCK &&
  1172. ZEND_MM_PREV_BLOCK(p)->info._size != p->info._prev) {
  1173. if (!silent) {
  1174. zend_debug_alloc_output("Invalid pointer: ((prev="PTR_FMT") != (prev.size="PTR_FMT"))\n", p->info._prev, ZEND_MM_PREV_BLOCK(p)->info._size);
  1175. had_problems = 1;
  1176. } else {
  1177. return zend_mm_check_ptr(heap, ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  1178. }
  1179. }
  1180. if (had_problems) {
  1181. zend_debug_alloc_output("---------------------------------------\n");
  1182. return 0;
  1183. }
  1184. if (!silent) {
  1185. zend_debug_alloc_output("%10s\t","Beginning: ");
  1186. }
  1187. if (!ZEND_MM_IS_USED_BLOCK(p)) {
  1188. if (!silent) {
  1189. if (p->magic != MEM_BLOCK_FREED) {
  1190. zend_debug_alloc_output("Freed (magic=0x%0.8X, expected=0x%0.8X)\n", p->magic, MEM_BLOCK_FREED);
  1191. } else {
  1192. zend_debug_alloc_output("Freed\n");
  1193. }
  1194. had_problems = 1;
  1195. } else {
  1196. return zend_mm_check_ptr(heap, ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  1197. }
  1198. } else if (ZEND_MM_IS_GUARD_BLOCK(p)) {
  1199. if (!silent) {
  1200. if (p->magic != MEM_BLOCK_FREED) {
  1201. zend_debug_alloc_output("Guard (magic=0x%0.8X, expected=0x%0.8X)\n", p->magic, MEM_BLOCK_FREED);
  1202. } else {
  1203. zend_debug_alloc_output("Guard\n");
  1204. }
  1205. had_problems = 1;
  1206. } else {
  1207. return zend_mm_check_ptr(heap, ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  1208. }
  1209. } else {
  1210. switch (p->magic) {
  1211. case MEM_BLOCK_VALID:
  1212. case MEM_BLOCK_LEAK:
  1213. if (!silent) {
  1214. zend_debug_alloc_output("OK (allocated on %s:%d, %d bytes)\n", p->debug.filename, p->debug.lineno, (int)p->debug.size);
  1215. }
  1216. break; /* ok */
  1217. case MEM_BLOCK_CACHED:
  1218. if (!no_cache_notice) {
  1219. if (!silent) {
  1220. zend_debug_alloc_output("Cached\n");
  1221. had_problems = 1;
  1222. } else {
  1223. return zend_mm_check_ptr(heap, ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  1224. }
  1225. }
  1226. case MEM_BLOCK_FREED:
  1227. if (!silent) {
  1228. zend_debug_alloc_output("Freed (invalid)\n");
  1229. had_problems = 1;
  1230. } else {
  1231. return zend_mm_check_ptr(heap, ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  1232. }
  1233. break;
  1234. case MEM_BLOCK_GUARD:
  1235. if (!silent) {
  1236. zend_debug_alloc_output("Guard (invalid)\n");
  1237. had_problems = 1;
  1238. } else {
  1239. return zend_mm_check_ptr(heap, ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  1240. }
  1241. break;
  1242. default:
  1243. if (!silent) {
  1244. zend_debug_alloc_output("Unknown (magic=0x%0.8X, expected=0x%0.8X)\n", p->magic, MEM_BLOCK_VALID);
  1245. had_problems = 1;
  1246. valid_beginning = 0;
  1247. } else {
  1248. return zend_mm_check_ptr(heap, ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  1249. }
  1250. break;
  1251. }
  1252. }
  1253. #if ZEND_MM_HEAP_PROTECTION
  1254. if (!valid_beginning) {
  1255. if (!silent) {
  1256. zend_debug_alloc_output("%10s\t", "Start:");
  1257. zend_debug_alloc_output("Unknown\n");
  1258. zend_debug_alloc_output("%10s\t", "End:");
  1259. zend_debug_alloc_output("Unknown\n");
  1260. }
  1261. } else {
  1262. char *end_magic = ZEND_MM_END_MAGIC_PTR(p);
  1263. if (p->debug.start_magic == _mem_block_start_magic) {
  1264. if (!silent) {
  1265. zend_debug_alloc_output("%10s\t", "Start:");
  1266. zend_debug_alloc_output("OK\n");
  1267. }
  1268. } else {
  1269. char *overflow_ptr, *magic_ptr=(char *) &_mem_block_start_magic;
  1270. int overflows=0;
  1271. int i;
  1272. if (silent) {
  1273. return _mem_block_check(ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  1274. }
  1275. had_problems = 1;
  1276. overflow_ptr = (char *) &p->debug.start_magic;
  1277. i = END_MAGIC_SIZE;
  1278. while (--i >= 0) {
  1279. if (overflow_ptr[i]!=magic_ptr[i]) {
  1280. overflows++;
  1281. }
  1282. }
  1283. zend_debug_alloc_output("%10s\t", "Start:");
  1284. zend_debug_alloc_output("Overflown (magic=0x%0.8X instead of 0x%0.8X)\n", p->debug.start_magic, _mem_block_start_magic);
  1285. zend_debug_alloc_output("%10s\t","");
  1286. if (overflows >= END_MAGIC_SIZE) {
  1287. zend_debug_alloc_output("At least %d bytes overflown\n", END_MAGIC_SIZE);
  1288. } else {
  1289. zend_debug_alloc_output("%d byte(s) overflown\n", overflows);
  1290. }
  1291. }
  1292. if (memcmp(end_magic, &_mem_block_end_magic, END_MAGIC_SIZE)==0) {
  1293. if (!silent) {
  1294. zend_debug_alloc_output("%10s\t", "End:");
  1295. zend_debug_alloc_output("OK\n");
  1296. }
  1297. } else {
  1298. char *overflow_ptr, *magic_ptr=(char *) &_mem_block_end_magic;
  1299. int overflows=0;
  1300. int i;
  1301. if (silent) {
  1302. return _mem_block_check(ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  1303. }
  1304. had_problems = 1;
  1305. overflow_ptr = (char *) end_magic;
  1306. for (i=0; i < END_MAGIC_SIZE; i++) {
  1307. if (overflow_ptr[i]!=magic_ptr[i]) {
  1308. overflows++;
  1309. }
  1310. }
  1311. zend_debug_alloc_output("%10s\t", "End:");
  1312. zend_debug_alloc_output("Overflown (magic=0x%0.8X instead of 0x%0.8X)\n", *end_magic, _mem_block_end_magic);
  1313. zend_debug_alloc_output("%10s\t","");
  1314. if (overflows >= END_MAGIC_SIZE) {
  1315. zend_debug_alloc_output("At least %d bytes overflown\n", END_MAGIC_SIZE);
  1316. } else {
  1317. zend_debug_alloc_output("%d byte(s) overflown\n", overflows);
  1318. }
  1319. }
  1320. }
  1321. #endif
  1322. if (!silent) {
  1323. zend_debug_alloc_output("---------------------------------------\n");
  1324. }
  1325. return ((!had_problems) ? 1 : 0);
  1326. }
  1327. static int zend_mm_check_heap(zend_mm_heap *heap, int silent ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  1328. {
  1329. zend_mm_segment *segment = heap->segments_list;
  1330. zend_mm_block *p, *q;
  1331. int errors = 0;
  1332. if (!segment) {
  1333. return 0;
  1334. }
  1335. p = (zend_mm_block *) ((char *) segment + ZEND_MM_ALIGNED_SEGMENT_SIZE);
  1336. while (1) {
  1337. q = ZEND_MM_NEXT_BLOCK(p);
  1338. if (q <= p ||
  1339. (char*)q > (char*)segment + segment->size ||
  1340. p->info._size != q->info._prev) {
  1341. zend_mm_panic("zend_mm_heap corrupted");
  1342. }
  1343. if (!ZEND_MM_IS_FREE_BLOCK(p)) {
  1344. if (p->magic == MEM_BLOCK_VALID || p->magic == MEM_BLOCK_LEAK) {
  1345. if (!zend_mm_check_ptr(heap, ZEND_MM_DATA_OF(p), (silent?2:3) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC)) {
  1346. errors++;
  1347. }
  1348. #if ZEND_MM_CACHE
  1349. } else if (p->magic == MEM_BLOCK_CACHED) {
  1350. /* skip it */
  1351. #endif
  1352. } else if (p->magic != MEM_BLOCK_LEAK) {
  1353. zend_mm_panic("zend_mm_heap corrupted");
  1354. }
  1355. }
  1356. if (ZEND_MM_IS_GUARD_BLOCK(q)) {
  1357. segment = segment->next_segment;
  1358. if (!segment) {
  1359. return errors;
  1360. }
  1361. q = (zend_mm_block *) ((char *) segment + ZEND_MM_ALIGNED_SEGMENT_SIZE);
  1362. }
  1363. p = q;
  1364. }
  1365. }
  1366. #endif
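/* zend_mm_shutdown(): drops the reserve block, reports leaked blocks in
   debug builds, and returns every segment to the storage layer. With
   full_shutdown the storage destructor runs and (unless the heap was
   created with internal=1) the heap structure itself is freed; otherwise
   the heap is re-initialized for the next request, the storage backend is
   asked to compact() if real_peak exceeded compact_size, and the reserve
   block is re-allocated. */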
  1367. ZEND_API void zend_mm_shutdown(zend_mm_heap *heap, int full_shutdown, int silent TSRMLS_DC)
  1368. {
  1369. zend_mm_storage *storage;
  1370. zend_mm_segment *segment;
  1371. zend_mm_segment *prev;
  1372. int internal;
  1373. if (heap->reserve) {
  1374. #if ZEND_DEBUG
  1375. if (!silent) {
  1376. _zend_mm_free_int(heap, heap->reserve ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
  1377. }
  1378. #endif
  1379. heap->reserve = NULL;
  1380. }
  1381. #if ZEND_MM_CACHE_STAT
  1382. if (full_shutdown) {
  1383. FILE *f;
  1384. f = fopen("zend_mm.log", "w");
  1385. if (f) {
  1386. int i,j;
  1387. size_t size, true_size, min_size, max_size;
  1388. int hit = 0, miss = 0;
  1389. fprintf(f, "\nidx min_size max_size true_size max_len hits misses\n");
  1390. size = 0;
  1391. while (1) {
  1392. true_size = ZEND_MM_TRUE_SIZE(size);
  1393. if (ZEND_MM_SMALL_SIZE(true_size)) {
  1394. min_size = size;
  1395. i = ZEND_MM_BUCKET_INDEX(true_size);
  1396. size++;
  1397. while (1) {
  1398. true_size = ZEND_MM_TRUE_SIZE(size);
  1399. if (ZEND_MM_SMALL_SIZE(true_size)) {
  1400. j = ZEND_MM_BUCKET_INDEX(true_size);
  1401. if (j > i) {
  1402. max_size = size-1;
  1403. break;
  1404. }
  1405. } else {
  1406. max_size = size-1;
  1407. break;
  1408. }
  1409. size++;
  1410. }
  1411. hit += heap->cache_stat[i].hit;
  1412. miss += heap->cache_stat[i].miss;
  1413. fprintf(f, "%2d %8d %8d %9d %8d %8d %8d\n", i, (int)min_size, (int)max_size, ZEND_MM_TRUE_SIZE(max_size), heap->cache_stat[i].max_count, heap->cache_stat[i].hit, heap->cache_stat[i].miss);
  1414. } else {
  1415. break;
  1416. }
  1417. }
  1418. fprintf(f, " %8d %8d\n", hit, miss);
  1419. fprintf(f, " %8d %8d\n", heap->cache_stat[ZEND_MM_NUM_BUCKETS].hit, heap->cache_stat[ZEND_MM_NUM_BUCKETS].miss);
  1420. fclose(f);
  1421. }
  1422. }
  1423. #endif
  1424. #if ZEND_DEBUG
  1425. if (!silent) {
  1426. zend_mm_check_leaks(heap TSRMLS_CC);
  1427. }
  1428. #endif
  1429. internal = heap->internal;
  1430. storage = heap->storage;
  1431. segment = heap->segments_list;
  1432. while (segment) {
  1433. prev = segment;
  1434. segment = segment->next_segment;
  1435. ZEND_MM_STORAGE_FREE(prev);
  1436. }
  1437. if (full_shutdown) {
  1438. storage->handlers->dtor(storage);
  1439. if (!internal) {
  1440. free(heap);
  1441. }
  1442. } else {
  1443. if (heap->compact_size &&
  1444. heap->real_peak > heap->compact_size) {
  1445. storage->handlers->compact(storage);
  1446. }
  1447. heap->segments_list = NULL;
  1448. zend_mm_init(heap);
  1449. heap->real_size = 0;
  1450. heap->real_peak = 0;
  1451. heap->size = 0;
  1452. heap->peak = 0;
  1453. if (heap->reserve_size) {
  1454. heap->reserve = _zend_mm_alloc_int(heap, heap->reserve_size ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
  1455. }
  1456. heap->overflow = 0;
  1457. }
  1458. }
  1459. static void zend_mm_safe_error(zend_mm_heap *heap,
  1460. const char *format,
  1461. size_t limit,
  1462. #if ZEND_DEBUG
  1463. const char *filename,
  1464. uint lineno,
  1465. #endif
  1466. size_t size)
  1467. {
  1468. if (heap->reserve) {
  1469. _zend_mm_free_int(heap, heap->reserve ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
  1470. heap->reserve = NULL;
  1471. }
  1472. if (heap->overflow == 0) {
  1473. char *error_filename;
  1474. uint error_lineno;
  1475. TSRMLS_FETCH();
  1476. if (zend_is_compiling(TSRMLS_C)) {
  1477. error_filename = zend_get_compiled_filename(TSRMLS_C);
  1478. error_lineno = zend_get_compiled_lineno(TSRMLS_C);
  1479. } else if (EG(in_execution)) {
  1480. error_filename = EG(active_op_array)?EG(active_op_array)->filename:NULL;
  1481. error_lineno = EG(opline_ptr)?(*EG(opline_ptr))->lineno:0;
  1482. } else {
  1483. error_filename = NULL;
  1484. error_lineno = 0;
  1485. }
  1486. if (!error_filename) {
  1487. error_filename = "Unknown";
  1488. }
  1489. heap->overflow = 1;
  1490. zend_try {
  1491. zend_error_noreturn(E_ERROR,
  1492. format,
  1493. limit,
  1494. #if ZEND_DEBUG
  1495. filename,
  1496. lineno,
  1497. #endif
  1498. size);
  1499. } zend_catch {
  1500. if (heap->overflow == 2) {
  1501. fprintf(stderr, "\nFatal error: ");
  1502. fprintf(stderr,
  1503. format,
  1504. limit,
  1505. #if ZEND_DEBUG
  1506. filename,
  1507. lineno,
  1508. #endif
  1509. size);
  1510. fprintf(stderr, " in %s on line %d\n", error_filename, error_lineno);
  1511. }
  1512. /* See http://support.microsoft.com/kb/190351 */
  1513. #ifdef PHP_WIN32
  1514. fflush(stderr);
  1515. #endif
  1516. } zend_end_try();
  1517. } else {
  1518. heap->overflow = 2;
  1519. }
  1520. zend_bailout();
  1521. }
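/* zend_mm_search_large_block() (below) performs a best-fit lookup for
   "large" requests: it first walks the tree indexed by the high bit of
   true_size, following the size-bit path while tracking the smallest
   adequate block and any untaken right subtree; an exact size match is
   returned at once. If that bucket yields nothing suitable, it uses
   large_free_bitmap and zend_mm_low_bit() to jump to the next non-empty
   bucket of strictly larger blocks and takes the smallest block found
   along one path of that tree. Returns NULL when no large free block can
   satisfy the request. */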
  1522. static zend_mm_free_block *zend_mm_search_large_block(zend_mm_heap *heap, size_t true_size)
  1523. {
  1524. zend_mm_free_block *best_fit;
  1525. size_t index = ZEND_MM_LARGE_BUCKET_INDEX(true_size);
  1526. size_t bitmap = heap->large_free_bitmap >> index;
  1527. zend_mm_free_block *p;
  1528. if (bitmap == 0) {
  1529. return NULL;
  1530. }
  1531. if (UNEXPECTED((bitmap & 1) != 0)) {
  1532. /* Search for best "large" free block */
  1533. zend_mm_free_block *rst = NULL;
  1534. size_t m;
  1535. size_t best_size = -1;
  1536. best_fit = NULL;
  1537. p = heap->large_free_buckets[index];
  1538. for (m = true_size << (ZEND_MM_NUM_BUCKETS - index); ; m <<= 1) {
  1539. if (UNEXPECTED(ZEND_MM_FREE_BLOCK_SIZE(p) == true_size)) {
  1540. return p->next_free_block;
  1541. } else if (ZEND_MM_FREE_BLOCK_SIZE(p) >= true_size &&
  1542. ZEND_MM_FREE_BLOCK_SIZE(p) < best_size) {
  1543. best_size = ZEND_MM_FREE_BLOCK_SIZE(p);
  1544. best_fit = p;
  1545. }
  1546. if ((m & (ZEND_MM_LONG_CONST(1) << (ZEND_MM_NUM_BUCKETS-1))) == 0) {
  1547. if (p->child[1]) {
  1548. rst = p->child[1];
  1549. }
  1550. if (p->child[0]) {
  1551. p = p->child[0];
  1552. } else {
  1553. break;
  1554. }
  1555. } else if (p->child[1]) {
  1556. p = p->child[1];
  1557. } else {
  1558. break;
  1559. }
  1560. }
  1561. for (p = rst; p; p = p->child[p->child[0] != NULL]) {
  1562. if (UNEXPECTED(ZEND_MM_FREE_BLOCK_SIZE(p) == true_size)) {
  1563. return p->next_free_block;
  1564. } else if (ZEND_MM_FREE_BLOCK_SIZE(p) > true_size &&
  1565. ZEND_MM_FREE_BLOCK_SIZE(p) < best_size) {
  1566. best_size = ZEND_MM_FREE_BLOCK_SIZE(p);
  1567. best_fit = p;
  1568. }
  1569. }
  1570. if (best_fit) {
  1571. return best_fit->next_free_block;
  1572. }
  1573. bitmap = bitmap >> 1;
  1574. if (!bitmap) {
  1575. return NULL;
  1576. }
  1577. index++;
  1578. }
  1579. /* Search for smallest "large" free block */
  1580. best_fit = p = heap->large_free_buckets[index + zend_mm_low_bit(bitmap)];
  1581. while ((p = p->child[p->child[0] != NULL])) {
  1582. if (ZEND_MM_FREE_BLOCK_SIZE(p) < ZEND_MM_FREE_BLOCK_SIZE(best_fit)) {
  1583. best_fit = p;
  1584. }
  1585. }
  1586. return best_fit->next_free_block;
  1587. }
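/*
 * Central allocation routine.  Roughly: small requests are served from
 * the per-size cache, then from the segregated free lists selected via
 * heap->free_bitmap; larger requests go through
 * zend_mm_search_large_block(); close to the memory limit the "rest"
 * list is scanned as well.  If nothing fits, a new segment is obtained
 * from the storage layer, the chosen block is split, and the remainder
 * goes back on a free (or rest) list while size/peak counters are updated.
 */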
  1588. static void *_zend_mm_alloc_int(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  1589. {
  1590. zend_mm_free_block *best_fit;
  1591. size_t true_size = ZEND_MM_TRUE_SIZE(size);
  1592. size_t block_size;
  1593. size_t remaining_size;
  1594. size_t segment_size;
  1595. zend_mm_segment *segment;
  1596. int keep_rest = 0;
  1597. if (EXPECTED(ZEND_MM_SMALL_SIZE(true_size))) {
  1598. size_t index = ZEND_MM_BUCKET_INDEX(true_size);
  1599. size_t bitmap;
  1600. if (UNEXPECTED(true_size < size)) {
  1601. goto out_of_memory;
  1602. }
  1603. #if ZEND_MM_CACHE
  1604. if (EXPECTED(heap->cache[index] != NULL)) {
  1605. /* Get block from cache */
  1606. #if ZEND_MM_CACHE_STAT
  1607. heap->cache_stat[index].count--;
  1608. heap->cache_stat[index].hit++;
  1609. #endif
  1610. best_fit = heap->cache[index];
  1611. heap->cache[index] = best_fit->prev_free_block;
  1612. heap->cached -= true_size;
  1613. ZEND_MM_CHECK_MAGIC(best_fit, MEM_BLOCK_CACHED);
  1614. ZEND_MM_SET_DEBUG_INFO(best_fit, size, 1, 0);
  1615. return ZEND_MM_DATA_OF(best_fit);
  1616. }
  1617. #if ZEND_MM_CACHE_STAT
  1618. heap->cache_stat[index].miss++;
  1619. #endif
  1620. #endif
  1621. bitmap = heap->free_bitmap >> index;
  1622. if (bitmap) {
  1623. /* Found some "small" free block that can be used */
  1624. index += zend_mm_low_bit(bitmap);
  1625. best_fit = heap->free_buckets[index*2];
  1626. #if ZEND_MM_CACHE_STAT
  1627. heap->cache_stat[ZEND_MM_NUM_BUCKETS].hit++;
  1628. #endif
  1629. goto zend_mm_finished_searching_for_block;
  1630. }
  1631. }
  1632. #if ZEND_MM_CACHE_STAT
  1633. heap->cache_stat[ZEND_MM_NUM_BUCKETS].miss++;
  1634. #endif
  1635. best_fit = zend_mm_search_large_block(heap, true_size);
  1636. if (!best_fit && heap->real_size >= heap->limit - heap->block_size) {
  1637. zend_mm_free_block *p = heap->rest_buckets[0];
  1638. size_t best_size = -1;
  1639. while (p != ZEND_MM_REST_BUCKET(heap)) {
  1640. if (UNEXPECTED(ZEND_MM_FREE_BLOCK_SIZE(p) == true_size)) {
  1641. best_fit = p;
  1642. goto zend_mm_finished_searching_for_block;
  1643. } else if (ZEND_MM_FREE_BLOCK_SIZE(p) > true_size &&
  1644. ZEND_MM_FREE_BLOCK_SIZE(p) < best_size) {
  1645. best_size = ZEND_MM_FREE_BLOCK_SIZE(p);
  1646. best_fit = p;
  1647. }
  1648. p = p->prev_free_block;
  1649. }
  1650. }
  1651. if (!best_fit) {
  1652. if (true_size > heap->block_size - (ZEND_MM_ALIGNED_SEGMENT_SIZE + ZEND_MM_ALIGNED_HEADER_SIZE)) {
  1653. /* Make sure the new segment is big enough: it must hold the aligned
  1654. segment header, the requested block and the trailing "guard" block */
  1655. segment_size = true_size + ZEND_MM_ALIGNED_SEGMENT_SIZE + ZEND_MM_ALIGNED_HEADER_SIZE;
  1656. segment_size = (segment_size + (heap->block_size-1)) & ~(heap->block_size-1);
  1657. keep_rest = 1;
  1658. } else {
  1659. segment_size = heap->block_size;
  1660. }
  1661. HANDLE_BLOCK_INTERRUPTIONS();
  1662. if (segment_size < true_size ||
  1663. heap->real_size + segment_size > heap->limit) {
  1664. /* Memory limit overflow */
  1665. #if ZEND_MM_CACHE
  1666. zend_mm_free_cache(heap);
  1667. #endif
  1668. HANDLE_UNBLOCK_INTERRUPTIONS();
  1669. #if ZEND_DEBUG
  1670. zend_mm_safe_error(heap, "Allowed memory size of %ld bytes exhausted at %s:%d (tried to allocate %lu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
  1671. #else
  1672. zend_mm_safe_error(heap, "Allowed memory size of %ld bytes exhausted (tried to allocate %lu bytes)", heap->limit, size);
  1673. #endif
  1674. }
  1675. segment = (zend_mm_segment *) ZEND_MM_STORAGE_ALLOC(segment_size);
  1676. if (!segment) {
  1677. /* Storage manager cannot allocate memory */
  1678. #if ZEND_MM_CACHE
  1679. zend_mm_free_cache(heap);
  1680. #endif
  1681. HANDLE_UNBLOCK_INTERRUPTIONS();
  1682. out_of_memory:
  1683. #if ZEND_DEBUG
  1684. zend_mm_safe_error(heap, "Out of memory (allocated %ld) at %s:%d (tried to allocate %lu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
  1685. #else
  1686. zend_mm_safe_error(heap, "Out of memory (allocated %ld) (tried to allocate %lu bytes)", heap->real_size, size);
  1687. #endif
  1688. return NULL;
  1689. }
  1690. heap->real_size += segment_size;
  1691. if (heap->real_size > heap->real_peak) {
  1692. heap->real_peak = heap->real_size;
  1693. }
  1694. segment->size = segment_size;
  1695. segment->next_segment = heap->segments_list;
  1696. heap->segments_list = segment;
  1697. best_fit = (zend_mm_free_block *) ((char *) segment + ZEND_MM_ALIGNED_SEGMENT_SIZE);
  1698. ZEND_MM_MARK_FIRST_BLOCK(best_fit);
  1699. block_size = segment_size - ZEND_MM_ALIGNED_SEGMENT_SIZE - ZEND_MM_ALIGNED_HEADER_SIZE;
  1700. ZEND_MM_LAST_BLOCK(ZEND_MM_BLOCK_AT(best_fit, block_size));
  1701. } else {
  1702. zend_mm_finished_searching_for_block:
  1703. /* remove from free list */
  1704. HANDLE_BLOCK_INTERRUPTIONS();
  1705. ZEND_MM_CHECK_MAGIC(best_fit, MEM_BLOCK_FREED);
  1706. ZEND_MM_CHECK_COOKIE(best_fit);
  1707. ZEND_MM_CHECK_BLOCK_LINKAGE(best_fit);
  1708. zend_mm_remove_from_free_list(heap, best_fit);
  1709. block_size = ZEND_MM_FREE_BLOCK_SIZE(best_fit);
  1710. }
  1711. remaining_size = block_size - true_size;
  1712. if (remaining_size < ZEND_MM_ALIGNED_MIN_HEADER_SIZE) {
  1713. true_size = block_size;
  1714. ZEND_MM_BLOCK(best_fit, ZEND_MM_USED_BLOCK, true_size);
  1715. } else {
  1716. zend_mm_free_block *new_free_block;
  1717. /* prepare new free block */
  1718. ZEND_MM_BLOCK(best_fit, ZEND_MM_USED_BLOCK, true_size);
  1719. new_free_block = (zend_mm_free_block *) ZEND_MM_BLOCK_AT(best_fit, true_size);
  1720. ZEND_MM_BLOCK(new_free_block, ZEND_MM_FREE_BLOCK, remaining_size);
  1721. /* add the new free block to the free list */
  1722. if (EXPECTED(!keep_rest)) {
  1723. zend_mm_add_to_free_list(heap, new_free_block);
  1724. } else {
  1725. zend_mm_add_to_rest_list(heap, new_free_block);
  1726. }
  1727. }
  1728. ZEND_MM_SET_DEBUG_INFO(best_fit, size, 1, 1);
  1729. heap->size += true_size;
  1730. if (heap->peak < heap->size) {
  1731. heap->peak = heap->size;
  1732. }
  1733. HANDLE_UNBLOCK_INTERRUPTIONS();
  1734. return ZEND_MM_DATA_OF(best_fit);
  1735. }
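/*
 * Release path.  Small blocks are pushed onto the per-size cache (up to
 * ZEND_MM_CACHE_SIZE bytes) instead of being freed immediately.  Other
 * blocks are coalesced with adjacent free neighbours; if the merged
 * block covers an entire segment it is handed back to the storage
 * layer, otherwise it is reinserted into the free lists.
 */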
  1736. static void _zend_mm_free_int(zend_mm_heap *heap, void *p ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  1737. {
  1738. zend_mm_block *mm_block;
  1739. zend_mm_block *next_block;
  1740. size_t size;
  1741. if (!ZEND_MM_VALID_PTR(p)) {
  1742. return;
  1743. }
  1744. mm_block = ZEND_MM_HEADER_OF(p);
  1745. size = ZEND_MM_BLOCK_SIZE(mm_block);
  1746. ZEND_MM_CHECK_PROTECTION(mm_block);
  1747. #if ZEND_DEBUG || ZEND_MM_HEAP_PROTECTION
  1748. memset(ZEND_MM_DATA_OF(mm_block), 0x5a, mm_block->debug.size);
  1749. #endif
  1750. #if ZEND_MM_CACHE
  1751. if (EXPECTED(ZEND_MM_SMALL_SIZE(size)) && EXPECTED(heap->cached < ZEND_MM_CACHE_SIZE)) {
  1752. size_t index = ZEND_MM_BUCKET_INDEX(size);
  1753. zend_mm_free_block **cache = &heap->cache[index];
  1754. ((zend_mm_free_block*)mm_block)->prev_free_block = *cache;
  1755. *cache = (zend_mm_free_block*)mm_block;
  1756. heap->cached += size;
  1757. ZEND_MM_SET_MAGIC(mm_block, MEM_BLOCK_CACHED);
  1758. #if ZEND_MM_CACHE_STAT
  1759. if (++heap->cache_stat[index].count > heap->cache_stat[index].max_count) {
  1760. heap->cache_stat[index].max_count = heap->cache_stat[index].count;
  1761. }
  1762. #endif
  1763. return;
  1764. }
  1765. #endif
  1766. HANDLE_BLOCK_INTERRUPTIONS();
  1767. heap->size -= size;
  1768. next_block = ZEND_MM_BLOCK_AT(mm_block, size);
  1769. if (ZEND_MM_IS_FREE_BLOCK(next_block)) {
  1770. zend_mm_remove_from_free_list(heap, (zend_mm_free_block *) next_block);
  1771. size += ZEND_MM_FREE_BLOCK_SIZE(next_block);
  1772. }
  1773. if (ZEND_MM_PREV_BLOCK_IS_FREE(mm_block)) {
  1774. mm_block = ZEND_MM_PREV_BLOCK(mm_block);
  1775. zend_mm_remove_from_free_list(heap, (zend_mm_free_block *) mm_block);
  1776. size += ZEND_MM_FREE_BLOCK_SIZE(mm_block);
  1777. }
  1778. if (ZEND_MM_IS_FIRST_BLOCK(mm_block) &&
  1779. ZEND_MM_IS_GUARD_BLOCK(ZEND_MM_BLOCK_AT(mm_block, size))) {
  1780. zend_mm_del_segment(heap, (zend_mm_segment *) ((char *)mm_block - ZEND_MM_ALIGNED_SEGMENT_SIZE));
  1781. } else {
  1782. ZEND_MM_BLOCK(mm_block, ZEND_MM_FREE_BLOCK, size);
  1783. zend_mm_add_to_free_list(heap, (zend_mm_free_block *) mm_block);
  1784. }
  1785. HANDLE_UNBLOCK_INTERRUPTIONS();
  1786. }
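/*
 * Resize path, tried cheapest-first: a NULL or foreign pointer degrades
 * to a plain allocation; shrinking splits the tail off in place; small
 * targets may be recycled through the cache; growing first tries to
 * absorb the following free block, then - for the first block of a
 * segment bordering the guard block - to enlarge the segment itself via
 * the storage layer; as a last resort it falls back to alloc/copy/free.
 */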
  1787. static void *_zend_mm_realloc_int(zend_mm_heap *heap, void *p, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  1788. {
  1789. zend_mm_block *mm_block = ZEND_MM_HEADER_OF(p);
  1790. zend_mm_block *next_block;
  1791. size_t true_size;
  1792. size_t orig_size;
  1793. void *ptr;
  1794. if (UNEXPECTED(!p) || !ZEND_MM_VALID_PTR(p)) {
  1795. return _zend_mm_alloc_int(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  1796. }
  1797. mm_block = ZEND_MM_HEADER_OF(p);
  1798. true_size = ZEND_MM_TRUE_SIZE(size);
  1799. orig_size = ZEND_MM_BLOCK_SIZE(mm_block);
  1800. ZEND_MM_CHECK_PROTECTION(mm_block);
  1801. if (UNEXPECTED(true_size < size)) {
  1802. goto out_of_memory;
  1803. }
  1804. if (true_size <= orig_size) {
  1805. size_t remaining_size = orig_size - true_size;
  1806. if (remaining_size >= ZEND_MM_ALIGNED_MIN_HEADER_SIZE) {
  1807. zend_mm_free_block *new_free_block;
  1808. HANDLE_BLOCK_INTERRUPTIONS();
  1809. next_block = ZEND_MM_BLOCK_AT(mm_block, orig_size);
  1810. if (ZEND_MM_IS_FREE_BLOCK(next_block)) {
  1811. remaining_size += ZEND_MM_FREE_BLOCK_SIZE(next_block);
  1812. zend_mm_remove_from_free_list(heap, (zend_mm_free_block *) next_block);
  1813. }
  1814. /* prepare new free block */
  1815. ZEND_MM_BLOCK(mm_block, ZEND_MM_USED_BLOCK, true_size);
  1816. new_free_block = (zend_mm_free_block *) ZEND_MM_BLOCK_AT(mm_block, true_size);
  1817. ZEND_MM_BLOCK(new_free_block, ZEND_MM_FREE_BLOCK, remaining_size);
  1818. /* add the new free block to the free list */
  1819. zend_mm_add_to_free_list(heap, new_free_block);
  1820. heap->size += (true_size - orig_size);
  1821. HANDLE_UNBLOCK_INTERRUPTIONS();
  1822. }
  1823. ZEND_MM_SET_DEBUG_INFO(mm_block, size, 0, 0);
  1824. return p;
  1825. }
  1826. #if ZEND_MM_CACHE
  1827. if (ZEND_MM_SMALL_SIZE(true_size)) {
  1828. size_t index = ZEND_MM_BUCKET_INDEX(true_size);
  1829. if (heap->cache[index] != NULL) {
  1830. zend_mm_free_block *best_fit;
  1831. zend_mm_free_block **cache;
  1832. #if ZEND_MM_CACHE_STAT
  1833. heap->cache_stat[index].count--;
  1834. heap->cache_stat[index].hit++;
  1835. #endif
  1836. best_fit = heap->cache[index];
  1837. heap->cache[index] = best_fit->prev_free_block;
  1838. ZEND_MM_CHECK_MAGIC(best_fit, MEM_BLOCK_CACHED);
  1839. ZEND_MM_SET_DEBUG_INFO(best_fit, size, 1, 0);
  1840. ptr = ZEND_MM_DATA_OF(best_fit);
  1841. #if ZEND_DEBUG || ZEND_MM_HEAP_PROTECTION
  1842. memcpy(ptr, p, mm_block->debug.size);
  1843. #else
  1844. memcpy(ptr, p, orig_size - ZEND_MM_ALIGNED_HEADER_SIZE);
  1845. #endif
  1846. heap->cached -= true_size - orig_size;
  1847. index = ZEND_MM_BUCKET_INDEX(orig_size);
  1848. cache = &heap->cache[index];
  1849. ((zend_mm_free_block*)mm_block)->prev_free_block = *cache;
  1850. *cache = (zend_mm_free_block*)mm_block;
  1851. ZEND_MM_SET_MAGIC(mm_block, MEM_BLOCK_CACHED);
  1852. #if ZEND_MM_CACHE_STAT
  1853. if (++heap->cache_stat[index].count > heap->cache_stat[index].max_count) {
  1854. heap->cache_stat[index].max_count = heap->cache_stat[index].count;
  1855. }
  1856. #endif
  1857. return ptr;
  1858. }
  1859. }
  1860. #endif
  1861. next_block = ZEND_MM_BLOCK_AT(mm_block, orig_size);
  1862. if (ZEND_MM_IS_FREE_BLOCK(next_block)) {
  1863. ZEND_MM_CHECK_COOKIE(next_block);
  1864. ZEND_MM_CHECK_BLOCK_LINKAGE(next_block);
  1865. if (orig_size + ZEND_MM_FREE_BLOCK_SIZE(next_block) >= true_size) {
  1866. size_t block_size = orig_size + ZEND_MM_FREE_BLOCK_SIZE(next_block);
  1867. size_t remaining_size = block_size - true_size;
  1868. HANDLE_BLOCK_INTERRUPTIONS();
  1869. zend_mm_remove_from_free_list(heap, (zend_mm_free_block *) next_block);
  1870. if (remaining_size < ZEND_MM_ALIGNED_MIN_HEADER_SIZE) {
  1871. true_size = block_size;
  1872. ZEND_MM_BLOCK(mm_block, ZEND_MM_USED_BLOCK, true_size);
  1873. } else {
  1874. zend_mm_free_block *new_free_block;
  1875. /* prepare new free block */
  1876. ZEND_MM_BLOCK(mm_block, ZEND_MM_USED_BLOCK, true_size);
  1877. new_free_block = (zend_mm_free_block *) ZEND_MM_BLOCK_AT(mm_block, true_size);
  1878. ZEND_MM_BLOCK(new_free_block, ZEND_MM_FREE_BLOCK, remaining_size);
  1879. /* add the new free block to the free list */
  1880. if (ZEND_MM_IS_FIRST_BLOCK(mm_block) &&
  1881. ZEND_MM_IS_GUARD_BLOCK(ZEND_MM_BLOCK_AT(new_free_block, remaining_size))) {
  1882. zend_mm_add_to_rest_list(heap, new_free_block);
  1883. } else {
  1884. zend_mm_add_to_free_list(heap, new_free_block);
  1885. }
  1886. }
  1887. ZEND_MM_SET_DEBUG_INFO(mm_block, size, 0, 0);
  1888. heap->size = heap->size + true_size - orig_size;
  1889. if (heap->peak < heap->size) {
  1890. heap->peak = heap->size;
  1891. }
  1892. HANDLE_UNBLOCK_INTERRUPTIONS();
  1893. return p;
  1894. } else if (ZEND_MM_IS_FIRST_BLOCK(mm_block) &&
  1895. ZEND_MM_IS_GUARD_BLOCK(ZEND_MM_BLOCK_AT(next_block, ZEND_MM_FREE_BLOCK_SIZE(next_block)))) {
  1896. HANDLE_BLOCK_INTERRUPTIONS();
  1897. zend_mm_remove_from_free_list(heap, (zend_mm_free_block *) next_block);
  1898. goto realloc_segment;
  1899. }
  1900. } else if (ZEND_MM_IS_FIRST_BLOCK(mm_block) && ZEND_MM_IS_GUARD_BLOCK(next_block)) {
  1901. zend_mm_segment *segment;
  1902. zend_mm_segment *segment_copy;
  1903. size_t segment_size;
  1904. size_t block_size;
  1905. size_t remaining_size;
  1906. HANDLE_BLOCK_INTERRUPTIONS();
  1907. realloc_segment:
  1908. /* new segment size = aligned segment header + block size + trailing guard block */
  1909. if (true_size > heap->block_size - (ZEND_MM_ALIGNED_SEGMENT_SIZE + ZEND_MM_ALIGNED_HEADER_SIZE)) {
  1910. segment_size = true_size+ZEND_MM_ALIGNED_SEGMENT_SIZE+ZEND_MM_ALIGNED_HEADER_SIZE;
  1911. segment_size = (segment_size + (heap->block_size-1)) & ~(heap->block_size-1);
  1912. } else {
  1913. segment_size = heap->block_size;
  1914. }
  1915. segment_copy = (zend_mm_segment *) ((char *)mm_block - ZEND_MM_ALIGNED_SEGMENT_SIZE);
  1916. if (segment_size < true_size ||
  1917. heap->real_size + segment_size - segment_copy->size > heap->limit) {
  1918. if (ZEND_MM_IS_FREE_BLOCK(next_block)) {
  1919. zend_mm_add_to_free_list(heap, (zend_mm_free_block *) next_block);
  1920. }
  1921. #if ZEND_MM_CACHE
  1922. zend_mm_free_cache(heap);
  1923. #endif
  1924. HANDLE_UNBLOCK_INTERRUPTIONS();
  1925. #if ZEND_DEBUG
  1926. zend_mm_safe_error(heap, "Allowed memory size of %ld bytes exhausted at %s:%d (tried to allocate %ld bytes)", heap->limit, __zend_filename, __zend_lineno, size);
  1927. #else
  1928. zend_mm_safe_error(heap, "Allowed memory size of %ld bytes exhausted (tried to allocate %ld bytes)", heap->limit, size);
  1929. #endif
  1930. return NULL;
  1931. }
  1932. segment = ZEND_MM_STORAGE_REALLOC(segment_copy, segment_size);
  1933. if (!segment) {
  1934. #if ZEND_MM_CACHE
  1935. zend_mm_free_cache(heap);
  1936. #endif
  1937. HANDLE_UNBLOCK_INTERRUPTIONS();
  1938. out_of_memory:
  1939. #if ZEND_DEBUG
  1940. zend_mm_safe_error(heap, "Out of memory (allocated %ld) at %s:%d (tried to allocate %ld bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
  1941. #else
  1942. zend_mm_safe_error(heap, "Out of memory (allocated %ld) (tried to allocate %ld bytes)", heap->real_size, size);
  1943. #endif
  1944. return NULL;
  1945. }
  1946. heap->real_size += segment_size - segment->size;
  1947. if (heap->real_size > heap->real_peak) {
  1948. heap->real_peak = heap->real_size;
  1949. }
  1950. segment->size = segment_size;
  1951. if (segment != segment_copy) {
  1952. zend_mm_segment **seg = &heap->segments_list;
  1953. while (*seg != segment_copy) {
  1954. seg = &(*seg)->next_segment;
  1955. }
  1956. *seg = segment;
  1957. mm_block = (zend_mm_block *) ((char *) segment + ZEND_MM_ALIGNED_SEGMENT_SIZE);
  1958. ZEND_MM_MARK_FIRST_BLOCK(mm_block);
  1959. }
  1960. block_size = segment_size - ZEND_MM_ALIGNED_SEGMENT_SIZE - ZEND_MM_ALIGNED_HEADER_SIZE;
  1961. remaining_size = block_size - true_size;
  1962. /* setup guard block */
  1963. ZEND_MM_LAST_BLOCK(ZEND_MM_BLOCK_AT(mm_block, block_size));
  1964. if (remaining_size < ZEND_MM_ALIGNED_MIN_HEADER_SIZE) {
  1965. true_size = block_size;
  1966. ZEND_MM_BLOCK(mm_block, ZEND_MM_USED_BLOCK, true_size);
  1967. } else {
  1968. zend_mm_free_block *new_free_block;
  1969. /* prepare new free block */
  1970. ZEND_MM_BLOCK(mm_block, ZEND_MM_USED_BLOCK, true_size);
  1971. new_free_block = (zend_mm_free_block *) ZEND_MM_BLOCK_AT(mm_block, true_size);
  1972. ZEND_MM_BLOCK(new_free_block, ZEND_MM_FREE_BLOCK, remaining_size);
  1973. /* add the new free block to the free list */
  1974. zend_mm_add_to_rest_list(heap, new_free_block);
  1975. }
  1976. ZEND_MM_SET_DEBUG_INFO(mm_block, size, 1, 1);
  1977. heap->size = heap->size + true_size - orig_size;
  1978. if (heap->peak < heap->size) {
  1979. heap->peak = heap->size;
  1980. }
  1981. HANDLE_UNBLOCK_INTERRUPTIONS();
  1982. return ZEND_MM_DATA_OF(mm_block);
  1983. }
  1984. ptr = _zend_mm_alloc_int(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  1985. #if ZEND_DEBUG || ZEND_MM_HEAP_PROTECTION
  1986. memcpy(ptr, p, mm_block->debug.size);
  1987. #else
  1988. memcpy(ptr, p, orig_size - ZEND_MM_ALIGNED_HEADER_SIZE);
  1989. #endif
  1990. _zend_mm_free_int(heap, p ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  1991. return ptr;
  1992. }
  1993. ZEND_API void *_zend_mm_alloc(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  1994. {
  1995. return _zend_mm_alloc_int(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  1996. }
  1997. ZEND_API void _zend_mm_free(zend_mm_heap *heap, void *p ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  1998. {
  1999. _zend_mm_free_int(heap, p ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  2000. }
  2001. ZEND_API void *_zend_mm_realloc(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  2002. {
  2003. return _zend_mm_realloc_int(heap, ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  2004. }
  2005. ZEND_API size_t _zend_mm_block_size(zend_mm_heap *heap, void *p ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  2006. {
  2007. zend_mm_block *mm_block;
  2008. if (!ZEND_MM_VALID_PTR(p)) {
  2009. return 0;
  2010. }
  2011. mm_block = ZEND_MM_HEADER_OF(p);
  2012. ZEND_MM_CHECK_PROTECTION(mm_block);
  2013. #if ZEND_DEBUG || ZEND_MM_HEAP_PROTECTION
  2014. return mm_block->debug.size;
  2015. #else
  2016. return ZEND_MM_BLOCK_SIZE(mm_block);
  2017. #endif
  2018. }
  2019. /**********************/
  2020. /* Allocation Manager */
  2021. /**********************/
  2022. typedef struct _zend_alloc_globals {
  2023. zend_mm_heap *mm_heap;
  2024. } zend_alloc_globals;
  2025. #ifdef ZTS
  2026. static int alloc_globals_id;
  2027. # define AG(v) TSRMG(alloc_globals_id, zend_alloc_globals *, v)
  2028. #else
  2029. # define AG(v) (alloc_globals.v)
  2030. static zend_alloc_globals alloc_globals;
  2031. #endif
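/*
 * AG(mm_heap) resolves to the per-thread heap under ZTS and to the
 * single global heap otherwise.  The emalloc()/efree()/erealloc()
 * entry points below consult mm_heap->use_zend_alloc and either run
 * the allocator above or forward straight to the registered
 * _malloc/_free/_realloc handlers (plain libc when USE_ZEND_ALLOC=0).
 */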
  2032. ZEND_API int is_zend_mm(TSRMLS_D)
  2033. {
  2034. return AG(mm_heap)->use_zend_alloc;
  2035. }
  2036. ZEND_API void *_emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  2037. {
  2038. TSRMLS_FETCH();
  2039. if (UNEXPECTED(!AG(mm_heap)->use_zend_alloc)) {
  2040. return AG(mm_heap)->_malloc(size);
  2041. }
  2042. return _zend_mm_alloc_int(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  2043. }
  2044. ZEND_API void _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  2045. {
  2046. TSRMLS_FETCH();
  2047. if (UNEXPECTED(!AG(mm_heap)->use_zend_alloc)) {
  2048. AG(mm_heap)->_free(ptr);
  2049. return;
  2050. }
  2051. _zend_mm_free_int(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  2052. }
  2053. ZEND_API void *_erealloc(void *ptr, size_t size, int allow_failure ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  2054. {
  2055. TSRMLS_FETCH();
  2056. if (UNEXPECTED(!AG(mm_heap)->use_zend_alloc)) {
  2057. return AG(mm_heap)->_realloc(ptr, size);
  2058. }
  2059. return _zend_mm_realloc_int(AG(mm_heap), ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  2060. }
  2061. ZEND_API size_t _zend_mem_block_size(void *ptr TSRMLS_DC ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  2062. {
  2063. if (UNEXPECTED(!AG(mm_heap)->use_zend_alloc)) {
  2064. return 0;
  2065. }
  2066. return _zend_mm_block_size(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  2067. }
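/*
 * Overflow-checked helper for "nmemb * size + offset" style requests.
 * Depending on the platform this uses inline asm (x86/x86-64), 64-bit
 * arithmetic, or a double-precision cross-check; on overflow it raises
 * E_ERROR instead of returning a wrapped-around size.  Worked example
 * (illustrative, 32-bit size_t assumed): 0x40000000 * 4 + 0 wraps to 0
 * with plain size_t arithmetic, so a naive emalloc(nmemb * size) would
 * "succeed" with a tiny buffer, while safe_address() aborts the request.
 */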
  2068. #if defined(__GNUC__) && defined(i386)
  2069. static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
  2070. {
  2071. size_t res = nmemb;
  2072. unsigned long overflow = 0;
  2073. __asm__ ("mull %3\n\taddl %4,%0\n\tadcl %1,%1"
  2074. : "=&a"(res), "=&d" (overflow)
  2075. : "%0"(res),
  2076. "rm"(size),
  2077. "rm"(offset));
  2078. if (UNEXPECTED(overflow)) {
  2079. zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
  2080. return 0;
  2081. }
  2082. return res;
  2083. }
  2084. #elif defined(__GNUC__) && defined(__x86_64__)
  2085. static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
  2086. {
  2087. size_t res = nmemb;
  2088. unsigned long overflow = 0;
  2089. __asm__ ("mulq %3\n\taddq %4,%0\n\tadcq %1,%1"
  2090. : "=&a"(res), "=&d" (overflow)
  2091. : "%0"(res),
  2092. "rm"(size),
  2093. "rm"(offset));
  2094. if (UNEXPECTED(overflow)) {
  2095. zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
  2096. return 0;
  2097. }
  2098. return res;
  2099. }
  2100. #elif SIZEOF_SIZE_T == 4 && defined(HAVE_ZEND_LONG64)
  2101. static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
  2102. {
  2103. zend_ulong64 res = (zend_ulong64)nmemb * (zend_ulong64)size + (zend_ulong64)offset;
  2104. if (UNEXPECTED(res > (zend_ulong64)0xFFFFFFFFL)) {
  2105. zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
  2106. return 0;
  2107. }
  2108. return (size_t) res;
  2109. }
  2110. #else
  2111. static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
  2112. {
  2113. size_t res = nmemb * size + offset;
  2114. double _d = (double)nmemb * (double)size + (double)offset;
  2115. double _delta = (double)res - _d;
  2116. if (UNEXPECTED((_d + _delta ) != _d)) {
  2117. zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
  2118. return 0;
  2119. }
  2120. return res;
  2121. }
  2122. #endif
  2123. ZEND_API void *_safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  2124. {
  2125. return emalloc_rel(safe_address(nmemb, size, offset));
  2126. }
  2127. ZEND_API void *_safe_malloc(size_t nmemb, size_t size, size_t offset)
  2128. {
  2129. return pemalloc(safe_address(nmemb, size, offset), 1);
  2130. }
  2131. ZEND_API void *_safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  2132. {
  2133. return erealloc_rel(ptr, safe_address(nmemb, size, offset));
  2134. }
  2135. ZEND_API void *_safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset)
  2136. {
  2137. return perealloc(ptr, safe_address(nmemb, size, offset), 1);
  2138. }
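/*
 * Typical use of the wrappers above (illustrative sketch, not taken
 * from this file): allocating an array of pointers without risking an
 * integer overflow in the size computation,
 *
 *     zval **slots = (zval **) safe_emalloc(count, sizeof(zval *), 0);
 *
 * where "count" is an untrusted element count supplied by the caller.
 */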
  2139. ZEND_API void *_ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  2140. {
  2141. void *p;
  2142. p = _safe_emalloc(nmemb, size, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  2143. if (UNEXPECTED(p == NULL)) {
  2144. return p;
  2145. }
  2146. memset(p, 0, size * nmemb);
  2147. return p;
  2148. }
  2149. ZEND_API char *_estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  2150. {
  2151. int length;
  2152. char *p;
  2153. length = strlen(s)+1;
  2154. p = (char *) _emalloc(length ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  2155. if (UNEXPECTED(p == NULL)) {
  2156. return p;
  2157. }
  2158. memcpy(p, s, length);
  2159. return p;
  2160. }
  2161. ZEND_API char *_estrndup(const char *s, uint length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  2162. {
  2163. char *p;
  2164. p = (char *) _emalloc(length+1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  2165. if (UNEXPECTED(p == NULL)) {
  2166. return p;
  2167. }
  2168. memcpy(p, s, length);
  2169. p[length] = 0;
  2170. return p;
  2171. }
  2172. ZEND_API char *zend_strndup(const char *s, uint length)
  2173. {
  2174. char *p;
  2175. p = (char *) malloc(length+1);
  2176. if (UNEXPECTED(p == NULL)) {
  2177. return p;
  2178. }
  2179. if (length) {
  2180. memcpy(p, s, length);
  2181. }
  2182. p[length] = 0;
  2183. return p;
  2184. }
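/*
 * Note on lifetimes: _estrdup()/_estrndup() above duplicate into the
 * request heap and are released (or reported as leaks in debug builds)
 * at request shutdown, whereas zend_strndup() uses plain malloc() and
 * is intended for data that must survive across requests.
 */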
  2185. ZEND_API int zend_set_memory_limit(size_t memory_limit)
  2186. {
  2187. TSRMLS_FETCH();
  2188. AG(mm_heap)->limit = (memory_limit >= AG(mm_heap)->block_size) ? memory_limit : AG(mm_heap)->block_size;
  2189. return SUCCESS;
  2190. }
  2191. ZEND_API size_t zend_memory_usage(int real_usage TSRMLS_DC)
  2192. {
  2193. if (real_usage) {
  2194. return AG(mm_heap)->real_size;
  2195. } else {
  2196. size_t usage = AG(mm_heap)->size;
  2197. #if ZEND_MM_CACHE
  2198. usage -= AG(mm_heap)->cached;
  2199. #endif
  2200. return usage;
  2201. }
  2202. }
  2203. ZEND_API size_t zend_memory_peak_usage(int real_usage TSRMLS_DC)
  2204. {
  2205. if (real_usage) {
  2206. return AG(mm_heap)->real_peak;
  2207. } else {
  2208. return AG(mm_heap)->peak;
  2209. }
  2210. }
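/*
 * These two functions back the userland memory_get_usage() and
 * memory_get_peak_usage() calls: real_usage reports bytes obtained from
 * the storage layer (whole segments), while the default view reports
 * bytes handed out to callers, minus what is parked in the size cache.
 */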
  2211. ZEND_API void shutdown_memory_manager(int silent, int full_shutdown TSRMLS_DC)
  2212. {
  2213. zend_mm_shutdown(AG(mm_heap), full_shutdown, silent TSRMLS_CC);
  2214. }
  2215. static void alloc_globals_ctor(zend_alloc_globals *alloc_globals TSRMLS_DC)
  2216. {
  2217. char *tmp;
  2218. alloc_globals->mm_heap = zend_mm_startup();
  2219. tmp = getenv("USE_ZEND_ALLOC");
  2220. if (tmp) {
  2221. alloc_globals->mm_heap->use_zend_alloc = zend_atoi(tmp, 0);
  2222. if (!alloc_globals->mm_heap->use_zend_alloc) {
  2223. alloc_globals->mm_heap->_malloc = malloc;
  2224. alloc_globals->mm_heap->_free = free;
  2225. alloc_globals->mm_heap->_realloc = realloc;
  2226. }
  2227. }
  2228. }
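/*
 * The USE_ZEND_ALLOC environment variable checked above lets the whole
 * allocator be bypassed at startup: with USE_ZEND_ALLOC=0 every
 * emalloc()/efree() goes straight to libc.  A common debugging recipe
 * is to export USE_ZEND_ALLOC=0 before running PHP under valgrind so
 * that external tools see each individual allocation.
 */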
  2229. #ifdef ZTS
  2230. static void alloc_globals_dtor(zend_alloc_globals *alloc_globals TSRMLS_DC)
  2231. {
  2232. shutdown_memory_manager(1, 1 TSRMLS_CC);
  2233. }
  2234. #endif
  2235. ZEND_API void start_memory_manager(TSRMLS_D)
  2236. {
  2237. #ifdef ZTS
  2238. ts_allocate_id(&alloc_globals_id, sizeof(zend_alloc_globals), (ts_allocate_ctor) alloc_globals_ctor, (ts_allocate_dtor) alloc_globals_dtor);
  2239. #else
  2240. alloc_globals_ctor(&alloc_globals);
  2241. #endif
  2242. }
  2243. ZEND_API zend_mm_heap *zend_mm_set_heap(zend_mm_heap *new_heap TSRMLS_DC)
  2244. {
  2245. zend_mm_heap *old_heap;
  2246. old_heap = AG(mm_heap);
  2247. AG(mm_heap) = new_heap;
  2248. return old_heap;
  2249. }
  2250. ZEND_API zend_mm_storage *zend_mm_get_storage(zend_mm_heap *heap)
  2251. {
  2252. return heap->storage;
  2253. }
  2254. ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
  2255. void* (*_malloc)(size_t),
  2256. void (*_free)(void*),
  2257. void* (*_realloc)(void*, size_t))
  2258. {
  2259. heap->use_zend_alloc = 0;
  2260. heap->_malloc = _malloc;
  2261. heap->_free = _free;
  2262. heap->_realloc = _realloc;
  2263. }
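/*
 * Illustrative sketch (not part of the original file): an embedder that
 * wants to trace request allocations could install its own handlers on
 * a freshly started heap and make it current.  my_malloc, my_free and
 * my_realloc are hypothetical caller-supplied functions with the libc
 * signatures.
 *
 *     zend_mm_heap *heap = zend_mm_startup();
 *     zend_mm_set_custom_handlers(heap, my_malloc, my_free, my_realloc);
 *     zend_mm_heap *old_heap = zend_mm_set_heap(heap TSRMLS_CC);
 *
 * zend_mm_startup() and zend_mm_set_heap() are the public entry points
 * declared in zend_alloc.h.
 */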
  2264. #if ZEND_DEBUG
  2265. ZEND_API int _mem_block_check(void *ptr, int silent ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  2266. {
  2267. TSRMLS_FETCH();
  2268. if (!AG(mm_heap)->use_zend_alloc) {
  2269. return 1;
  2270. }
  2271. return zend_mm_check_ptr(AG(mm_heap), ptr, silent ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  2272. }
  2273. ZEND_API void _full_mem_check(int silent ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  2274. {
  2275. int errors;
  2276. TSRMLS_FETCH();
  2277. if (!AG(mm_heap)->use_zend_alloc) {
  2278. return;
  2279. }
  2280. zend_debug_alloc_output("------------------------------------------------\n");
  2281. zend_debug_alloc_output("Full Memory Check at %s:%d\n" ZEND_FILE_LINE_RELAY_CC);
  2282. errors = zend_mm_check_heap(AG(mm_heap), silent ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  2283. zend_debug_alloc_output("End of full memory check %s:%d (%d errors)\n" ZEND_FILE_LINE_RELAY_CC, errors);
  2284. zend_debug_alloc_output("------------------------------------------------\n");
  2285. }
  2286. #endif
  2287. /*
  2288. * Local variables:
  2289. * tab-width: 4
  2290. * c-basic-offset: 4
  2291. * indent-tabs-mode: t
  2292. * End:
  2293. */