
/Zend/zend_alloc.c

http://github.com/php/php-src
/*
   +----------------------------------------------------------------------+
   | Zend Engine                                                          |
   +----------------------------------------------------------------------+
   | Copyright (c) Zend Technologies Ltd. (http://www.zend.com)           |
   +----------------------------------------------------------------------+
   | This source file is subject to version 2.00 of the Zend license,     |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.zend.com/license/2_00.txt.                                |
   | If you did not receive a copy of the Zend license and are unable to  |
   | obtain it through the world-wide-web, please send a note to          |
   | license@zend.com so we can mail you a copy immediately.              |
   +----------------------------------------------------------------------+
   | Authors: Andi Gutmans <andi@php.net>                                 |
   |          Zeev Suraski <zeev@php.net>                                 |
   |          Dmitry Stogov <dmitry@php.net>                              |
   +----------------------------------------------------------------------+
*/
/*
 * zend_alloc is designed to be a modern, CPU-cache-friendly memory manager
 * for PHP. Most ideas are taken from the jemalloc and tcmalloc implementations.
 *
 * All allocations are split into 3 categories:
 *
 * Huge  - the size is greater than CHUNK size (~2M by default); allocation is
 *         performed using mmap(). The result is aligned on a 2M boundary.
 *
 * Large - a number of 4KB pages inside a CHUNK. Large blocks
 *         are always aligned on a page boundary.
 *
 * Small - less than 3/4 of the page size. Small sizes are rounded up to the
 *         nearest greater predefined small size (there are 30 predefined sizes:
 *         8, 16, 24, 32, ... 3072). Small blocks are allocated from
 *         RUNs. Each RUN is allocated as one or several consecutive pages.
 *         Allocation inside RUNs is implemented using a linked list of free
 *         elements. The result is aligned to 8 bytes.
 *
 * zend_alloc allocates memory from the OS in CHUNKs; these CHUNKs and huge memory
 * blocks are always aligned to a CHUNK boundary, so it's very easy to determine
 * the CHUNK owning a certain pointer. Regular CHUNKs reserve a single
 * page at the start for special purposes. It contains a bitset of free pages,
 * a few bitsets for available runs of predefined small sizes, a map of pages that
 * keeps information about the usage of each page in this CHUNK, etc.
 *
 * zend_alloc provides the familiar emalloc/efree/erealloc API, but in addition it
 * provides specialized and optimized routines to allocate blocks of predefined
 * sizes (e.g. emalloc_2(), emalloc_4(), ..., emalloc_large(), etc.)
 * The library uses C preprocessor tricks that substitute calls to emalloc()
 * with more specialized routines when the requested size is known.
 */
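
/* Editor's note: a minimal illustrative sketch (not part of the original file)
 * of how a requested size maps onto the three categories described above,
 * assuming the default page size (4096) and chunk size (2M). The real dispatch
 * happens in zend_mm_alloc_heap() below; this is only a readable restatement.
 */
#if 0
static const char *zend_mm_size_category(size_t size)
{
	if (size <= ZEND_MM_MAX_SMALL_SIZE) {        /* <= 3072: rounded up to a bin size   */
		return "small";
	} else if (size <= ZEND_MM_MAX_LARGE_SIZE) { /* fits in a chunk: whole 4KB pages    */
		return "large";
	} else {                                     /* mmap()'ed separately, CHUNK-aligned */
		return "huge";
	}
}
#endif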
#include "zend.h"
#include "zend_alloc.h"
#include "zend_globals.h"
#include "zend_operators.h"
#include "zend_multiply.h"
#include "zend_bitset.h"
#include <signal.h>

#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif

#ifdef ZEND_WIN32
# include <wincrypt.h>
# include <process.h>
# include "win32/winutil.h"
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <limits.h>
#include <fcntl.h>
#include <errno.h>

#ifndef _WIN32
# include <sys/mman.h>
# ifndef MAP_ANON
#  ifdef MAP_ANONYMOUS
#   define MAP_ANON MAP_ANONYMOUS
#  endif
# endif
# ifndef MAP_FAILED
#  define MAP_FAILED ((void*)-1)
# endif
# ifndef MAP_POPULATE
#  define MAP_POPULATE 0
# endif
# if defined(_SC_PAGESIZE) || defined(_SC_PAGE_SIZE)
#  define REAL_PAGE_SIZE _real_page_size
static size_t _real_page_size = ZEND_MM_PAGE_SIZE;
# endif
# ifdef MAP_ALIGNED_SUPER
#  define MAP_HUGETLB MAP_ALIGNED_SUPER
# endif
#endif
#ifndef REAL_PAGE_SIZE
# define REAL_PAGE_SIZE ZEND_MM_PAGE_SIZE
#endif

/* NetBSD has an mremap() function with a signature that is incompatible with Linux (WTF?),
 * so pretend it doesn't exist. */
#ifndef __linux__
# undef HAVE_MREMAP
#endif

#ifndef __APPLE__
# define ZEND_MM_FD -1
#else
/* Mac allows tracking anonymous pages via vmmap per TAG id.
 * User-land applications are allowed to use IDs from 240 to 255.
 */
# define ZEND_MM_FD (250<<24)
#endif
#ifndef ZEND_MM_STAT
# define ZEND_MM_STAT 1    /* track current and peak memory usage            */
#endif
#ifndef ZEND_MM_LIMIT
# define ZEND_MM_LIMIT 1   /* support for user-defined memory limit          */
#endif
#ifndef ZEND_MM_CUSTOM
# define ZEND_MM_CUSTOM 1  /* support for custom memory allocator            */
                           /* USE_ZEND_ALLOC=0 may switch to system malloc() */
#endif
#ifndef ZEND_MM_STORAGE
# define ZEND_MM_STORAGE 1 /* support for custom memory storage              */
#endif
#ifndef ZEND_MM_ERROR
# define ZEND_MM_ERROR 1   /* report system errors                           */
#endif

#ifndef ZEND_MM_CHECK
# define ZEND_MM_CHECK(condition, message) do { \
		if (UNEXPECTED(!(condition))) { \
			zend_mm_panic(message); \
		} \
	} while (0)
#endif
typedef uint32_t   zend_mm_page_info; /* 4-byte integer */
typedef zend_ulong zend_mm_bitset;    /* 4-byte or 8-byte integer */

#define ZEND_MM_ALIGNED_OFFSET(size, alignment) \
	(((size_t)(size)) & ((alignment) - 1))
#define ZEND_MM_ALIGNED_BASE(size, alignment) \
	(((size_t)(size)) & ~((alignment) - 1))
#define ZEND_MM_SIZE_TO_NUM(size, alignment) \
	(((size_t)(size) + ((alignment) - 1)) / (alignment))

#define ZEND_MM_BITSET_LEN   (sizeof(zend_mm_bitset) * 8)         /* 32 or 64 */
#define ZEND_MM_PAGE_MAP_LEN (ZEND_MM_PAGES / ZEND_MM_BITSET_LEN) /* 16 or 8  */

typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN];    /* 64B */

#define ZEND_MM_IS_FRUN                  0x00000000
#define ZEND_MM_IS_LRUN                  0x40000000
#define ZEND_MM_IS_SRUN                  0x80000000

#define ZEND_MM_LRUN_PAGES_MASK          0x000003ff
#define ZEND_MM_LRUN_PAGES_OFFSET        0

#define ZEND_MM_SRUN_BIN_NUM_MASK        0x0000001f
#define ZEND_MM_SRUN_BIN_NUM_OFFSET      0

#define ZEND_MM_SRUN_FREE_COUNTER_MASK   0x01ff0000
#define ZEND_MM_SRUN_FREE_COUNTER_OFFSET 16

#define ZEND_MM_NRUN_OFFSET_MASK         0x01ff0000
#define ZEND_MM_NRUN_OFFSET_OFFSET       16

#define ZEND_MM_LRUN_PAGES(info)         (((info) & ZEND_MM_LRUN_PAGES_MASK) >> ZEND_MM_LRUN_PAGES_OFFSET)
#define ZEND_MM_SRUN_BIN_NUM(info)       (((info) & ZEND_MM_SRUN_BIN_NUM_MASK) >> ZEND_MM_SRUN_BIN_NUM_OFFSET)
#define ZEND_MM_SRUN_FREE_COUNTER(info)  (((info) & ZEND_MM_SRUN_FREE_COUNTER_MASK) >> ZEND_MM_SRUN_FREE_COUNTER_OFFSET)
#define ZEND_MM_NRUN_OFFSET(info)        (((info) & ZEND_MM_NRUN_OFFSET_MASK) >> ZEND_MM_NRUN_OFFSET_OFFSET)

#define ZEND_MM_FRUN()                   ZEND_MM_IS_FRUN
#define ZEND_MM_LRUN(count)              (ZEND_MM_IS_LRUN | ((count) << ZEND_MM_LRUN_PAGES_OFFSET))
#define ZEND_MM_SRUN(bin_num)            (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET))
#define ZEND_MM_SRUN_EX(bin_num, count)  (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((count) << ZEND_MM_SRUN_FREE_COUNTER_OFFSET))
#define ZEND_MM_NRUN(bin_num, offset)    (ZEND_MM_IS_SRUN | ZEND_MM_IS_LRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((offset) << ZEND_MM_NRUN_OFFSET_OFFSET))

#define ZEND_MM_BINS 30

typedef struct _zend_mm_page      zend_mm_page;
typedef struct _zend_mm_bin       zend_mm_bin;
typedef struct _zend_mm_free_slot zend_mm_free_slot;
typedef struct _zend_mm_chunk     zend_mm_chunk;
typedef struct _zend_mm_huge_list zend_mm_huge_list;

int zend_mm_use_huge_pages = 0;
/*
 * Memory is retrieved from the OS in chunks of fixed size 2MB.
 * Inside a chunk it's managed in pages of fixed size 4096B.
 * So each chunk consists of 512 pages.
 * The first page of each chunk is reserved for the chunk header.
 * It contains service information about all pages.
 *
 * free_pages - current number of free pages in this chunk
 *
 * free_tail  - number of continuous free pages at the end of the chunk
 *
 * free_map   - bitset (a bit for each page). The bit is set if the corresponding
 *              page is allocated. The allocator for "large sizes" may easily find
 *              a free page (or a continuous run of pages) by searching for zero
 *              bits.
 *
 * map        - contains service information for each page (32 bits for each
 *              page).
 *    usage:
 *              (2 bits)
 *              FRUN - free page,
 *              LRUN - first page of a "large" allocation
 *              SRUN - first page of a bin used for "small" allocation
 *
 *    lrun_pages:
 *              (10 bits) number of allocated pages
 *
 *    srun_bin_num:
 *              (5 bits) bin number (e.g. 0 for sizes 0-2, 1 for 3-4,
 *               2 for 5-8, 3 for 9-16 etc), see zend_alloc_sizes.h
 */
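
/* Editor's note: an illustrative sketch (not part of the original file) of the
 * pointer arithmetic the comment above implies. Because chunks are always
 * CHUNK-aligned, masking any interior pointer recovers its owning chunk, and
 * the offset within the chunk gives the page index into the per-page map.
 */
#if 0
static void zend_mm_locate_example(void *ptr)
{
	zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
	int page_num = (int)(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE);
	zend_mm_page_info info = chunk->map[page_num]; /* FRUN/LRUN/SRUN encoding above */
	(void)info;
}
#endif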
struct _zend_mm_heap {
#if ZEND_MM_CUSTOM
	int                use_custom_heap;
#endif
#if ZEND_MM_STORAGE
	zend_mm_storage   *storage;
#endif
#if ZEND_MM_STAT
	size_t             size;                    /* current memory usage */
	size_t             peak;                    /* peak memory usage */
#endif
	zend_mm_free_slot *free_slot[ZEND_MM_BINS]; /* free lists for small sizes */
#if ZEND_MM_STAT || ZEND_MM_LIMIT
	size_t             real_size;               /* current size of allocated pages */
#endif
#if ZEND_MM_STAT
	size_t             real_peak;               /* peak size of allocated pages */
#endif
#if ZEND_MM_LIMIT
	size_t             limit;                   /* memory limit */
	int                overflow;                /* memory overflow flag */
#endif
	zend_mm_huge_list *huge_list;               /* list of huge allocated blocks */
	zend_mm_chunk     *main_chunk;
	zend_mm_chunk     *cached_chunks;           /* list of unused chunks */
	int                chunks_count;            /* number of allocated chunks */
	int                peak_chunks_count;       /* peak number of allocated chunks for current request */
	int                cached_chunks_count;     /* number of cached chunks */
	double             avg_chunks_count;        /* average number of chunks allocated per request */
	int                last_chunks_delete_boundary; /* number of chunks after last deletion */
	int                last_chunks_delete_count;    /* number of deletions over the last boundary */
#if ZEND_MM_CUSTOM
	union {
		struct {
			void      *(*_malloc)(size_t);
			void       (*_free)(void*);
			void      *(*_realloc)(void*, size_t);
		} std;
		struct {
			void      *(*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
			void       (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
			void      *(*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
		} debug;
	} custom_heap;
	HashTable *tracked_allocs;
#endif
};
struct _zend_mm_chunk {
	zend_mm_heap      *heap;
	zend_mm_chunk     *next;
	zend_mm_chunk     *prev;
	uint32_t           free_pages; /* number of free pages */
	uint32_t           free_tail;  /* number of free pages at the end of the chunk */
	uint32_t           num;
	char               reserve[64 - (sizeof(void*) * 3 + sizeof(uint32_t) * 3)];
	zend_mm_heap       heap_slot;             /* used only in main chunk */
	zend_mm_page_map   free_map;              /* 512 bits or 64 bytes */
	zend_mm_page_info  map[ZEND_MM_PAGES];    /* 2 KB = 512 * 4 */
};

struct _zend_mm_page {
	char               bytes[ZEND_MM_PAGE_SIZE];
};

/*
 * bin - one or more contiguous pages (up to 8) used for allocation of
 * a particular "small size".
 */
struct _zend_mm_bin {
	char               bytes[ZEND_MM_PAGE_SIZE * 8];
};

struct _zend_mm_free_slot {
	zend_mm_free_slot *next_free_slot;
};
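
/* Editor's note: an illustrative sketch (not part of the original file) of the
 * intrusive LIFO free list built from zend_mm_free_slot: each free element's
 * first word points at the next free element, so push and pop are O(1) and no
 * extra bookkeeping memory is needed. The real versions are
 * zend_mm_alloc_small()/zend_mm_free_small() below.
 */
#if 0
static void *slot_pop(zend_mm_free_slot **head)
{
	zend_mm_free_slot *p = *head;
	if (p) {
		*head = p->next_free_slot;
	}
	return p;
}

static void slot_push(zend_mm_free_slot **head, void *ptr)
{
	zend_mm_free_slot *p = (zend_mm_free_slot*)ptr;
	p->next_free_slot = *head;
	*head = p;
}
#endif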
struct _zend_mm_huge_list {
	void              *ptr;
	size_t             size;
	zend_mm_huge_list *next;
#if ZEND_DEBUG
	zend_mm_debug_info dbg;
#endif
};
#define ZEND_MM_PAGE_ADDR(chunk, page_num) \
	((void*)(((zend_mm_page*)(chunk)) + (page_num)))

#define _BIN_DATA_SIZE(num, size, elements, pages, x, y) size,
static const uint32_t bin_data_size[] = {
	ZEND_MM_BINS_INFO(_BIN_DATA_SIZE, x, y)
};

#define _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y) elements,
static const uint32_t bin_elements[] = {
	ZEND_MM_BINS_INFO(_BIN_DATA_ELEMENTS, x, y)
};

#define _BIN_DATA_PAGES(num, size, elements, pages, x, y) pages,
static const uint32_t bin_pages[] = {
	ZEND_MM_BINS_INFO(_BIN_DATA_PAGES, x, y)
};
#if ZEND_DEBUG
ZEND_COLD void zend_debug_alloc_output(char *format, ...)
{
	char output_buf[256];
	va_list args;

	va_start(args, format);
	vsprintf(output_buf, format, args);
	va_end(args);

#ifdef ZEND_WIN32
	OutputDebugString(output_buf);
#else
	fprintf(stderr, "%s", output_buf);
#endif
}
#endif

static ZEND_COLD ZEND_NORETURN void zend_mm_panic(const char *message)
{
	fprintf(stderr, "%s\n", message);
/* See http://support.microsoft.com/kb/190351 */
#ifdef ZEND_WIN32
	fflush(stderr);
#endif
#if ZEND_DEBUG && defined(HAVE_KILL) && defined(HAVE_GETPID)
	kill(getpid(), SIGSEGV);
#endif
	exit(1);
}
static ZEND_COLD ZEND_NORETURN void zend_mm_safe_error(zend_mm_heap *heap,
	const char *format,
	size_t limit,
#if ZEND_DEBUG
	const char *filename,
	uint32_t lineno,
#endif
	size_t size)
{
	heap->overflow = 1;
	zend_try {
		zend_error_noreturn(E_ERROR,
			format,
			limit,
#if ZEND_DEBUG
			filename,
			lineno,
#endif
			size);
	} zend_catch {
	} zend_end_try();
	heap->overflow = 0;
	zend_bailout();
	exit(1);
}
#ifdef _WIN32
void
stderr_last_error(char *msg)
{
	DWORD err = GetLastError();
	char *buf = php_win32_error_to_msg(err);

	if (!buf[0]) {
		fprintf(stderr, "\n%s: [0x%08lx]\n", msg, err);
	}
	else {
		fprintf(stderr, "\n%s: [0x%08lx] %s\n", msg, err, buf);
	}

	php_win32_error_msg_free(buf);
}
#endif
/*****************/
/* OS Allocation */
/*****************/

#ifndef HAVE_MREMAP
static void *zend_mm_mmap_fixed(void *addr, size_t size)
{
#ifdef _WIN32
	return VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#else
	int flags = MAP_PRIVATE | MAP_ANON;
#if defined(MAP_EXCL)
	flags |= MAP_FIXED | MAP_EXCL;
#endif
	/* MAP_FIXED leads to discarding of the old mapping, so it can't be used. */
	void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, flags /*| MAP_POPULATE | MAP_HUGETLB*/, ZEND_MM_FD, 0);

	if (ptr == MAP_FAILED) {
#if ZEND_MM_ERROR && !defined(MAP_EXCL)
		fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
		return NULL;
	} else if (ptr != addr) {
		if (munmap(ptr, size) != 0) {
#if ZEND_MM_ERROR
			fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
		}
		return NULL;
	}
	return ptr;
#endif
}
#endif
static void *zend_mm_mmap(size_t size)
{
#ifdef _WIN32
	void *ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);

	if (ptr == NULL) {
#if ZEND_MM_ERROR
		stderr_last_error("VirtualAlloc() failed");
#endif
		return NULL;
	}
	return ptr;
#else
	void *ptr;

#ifdef MAP_HUGETLB
	if (zend_mm_use_huge_pages && size == ZEND_MM_CHUNK_SIZE) {
		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_HUGETLB, -1, 0);
		if (ptr != MAP_FAILED) {
			return ptr;
		}
	}
#endif

	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, ZEND_MM_FD, 0);

	if (ptr == MAP_FAILED) {
#if ZEND_MM_ERROR
		fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
		return NULL;
	}
	return ptr;
#endif
}

static void zend_mm_munmap(void *addr, size_t size)
{
#ifdef _WIN32
	if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
#if ZEND_MM_ERROR
		stderr_last_error("VirtualFree() failed");
#endif
	}
#else
	if (munmap(addr, size) != 0) {
#if ZEND_MM_ERROR
		fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
	}
#endif
}
/***********/
/* Bitmask */
/***********/

/* number of trailing set (1) bits */
static zend_always_inline int zend_mm_bitset_nts(zend_mm_bitset bitset)
{
#if (defined(__GNUC__) || __has_builtin(__builtin_ctzl)) && SIZEOF_ZEND_LONG == SIZEOF_LONG && defined(PHP_HAVE_BUILTIN_CTZL)
	return __builtin_ctzl(~bitset);
#elif (defined(__GNUC__) || __has_builtin(__builtin_ctzll)) && defined(PHP_HAVE_BUILTIN_CTZLL)
	return __builtin_ctzll(~bitset);
#elif defined(_WIN32)
	unsigned long index;

#if defined(_WIN64)
	if (!BitScanForward64(&index, ~bitset)) {
#else
	if (!BitScanForward(&index, ~bitset)) {
#endif
		/* undefined behavior */
		return 32;
	}

	return (int)index;
#else
	int n;

	if (bitset == (zend_mm_bitset)-1) return ZEND_MM_BITSET_LEN;

	n = 0;
#if SIZEOF_ZEND_LONG == 8
	if (sizeof(zend_mm_bitset) == 8) {
		if ((bitset & 0xffffffff) == 0xffffffff) {n += 32; bitset = bitset >> Z_UL(32);}
	}
#endif
	if ((bitset & 0x0000ffff) == 0x0000ffff) {n += 16; bitset = bitset >> 16;}
	if ((bitset & 0x000000ff) == 0x000000ff) {n +=  8; bitset = bitset >>  8;}
	if ((bitset & 0x0000000f) == 0x0000000f) {n +=  4; bitset = bitset >>  4;}
	if ((bitset & 0x00000003) == 0x00000003) {n +=  2; bitset = bitset >>  2;}
	return n + (bitset & 1);
#endif
}
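
/* Editor's note: a worked example (not part of the original file). In the
 * free_map a set bit means "page allocated", so trailing set bits are the
 * allocated pages at the start of the word, and zend_mm_bitset_nts() yields
 * the index of the first free page.
 */
#if 0
static void nts_example(void)
{
	ZEND_ASSERT(zend_mm_bitset_nts((zend_mm_bitset)0x07) == 3); /* 0b...000111: pages 0-2 allocated */
	ZEND_ASSERT(zend_mm_bitset_nts((zend_mm_bitset)0x00) == 0); /* no trailing 1s: page 0 is free   */
}
#endif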
static zend_always_inline int zend_mm_bitset_is_set(zend_mm_bitset *bitset, int bit)
{
	return ZEND_BIT_TEST(bitset, bit);
}

static zend_always_inline void zend_mm_bitset_set_bit(zend_mm_bitset *bitset, int bit)
{
	bitset[bit / ZEND_MM_BITSET_LEN] |= (Z_UL(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
}

static zend_always_inline void zend_mm_bitset_reset_bit(zend_mm_bitset *bitset, int bit)
{
	bitset[bit / ZEND_MM_BITSET_LEN] &= ~(Z_UL(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
}

static zend_always_inline void zend_mm_bitset_set_range(zend_mm_bitset *bitset, int start, int len)
{
	if (len == 1) {
		zend_mm_bitset_set_bit(bitset, start);
	} else {
		int pos = start / ZEND_MM_BITSET_LEN;
		int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
		int bit = start & (ZEND_MM_BITSET_LEN - 1);
		zend_mm_bitset tmp;

		if (pos != end) {
			/* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
			tmp = (zend_mm_bitset)-1 << bit;
			bitset[pos++] |= tmp;
			while (pos != end) {
				/* set all bits */
				bitset[pos++] = (zend_mm_bitset)-1;
			}
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* set bits from "0" to "end" */
			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] |= tmp;
		} else {
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* set bits from "bit" to "end" */
			tmp = (zend_mm_bitset)-1 << bit;
			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] |= tmp;
		}
	}
}
static zend_always_inline void zend_mm_bitset_reset_range(zend_mm_bitset *bitset, int start, int len)
{
	if (len == 1) {
		zend_mm_bitset_reset_bit(bitset, start);
	} else {
		int pos = start / ZEND_MM_BITSET_LEN;
		int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
		int bit = start & (ZEND_MM_BITSET_LEN - 1);
		zend_mm_bitset tmp;

		if (pos != end) {
			/* reset bits from "bit" to ZEND_MM_BITSET_LEN-1 */
			tmp = ~((Z_UL(1) << bit) - 1);
			bitset[pos++] &= ~tmp;
			while (pos != end) {
				/* reset all bits */
				bitset[pos++] = 0;
			}
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* reset bits from "0" to "end" */
			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] &= ~tmp;
		} else {
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* reset bits from "bit" to "end" */
			tmp = (zend_mm_bitset)-1 << bit;
			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] &= ~tmp;
		}
	}
}
static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitset, int start, int len)
{
	if (len == 1) {
		return !zend_mm_bitset_is_set(bitset, start);
	} else {
		int pos = start / ZEND_MM_BITSET_LEN;
		int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
		int bit = start & (ZEND_MM_BITSET_LEN - 1);
		zend_mm_bitset tmp;

		if (pos != end) {
			/* check bits from "bit" to ZEND_MM_BITSET_LEN-1 */
			tmp = (zend_mm_bitset)-1 << bit;
			if ((bitset[pos++] & tmp) != 0) {
				return 0;
			}
			while (pos != end) {
				/* check all bits */
				if (bitset[pos++] != 0) {
					return 0;
				}
			}
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* check bits from "0" to "end" */
			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			return (bitset[pos] & tmp) == 0;
		} else {
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* check bits from "bit" to "end" */
			tmp = (zend_mm_bitset)-1 << bit;
			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			return (bitset[pos] & tmp) == 0;
		}
	}
}
/**********/
/* Chunks */
/**********/

static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
{
	void *ptr = zend_mm_mmap(size);

	if (ptr == NULL) {
		return NULL;
	} else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
#ifdef MADV_HUGEPAGE
		if (zend_mm_use_huge_pages) {
			madvise(ptr, size, MADV_HUGEPAGE);
		}
#endif
		return ptr;
	} else {
		size_t offset;

		/* chunk has to be aligned */
		zend_mm_munmap(ptr, size);
		ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
#ifdef _WIN32
		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
		zend_mm_munmap(ptr, size + alignment - REAL_PAGE_SIZE);
		ptr = zend_mm_mmap_fixed((void*)((char*)ptr + (alignment - offset)), size);
		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
		if (offset != 0) {
			zend_mm_munmap(ptr, size);
			return NULL;
		}
		return ptr;
#else
		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
		if (offset != 0) {
			offset = alignment - offset;
			zend_mm_munmap(ptr, offset);
			ptr = (char*)ptr + offset;
			alignment -= offset;
		}
		if (alignment > REAL_PAGE_SIZE) {
			zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
		}
# ifdef MADV_HUGEPAGE
		if (zend_mm_use_huge_pages) {
			madvise(ptr, size, MADV_HUGEPAGE);
		}
# endif
#endif
		return ptr;
	}
}
static void *zend_mm_chunk_alloc(zend_mm_heap *heap, size_t size, size_t alignment)
{
#if ZEND_MM_STORAGE
	if (UNEXPECTED(heap->storage)) {
		void *ptr = heap->storage->handlers.chunk_alloc(heap->storage, size, alignment);
		ZEND_ASSERT(((zend_uintptr_t)((char*)ptr + (alignment-1)) & ~(alignment-1)) == (zend_uintptr_t)ptr);
		return ptr;
	}
#endif
	return zend_mm_chunk_alloc_int(size, alignment);
}
static void zend_mm_chunk_free(zend_mm_heap *heap, void *addr, size_t size)
{
#if ZEND_MM_STORAGE
	if (UNEXPECTED(heap->storage)) {
		heap->storage->handlers.chunk_free(heap->storage, addr, size);
		return;
	}
#endif
	zend_mm_munmap(addr, size);
}

static int zend_mm_chunk_truncate(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
{
#if ZEND_MM_STORAGE
	if (UNEXPECTED(heap->storage)) {
		if (heap->storage->handlers.chunk_truncate) {
			return heap->storage->handlers.chunk_truncate(heap->storage, addr, old_size, new_size);
		} else {
			return 0;
		}
	}
#endif
#ifndef _WIN32
	zend_mm_munmap((char*)addr + new_size, old_size - new_size);
	return 1;
#else
	return 0;
#endif
}

static int zend_mm_chunk_extend(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
{
#if ZEND_MM_STORAGE
	if (UNEXPECTED(heap->storage)) {
		if (heap->storage->handlers.chunk_extend) {
			return heap->storage->handlers.chunk_extend(heap->storage, addr, old_size, new_size);
		} else {
			return 0;
		}
	}
#endif
#ifdef HAVE_MREMAP
	/* We don't use MREMAP_MAYMOVE due to alignment requirements. */
	void *ptr = mremap(addr, old_size, new_size, 0);
	if (ptr == MAP_FAILED) {
		return 0;
	}
	/* Sanity check: The mapping shouldn't have moved. */
	ZEND_ASSERT(ptr == addr);
	return 1;
#elif !defined(_WIN32)
	return (zend_mm_mmap_fixed((char*)addr + old_size, new_size - old_size) != NULL);
#else
	return 0;
#endif
}
static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk)
{
	chunk->heap = heap;
	chunk->next = heap->main_chunk;
	chunk->prev = heap->main_chunk->prev;
	chunk->prev->next = chunk;
	chunk->next->prev = chunk;
	/* mark the first pages as allocated */
	chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
	chunk->free_tail = ZEND_MM_FIRST_PAGE;
	/* younger chunks have bigger numbers */
	chunk->num = chunk->prev->num + 1;
	/* mark the first pages as allocated in the free map */
	chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
	chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
}

/***********************/
/* Huge Runs (forward) */
/***********************/

static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);

#if ZEND_DEBUG
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
#else
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
#endif
/**************/
/* Large Runs */
/**************/

#if ZEND_DEBUG
static void *zend_mm_alloc_pages(zend_mm_heap *heap, uint32_t pages_count, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#else
static void *zend_mm_alloc_pages(zend_mm_heap *heap, uint32_t pages_count ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#endif
{
	zend_mm_chunk *chunk = heap->main_chunk;
	uint32_t page_num, len;
	int steps = 0;

	while (1) {
		if (UNEXPECTED(chunk->free_pages < pages_count)) {
			goto not_found;
#if 0
		} else if (UNEXPECTED(chunk->free_pages + chunk->free_tail == ZEND_MM_PAGES)) {
			if (UNEXPECTED(ZEND_MM_PAGES - chunk->free_tail < pages_count)) {
				goto not_found;
			} else {
				page_num = chunk->free_tail;
				goto found;
			}
		} else if (0) {
			/* First-Fit Search */
			int free_tail = chunk->free_tail;
			zend_mm_bitset *bitset = chunk->free_map;
			zend_mm_bitset tmp = *(bitset++);
			int i = 0;

			while (1) {
				/* skip allocated blocks */
				while (tmp == (zend_mm_bitset)-1) {
					i += ZEND_MM_BITSET_LEN;
					if (i == ZEND_MM_PAGES) {
						goto not_found;
					}
					tmp = *(bitset++);
				}
				/* find first 0 bit */
				page_num = i + zend_mm_bitset_nts(tmp);
				/* reset bits from 0 to "bit" */
				tmp &= tmp + 1;
				/* skip free blocks */
				while (tmp == 0) {
					i += ZEND_MM_BITSET_LEN;
					len = i - page_num;
					if (len >= pages_count) {
						goto found;
					} else if (i >= free_tail) {
						goto not_found;
					}
					tmp = *(bitset++);
				}
				/* find first 1 bit */
				len = (i + zend_ulong_ntz(tmp)) - page_num;
				if (len >= pages_count) {
					goto found;
				}
				/* set bits from 0 to "bit" */
				tmp |= tmp - 1;
			}
#endif
		} else {
			/* Best-Fit Search */
			int best = -1;
			uint32_t best_len = ZEND_MM_PAGES;
			uint32_t free_tail = chunk->free_tail;
			zend_mm_bitset *bitset = chunk->free_map;
			zend_mm_bitset tmp = *(bitset++);
			uint32_t i = 0;

			while (1) {
				/* skip allocated blocks */
				while (tmp == (zend_mm_bitset)-1) {
					i += ZEND_MM_BITSET_LEN;
					if (i == ZEND_MM_PAGES) {
						if (best > 0) {
							page_num = best;
							goto found;
						} else {
							goto not_found;
						}
					}
					tmp = *(bitset++);
				}
				/* find first 0 bit */
				page_num = i + zend_mm_bitset_nts(tmp);
				/* reset bits from 0 to "bit" */
				tmp &= tmp + 1;
				/* skip free blocks */
				while (tmp == 0) {
					i += ZEND_MM_BITSET_LEN;
					if (i >= free_tail || i == ZEND_MM_PAGES) {
						len = ZEND_MM_PAGES - page_num;
						if (len >= pages_count && len < best_len) {
							chunk->free_tail = page_num + pages_count;
							goto found;
						} else {
							/* set accurate value */
							chunk->free_tail = page_num;
							if (best > 0) {
								page_num = best;
								goto found;
							} else {
								goto not_found;
							}
						}
					}
					tmp = *(bitset++);
				}
				/* find first 1 bit */
				len = i + zend_ulong_ntz(tmp) - page_num;
				if (len >= pages_count) {
					if (len == pages_count) {
						goto found;
					} else if (len < best_len) {
						best_len = len;
						best = page_num;
					}
				}
				/* set bits from 0 to "bit" */
				tmp |= tmp - 1;
			}
		}

not_found:
		if (chunk->next == heap->main_chunk) {
get_chunk:
			if (heap->cached_chunks) {
				heap->cached_chunks_count--;
				chunk = heap->cached_chunks;
				heap->cached_chunks = chunk->next;
			} else {
#if ZEND_MM_LIMIT
				if (UNEXPECTED(ZEND_MM_CHUNK_SIZE > heap->limit - heap->real_size)) {
					if (zend_mm_gc(heap)) {
						goto get_chunk;
					} else if (heap->overflow == 0) {
#if ZEND_DEBUG
						zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
						zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, ZEND_MM_PAGE_SIZE * pages_count);
#endif
						return NULL;
					}
				}
#endif
				chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
				if (UNEXPECTED(chunk == NULL)) {
					/* insufficient memory */
					if (zend_mm_gc(heap) &&
					    (chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE)) != NULL) {
						/* pass */
					} else {
#if !ZEND_MM_LIMIT
						zend_mm_safe_error(heap, "Out of memory");
#elif ZEND_DEBUG
						zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
#else
						zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, ZEND_MM_PAGE_SIZE * pages_count);
#endif
						return NULL;
					}
				}
#if ZEND_MM_STAT
				do {
					size_t size = heap->real_size + ZEND_MM_CHUNK_SIZE;
					size_t peak = MAX(heap->real_peak, size);
					heap->real_size = size;
					heap->real_peak = peak;
				} while (0);
#elif ZEND_MM_LIMIT
				heap->real_size += ZEND_MM_CHUNK_SIZE;
#endif
			}
			heap->chunks_count++;
			if (heap->chunks_count > heap->peak_chunks_count) {
				heap->peak_chunks_count = heap->chunks_count;
			}
			zend_mm_chunk_init(heap, chunk);
			page_num = ZEND_MM_FIRST_PAGE;
			len = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
			goto found;
		} else {
			chunk = chunk->next;
			steps++;
		}
	}

found:
	if (steps > 2 && pages_count < 8) {
		/* move chunk into the head of the linked-list */
		chunk->prev->next = chunk->next;
		chunk->next->prev = chunk->prev;
		chunk->next = heap->main_chunk->next;
		chunk->prev = heap->main_chunk;
		chunk->prev->next = chunk;
		chunk->next->prev = chunk;
	}
	/* mark run as allocated */
	chunk->free_pages -= pages_count;
	zend_mm_bitset_set_range(chunk->free_map, page_num, pages_count);
	chunk->map[page_num] = ZEND_MM_LRUN(pages_count);
	if (page_num == chunk->free_tail) {
		chunk->free_tail = page_num + pages_count;
	}
	return ZEND_MM_PAGE_ADDR(chunk, page_num);
}
static zend_always_inline void *zend_mm_alloc_large_ex(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	int pages_count = (int)ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE);
#if ZEND_DEBUG
	void *ptr = zend_mm_alloc_pages(heap, pages_count, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
	void *ptr = zend_mm_alloc_pages(heap, pages_count ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
#if ZEND_MM_STAT
	do {
		size_t size = heap->size + pages_count * ZEND_MM_PAGE_SIZE;
		size_t peak = MAX(heap->peak, size);
		heap->size = size;
		heap->peak = peak;
	} while (0);
#endif
	return ptr;
}

#if ZEND_DEBUG
static zend_never_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_alloc_large_ex(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
#else
static zend_never_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_alloc_large_ex(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
#endif
static zend_always_inline void zend_mm_delete_chunk(zend_mm_heap *heap, zend_mm_chunk *chunk)
{
	chunk->next->prev = chunk->prev;
	chunk->prev->next = chunk->next;
	heap->chunks_count--;
	if (heap->chunks_count + heap->cached_chunks_count < heap->avg_chunks_count + 0.1
	 || (heap->chunks_count == heap->last_chunks_delete_boundary
	  && heap->last_chunks_delete_count >= 4)) {
		/* delay deletion */
		heap->cached_chunks_count++;
		chunk->next = heap->cached_chunks;
		heap->cached_chunks = chunk;
	} else {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
		heap->real_size -= ZEND_MM_CHUNK_SIZE;
#endif
		if (!heap->cached_chunks) {
			if (heap->chunks_count != heap->last_chunks_delete_boundary) {
				heap->last_chunks_delete_boundary = heap->chunks_count;
				heap->last_chunks_delete_count = 0;
			} else {
				heap->last_chunks_delete_count++;
			}
		}
		if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
			zend_mm_chunk_free(heap, chunk, ZEND_MM_CHUNK_SIZE);
		} else {
			//TODO: select the best chunk to delete???
			chunk->next = heap->cached_chunks->next;
			zend_mm_chunk_free(heap, heap->cached_chunks, ZEND_MM_CHUNK_SIZE);
			heap->cached_chunks = chunk;
		}
	}
}
static zend_always_inline void zend_mm_free_pages_ex(zend_mm_heap *heap, zend_mm_chunk *chunk, uint32_t page_num, uint32_t pages_count, int free_chunk)
{
	chunk->free_pages += pages_count;
	zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count);
	chunk->map[page_num] = 0;
	if (chunk->free_tail == page_num + pages_count) {
		/* this setting may not be accurate */
		chunk->free_tail = page_num;
	}
	if (free_chunk && chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
		zend_mm_delete_chunk(heap, chunk);
	}
}

static zend_never_inline void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
{
	zend_mm_free_pages_ex(heap, chunk, page_num, pages_count, 1);
}

static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
{
#if ZEND_MM_STAT
	heap->size -= pages_count * ZEND_MM_PAGE_SIZE;
#endif
	zend_mm_free_pages(heap, chunk, page_num, pages_count);
}
/**************/
/* Small Runs */
/**************/

/* higher set bit number (0->N/A, 1->1, 2->2, 4->3, 8->4, 127->7, 128->8 etc) */
static zend_always_inline int zend_mm_small_size_to_bit(int size)
{
#if (defined(__GNUC__) || __has_builtin(__builtin_clz)) && defined(PHP_HAVE_BUILTIN_CLZ)
	return (__builtin_clz(size) ^ 0x1f) + 1;
#elif defined(_WIN32)
	unsigned long index;

	if (!BitScanReverse(&index, (unsigned long)size)) {
		/* undefined behavior */
		return 64;
	}

	return (((31 - (int)index) ^ 0x1f) + 1);
#else
	int n = 16;
	if (size <= 0x00ff) {n -= 8; size = size << 8;}
	if (size <= 0x0fff) {n -= 4; size = size << 4;}
	if (size <= 0x3fff) {n -= 2; size = size << 2;}
	if (size <= 0x7fff) {n -= 1;}
	return n;
#endif
}

#ifndef MAX
# define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif

#ifndef MIN
# define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
{
#if 0
	int n;
	                        /*0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12*/
	static const int f1[] = { 3,  3,  3,  3,  3,  3,  3,  4,  5,  6,  7,  8,  9};
	static const int f2[] = { 0,  0,  0,  0,  0,  0,  0,  4,  8, 12, 16, 20, 24};

	if (UNEXPECTED(size <= 2)) return 0;
	n = zend_mm_small_size_to_bit(size - 1);
	return ((size-1) >> f1[n]) + f2[n];
#else
	unsigned int t1, t2;

	if (size <= 64) {
		/* we need to support size == 0 ... */
		return (size - !!size) >> 3;
	} else {
		t1 = size - 1;
		t2 = zend_mm_small_size_to_bit(t1) - 3;
		t1 = t1 >> t2;
		t2 = t2 - 3;
		t2 = t2 << 2;
		return (int)(t1 + t2);
	}
#endif
}
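
/* Editor's note: worked examples (not part of the original file) for the bin
 * mapping above, using the 30 predefined sizes from zend_alloc_sizes.h
 * (8, 16, ..., 64, 80, ..., 3072): sizes up to 64 map linearly in 8-byte
 * steps, while larger sizes map in groups of four bins per power of two.
 */
#if 0
static void bin_examples(void)
{
	ZEND_ASSERT(zend_mm_small_size_to_bin(1)    == 0);  /* rounded up to 8    */
	ZEND_ASSERT(zend_mm_small_size_to_bin(64)   == 7);  /* exact fit: 64      */
	ZEND_ASSERT(zend_mm_small_size_to_bin(100)  == 10); /* rounded up to 112  */
	ZEND_ASSERT(zend_mm_small_size_to_bin(3072) == 29); /* largest small size */
}
#endif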
#define ZEND_MM_SMALL_SIZE_TO_BIN(size) zend_mm_small_size_to_bin(size)

static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, uint32_t bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	zend_mm_chunk *chunk;
	int page_num;
	zend_mm_bin *bin;
	zend_mm_free_slot *p, *end;

#if ZEND_DEBUG
	bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num], bin_data_size[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
	bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
	if (UNEXPECTED(bin == NULL)) {
		/* insufficient memory */
		return NULL;
	}

	chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE);
	page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE;
	chunk->map[page_num] = ZEND_MM_SRUN(bin_num);
	if (bin_pages[bin_num] > 1) {
		uint32_t i = 1;

		do {
			chunk->map[page_num+i] = ZEND_MM_NRUN(bin_num, i);
			i++;
		} while (i < bin_pages[bin_num]);
	}

	/* create a linked list of elements from 1 to last */
	end = (zend_mm_free_slot*)((char*)bin + (bin_data_size[bin_num] * (bin_elements[bin_num] - 1)));
	heap->free_slot[bin_num] = p = (zend_mm_free_slot*)((char*)bin + bin_data_size[bin_num]);
	do {
		p->next_free_slot = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
#if ZEND_DEBUG
		do {
			zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
			dbg->size = 0;
		} while (0);
#endif
		p = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
	} while (p != end);

	/* terminate list using NULL */
	p->next_free_slot = NULL;
#if ZEND_DEBUG
	do {
		zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
		dbg->size = 0;
	} while (0);
#endif

	/* return first element */
	return (char*)bin;
}
static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_STAT
	do {
		size_t size = heap->size + bin_data_size[bin_num];
		size_t peak = MAX(heap->peak, size);
		heap->size = size;
		heap->peak = peak;
	} while (0);
#endif

	if (EXPECTED(heap->free_slot[bin_num] != NULL)) {
		zend_mm_free_slot *p = heap->free_slot[bin_num];
		heap->free_slot[bin_num] = p->next_free_slot;
		return (void*)p;
	} else {
		return zend_mm_alloc_small_slow(heap, bin_num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	}
}

static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, int bin_num)
{
	zend_mm_free_slot *p;

#if ZEND_MM_STAT
	heap->size -= bin_data_size[bin_num];
#endif

#if ZEND_DEBUG
	do {
		zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
		dbg->size = 0;
	} while (0);
#endif

	p = (zend_mm_free_slot*)ptr;
	p->next_free_slot = heap->free_slot[bin_num];
	heap->free_slot[bin_num] = p;
}
/********/
/* Heap */
/********/

#if ZEND_DEBUG
static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_heap *heap, void *ptr)
{
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
	zend_mm_chunk *chunk;
	int page_num;
	zend_mm_page_info info;

	ZEND_MM_CHECK(page_offset != 0, "zend_mm_heap corrupted");
	chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
	page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
	info = chunk->map[page_num];
	ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
	if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
		int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
		return (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
	} else /* if (info & ZEND_MM_IS_LRUN) */ {
		int pages_count = ZEND_MM_LRUN_PAGES(info);

		return (zend_mm_debug_info*)((char*)ptr + ZEND_MM_PAGE_SIZE * pages_count - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
	}
}
#endif
static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	void *ptr;
#if ZEND_DEBUG
	size_t real_size = size;
	zend_mm_debug_info *dbg;

	/* special handling for zero-size allocation */
	size = MAX(size, 1);
	size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
	if (UNEXPECTED(size < real_size)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu + %zu)", ZEND_MM_ALIGNED_SIZE(real_size), ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
		return NULL;
	}
#endif
	if (EXPECTED(size <= ZEND_MM_MAX_SMALL_SIZE)) {
		ptr = zend_mm_alloc_small(heap, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
		dbg = zend_mm_get_debug_info(heap, ptr);
		dbg->size = real_size;
		dbg->filename = __zend_filename;
		dbg->orig_filename = __zend_orig_filename;
		dbg->lineno = __zend_lineno;
		dbg->orig_lineno = __zend_orig_lineno;
#endif
		return ptr;
	} else if (EXPECTED(size <= ZEND_MM_MAX_LARGE_SIZE)) {
		ptr = zend_mm_alloc_large(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
		dbg = zend_mm_get_debug_info(heap, ptr);
		dbg->size = real_size;
		dbg->filename = __zend_filename;
		dbg->orig_filename = __zend_orig_filename;
		dbg->lineno = __zend_lineno;
		dbg->orig_lineno = __zend_orig_lineno;
#endif
		return ptr;
	} else {
#if ZEND_DEBUG
		size = real_size;
#endif
		return zend_mm_alloc_huge(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	}
}
static zend_always_inline void zend_mm_free_heap(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);

	if (UNEXPECTED(page_offset == 0)) {
		if (ptr != NULL) {
			zend_mm_free_huge(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		}
	} else {
		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
		zend_mm_page_info info = chunk->map[page_num];

		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
		if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
			zend_mm_free_small(heap, ptr, ZEND_MM_SRUN_BIN_NUM(info));
		} else /* if (info & ZEND_MM_IS_LRUN) */ {
			int pages_count = ZEND_MM_LRUN_PAGES(info);

			ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
			zend_mm_free_large(heap, chunk, page_num, pages_count);
		}
	}
}
static size_t zend_mm_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);

	if (UNEXPECTED(page_offset == 0)) {
		return zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	} else {
		zend_mm_chunk *chunk;
#if 0 && ZEND_DEBUG
		zend_mm_debug_info *dbg = zend_mm_get_debug_info(heap, ptr);
		return dbg->size;
#else
		int page_num;
		zend_mm_page_info info;

		chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
		info = chunk->map[page_num];
		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
		if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
			return bin_data_size[ZEND_MM_SRUN_BIN_NUM(info)];
		} else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
			return ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
		}
#endif
	}
}
static zend_never_inline void *zend_mm_realloc_slow(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	void *ret;

#if ZEND_MM_STAT
	do {
		size_t orig_peak = heap->peak;
#endif
		ret = zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		memcpy(ret, ptr, copy_size);
		zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_MM_STAT
		heap->peak = MAX(orig_peak, heap->size);
	} while (0);
#endif
	return ret;
}
static zend_never_inline void *zend_mm_realloc_huge(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t old_size;
	size_t new_size;
#if ZEND_DEBUG
	size_t real_size;
#endif

	old_size = zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
	real_size = size;
	size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
#endif
	if (size > ZEND_MM_MAX_LARGE_SIZE) {
#if ZEND_DEBUG
		size = real_size;
#endif
#ifdef ZEND_WIN32
		/* On Windows we don't have the ability to extend huge blocks in-place.
		 * We allocate them with 2MB size granularity, to avoid many
		 * reallocations when they are extended by small pieces
		 */
		new_size = ZEND_MM_ALIGNED_SIZE_EX(size, MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE));
#else
		new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
#endif
		if (new_size == old_size) {
#if ZEND_DEBUG
			zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
			zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
			return ptr;
		} else if (new_size < old_size) {
			/* unmap tail */
			if (zend_mm_chunk_truncate(heap, ptr, old_size, new_size)) {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
				heap->real_size -= old_size - new_size;
#endif
#if ZEND_MM_STAT
				heap->size -= old_size - new_size;
#endif
#if ZEND_DEBUG
				zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
				zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
				return ptr;
			}
		} else /* if (new_size > old_size) */ {
#if ZEND_MM_LIMIT
			if (UNEXPECTED(new_size - old_size > heap->limit - heap->real_size)) {
				if (zend_mm_gc(heap) && new_size - old_size <= heap->limit - heap->real_size) {
					/* pass */
				} else if (heap->overflow == 0) {
#if ZEND_DEBUG
					zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
					zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
#endif
					return NULL;
				}
			}
#endif
			/* try to map tail right after this block */
			if (zend_mm_chunk_extend(heap, ptr, old_size, new_size)) {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
				heap->real_size += new_size - old_size;
#endif
#if ZEND_MM_STAT
				heap->real_peak = MAX(heap->real_peak, heap->real_size);
				heap->size += new_size - old_size;
				heap->peak = MAX(heap->peak, heap->size);
#endif
#if ZEND_DEBUG
				zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
				zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
				return ptr;
			}
		}
	}

	return zend_mm_realloc_slow(heap, ptr, size, MIN(old_size, copy_size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
static zend_always_inline void *zend_mm_realloc_heap(zend_mm_heap *heap, void *ptr, size_t size, zend_bool use_copy_size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t page_offset;
	size_t old_size;
	size_t new_size;
	void *ret;
#if ZEND_DEBUG
	zend_mm_debug_info *dbg;
#endif

	page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
	if (UNEXPECTED(page_offset == 0)) {
		if (EXPECTED(ptr == NULL)) {
			return _zend_mm_alloc(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		} else {
			return zend_mm_realloc_huge(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		}
	} else {
		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
		zend_mm_page_info info = chunk->map[page_num];
#if ZEND_DEBUG
		size_t real_size = size;

		size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
#endif

		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
		if (info & ZEND_MM_IS_SRUN) {
			int old_bin_num = ZEND_MM_SRUN_BIN_NUM(info);

			do {
				old_size = bin_data_size[old_bin_num];

				/* Check if requested size fits into current bin */
				if (size <= old_size) {
					/* Check if truncation is necessary */
					if (old_bin_num > 0 && size < bin_data_size[old_bin_num - 1]) {
						/* truncation */
						ret = zend_mm_alloc_small(heap, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
						copy_size = use_copy_size ? MIN(size, copy_size) : size;
						memcpy(ret, ptr, copy_size);
						zend_mm_free_small(heap, ptr, old_bin_num);
					} else {
						/* reallocation in-place */
						ret = ptr;
					}
				} else if (size <= ZEND_MM_MAX_SMALL_SIZE) {
					/* small extension */

#if ZEND_MM_STAT
					do {
						size_t orig_peak = heap->peak;
#endif
						ret = zend_mm_alloc_small(heap, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
						copy_size = use_copy_size ? MIN(old_size, copy_size) : old_size;
						memcpy(ret, ptr, copy_size);
						zend_mm_free_small(heap, ptr, old_bin_num);
#if ZEND_MM_STAT
						heap->peak = MAX(orig_peak, heap->size);
					} while (0);
#endif
				} else {
					/* slow reallocation */
					break;
				}

#if ZEND_DEBUG
				dbg = zend_mm_get_debug_info(heap, ret);
				dbg->size = real_size;
				dbg->filename = __zend_filename;
				dbg->orig_filename = __zend_orig_filename;
				dbg->lineno = __zend_lineno;
				dbg->orig_lineno = __zend_orig_lineno;
#endif
				return ret;
			} while (0);
		} else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
			ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
			old_size = ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
			if (size > ZEND_MM_MAX_SMALL_SIZE && size <= ZEND_MM_MAX_LARGE_SIZE) {
				new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
				if (new_size == old_size) {
#if ZEND_DEBUG
					dbg = zend_mm_get_debug_info(heap, ptr);
					dbg->size = real_size;
					dbg->filename = __zend_filename;
					dbg->orig_filename = __zend_orig_filename;
					dbg->lineno = __zend_lineno;
					dbg->orig_lineno = __zend_orig_lineno;
#endif
					return ptr;
				} else if (new_size < old_size) {
					/* free tail pages */
					int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
					int rest_pages_count = (int)((old_size - new_size) / ZEND_MM_PAGE_SIZE);

#if ZEND_MM_STAT
					heap->size -= rest_pages_count * ZEND_MM_PAGE_SIZE;
#endif
					chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
					chunk->free_pages += rest_pages_count;
					zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count);
#if ZEND_DEBUG
					dbg = zend_mm_get_debug_info(heap, ptr);
					dbg->size = real_size;
					dbg->filename = __zend_filename;
					dbg->orig_filename = __zend_orig_filename;
					dbg->lineno = __zend_lineno;
					dbg->orig_lineno = __zend_orig_lineno;
#endif
					return ptr;
				} else /* if (new_size > old_size) */ {
					int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
					int old_pages_count = (int)(old_size / ZEND_MM_PAGE_SIZE);

					/* try to allocate tail pages after this block */
					if (page_num + new_pages_count <= ZEND_MM_PAGES &&
					    zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count)) {
#if ZEND_MM_STAT
						do {
							size_t size = heap->size + (new_size - old_size);
							size_t peak = MAX(heap->peak, size);
							heap->size = size;
							heap->peak = peak;
						} while (0);
#endif
						chunk->free_pages -= new_pages_count - old_pages_count;
						zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count);
						chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
#if ZEND_DEBUG
						dbg = zend_mm_get_debug_info(heap, ptr);
						dbg->size = real_size;
						dbg->filename = __zend_filename;
						dbg->orig_filename = __zend_orig_filename;
						dbg->lineno = __zend_lineno;
						dbg->orig_lineno = __zend_orig_lineno;
#endif
						return ptr;
					}
				}
			}
		}
#if ZEND_DEBUG
		size = real_size;
#endif
	}

	copy_size = MIN(old_size, copy_size);
	return zend_mm_realloc_slow(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
/*********************/
/* Huge Runs (again) */
/*********************/
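/* Huge blocks are tracked in heap->huge_list, a NULL-terminated singly
 * linked list of zend_mm_huge_list nodes; the nodes themselves are small
 * and live in the regular heap. All lookups below are linear, which is
 * acceptable because huge allocations are expected to be rare. */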
#if ZEND_DEBUG
static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#else
static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#endif
{
    zend_mm_huge_list *list = (zend_mm_huge_list*)zend_mm_alloc_heap(heap, sizeof(zend_mm_huge_list) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);

    list->ptr = ptr;
    list->size = size;
    list->next = heap->huge_list;
#if ZEND_DEBUG
    list->dbg.size = dbg_size;
    list->dbg.filename = __zend_filename;
    list->dbg.orig_filename = __zend_orig_filename;
    list->dbg.lineno = __zend_lineno;
    list->dbg.orig_lineno = __zend_orig_lineno;
#endif
    heap->huge_list = list;
}

static size_t zend_mm_del_huge_block(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    zend_mm_huge_list *prev = NULL;
    zend_mm_huge_list *list = heap->huge_list;

    while (list != NULL) {
        if (list->ptr == ptr) {
            size_t size;

            if (prev) {
                prev->next = list->next;
            } else {
                heap->huge_list = list->next;
            }
            size = list->size;
            zend_mm_free_heap(heap, list ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
            return size;
        }
        prev = list;
        list = list->next;
    }
    ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
    return 0;
}

static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    zend_mm_huge_list *list = heap->huge_list;

    while (list != NULL) {
        if (list->ptr == ptr) {
            return list->size;
        }
        list = list->next;
    }
    ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
    return 0;
}

#if ZEND_DEBUG
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#else
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#endif
{
    zend_mm_huge_list *list = heap->huge_list;

    while (list != NULL) {
        if (list->ptr == ptr) {
            list->size = size;
#if ZEND_DEBUG
            list->dbg.size = dbg_size;
            list->dbg.filename = __zend_filename;
            list->dbg.orig_filename = __zend_orig_filename;
            list->dbg.lineno = __zend_lineno;
            list->dbg.orig_lineno = __zend_orig_lineno;
#endif
            return;
        }
        list = list->next;
    }
}
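/* zend_mm_alloc_huge() rounds the request up to the allocation granularity,
 * enforces the memory limit (retrying once after zend_mm_gc()), maps the
 * block with chunk alignment, and records it in huge_list. A worked example
 * (assuming a 4096-byte REAL_PAGE_SIZE on a non-Windows build): a request
 * for 3000000 bytes is rounded up to 733 pages, i.e. 3002368 bytes. */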
static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#ifdef ZEND_WIN32
    /* On Windows we don't have the ability to extend huge blocks in-place.
     * We allocate them with 2MB size granularity to avoid many
     * reallocations when they are extended by small pieces.
     */
    size_t alignment = MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE);
#else
    size_t alignment = REAL_PAGE_SIZE;
#endif
    size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, alignment);
    void *ptr;

    if (UNEXPECTED(new_size < size)) {
        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu + %zu)", size, alignment);
    }

#if ZEND_MM_LIMIT
    if (UNEXPECTED(new_size > heap->limit - heap->real_size)) {
        if (zend_mm_gc(heap) && new_size <= heap->limit - heap->real_size) {
            /* pass */
        } else if (heap->overflow == 0) {
#if ZEND_DEBUG
            zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
            zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
#endif
            return NULL;
        }
    }
#endif
    ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE);
    if (UNEXPECTED(ptr == NULL)) {
        /* insufficient memory */
        if (zend_mm_gc(heap) &&
            (ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE)) != NULL) {
            /* pass */
        } else {
#if !ZEND_MM_LIMIT
            zend_mm_safe_error(heap, "Out of memory");
#elif ZEND_DEBUG
            zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
#else
            zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, size);
#endif
            return NULL;
        }
    }
#if ZEND_DEBUG
    zend_mm_add_huge_block(heap, ptr, new_size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
    zend_mm_add_huge_block(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
#if ZEND_MM_STAT
    do {
        size_t size = heap->real_size + new_size;
        size_t peak = MAX(heap->real_peak, size);

        heap->real_size = size;
        heap->real_peak = peak;
    } while (0);
    do {
        size_t size = heap->size + new_size;
        size_t peak = MAX(heap->peak, size);

        heap->size = size;
        heap->peak = peak;
    } while (0);
#elif ZEND_MM_LIMIT
    heap->real_size += new_size;
#endif
    return ptr;
}
static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    size_t size;

    ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0, "zend_mm_heap corrupted");
    size = zend_mm_del_huge_block(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    zend_mm_chunk_free(heap, ptr, size);
#if ZEND_MM_STAT || ZEND_MM_LIMIT
    heap->real_size -= size;
#endif
#if ZEND_MM_STAT
    heap->size -= size;
#endif
}

/******************/
/* Initialization */
/******************/
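/* The heap descriptor is bootstrapped inside the first chunk's heap_slot,
 * so a new heap costs exactly one chunk-sized mapping and needs no other
 * allocator to get started. */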
static zend_mm_heap *zend_mm_init(void)
{
    zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc_int(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
    zend_mm_heap *heap;

    if (UNEXPECTED(chunk == NULL)) {
#if ZEND_MM_ERROR
#ifdef _WIN32
        stderr_last_error("Can't initialize heap");
#else
        fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
        return NULL;
    }
    heap = &chunk->heap_slot;
    chunk->heap = heap;
    chunk->next = chunk;
    chunk->prev = chunk;
    chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
    chunk->free_tail = ZEND_MM_FIRST_PAGE;
    chunk->num = 0;
    chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
    chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
    heap->main_chunk = chunk;
    heap->cached_chunks = NULL;
    heap->chunks_count = 1;
    heap->peak_chunks_count = 1;
    heap->cached_chunks_count = 0;
    heap->avg_chunks_count = 1.0;
    heap->last_chunks_delete_boundary = 0;
    heap->last_chunks_delete_count = 0;
#if ZEND_MM_STAT || ZEND_MM_LIMIT
    heap->real_size = ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
    heap->real_peak = ZEND_MM_CHUNK_SIZE;
    heap->size = 0;
    heap->peak = 0;
#endif
#if ZEND_MM_LIMIT
    heap->limit = ((size_t)Z_L(-1) >> (size_t)Z_L(1));
    heap->overflow = 0;
#endif
#if ZEND_MM_CUSTOM
    heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
#endif
#if ZEND_MM_STORAGE
    heap->storage = NULL;
#endif
    heap->huge_list = NULL;
    return heap;
}
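/* zend_mm_gc() reclaims memory in three passes: (1) walk each free-slot
 * list and accumulate a per-run free-element counter in the page map;
 * (2) for bins that turned out to have fully-free runs, unlink the cached
 * elements belonging to those runs; (3) walk every chunk, release runs
 * whose counter reached bin_elements[], reset the counters of the rest,
 * and delete chunks that become entirely empty. Returns the number of
 * bytes handed back. */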
ZEND_API size_t zend_mm_gc(zend_mm_heap *heap)
{
    zend_mm_free_slot *p, **q;
    zend_mm_chunk *chunk;
    size_t page_offset;
    int page_num;
    zend_mm_page_info info;
    uint32_t i, free_counter;
    int has_free_pages;
    size_t collected = 0;

#if ZEND_MM_CUSTOM
    if (heap->use_custom_heap) {
        return 0;
    }
#endif

    for (i = 0; i < ZEND_MM_BINS; i++) {
        has_free_pages = 0;
        p = heap->free_slot[i];
        while (p != NULL) {
            chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
            ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
            page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
            ZEND_ASSERT(page_offset != 0);
            page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
            info = chunk->map[page_num];
            ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
            if (info & ZEND_MM_IS_LRUN) {
                page_num -= ZEND_MM_NRUN_OFFSET(info);
                info = chunk->map[page_num];
                ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
                ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
            }
            ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
            free_counter = ZEND_MM_SRUN_FREE_COUNTER(info) + 1;
            if (free_counter == bin_elements[i]) {
                has_free_pages = 1;
            }
            chunk->map[page_num] = ZEND_MM_SRUN_EX(i, free_counter);
            p = p->next_free_slot;
        }

        if (!has_free_pages) {
            continue;
        }

        q = &heap->free_slot[i];
        p = *q;
        while (p != NULL) {
            chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
            ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
            page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
            ZEND_ASSERT(page_offset != 0);
            page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
            info = chunk->map[page_num];
            ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
            if (info & ZEND_MM_IS_LRUN) {
                page_num -= ZEND_MM_NRUN_OFFSET(info);
                info = chunk->map[page_num];
                ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
                ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
            }
            ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
            if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[i]) {
                /* remove from cache */
                p = p->next_free_slot;
                *q = p;
            } else {
                q = &p->next_free_slot;
                p = *q;
            }
        }
    }

    chunk = heap->main_chunk;
    do {
        i = ZEND_MM_FIRST_PAGE;
        while (i < chunk->free_tail) {
            if (zend_mm_bitset_is_set(chunk->free_map, i)) {
                info = chunk->map[i];
                if (info & ZEND_MM_IS_SRUN) {
                    int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
                    int pages_count = bin_pages[bin_num];

                    if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[bin_num]) {
                        /* all elements are free */
                        zend_mm_free_pages_ex(heap, chunk, i, pages_count, 0);
                        collected += pages_count;
                    } else {
                        /* reset counter */
                        chunk->map[i] = ZEND_MM_SRUN(bin_num);
                    }
                    i += bin_pages[bin_num];
                } else /* if (info & ZEND_MM_IS_LRUN) */ {
                    i += ZEND_MM_LRUN_PAGES(info);
                }
            } else {
                i++;
            }
        }
        if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
            zend_mm_chunk *next_chunk = chunk->next;

            zend_mm_delete_chunk(heap, chunk);
            chunk = next_chunk;
        } else {
            chunk = chunk->next;
        }
    } while (chunk != heap->main_chunk);

    return collected * ZEND_MM_PAGE_SIZE;
}
#if ZEND_DEBUG
/******************/
/* Leak detection */
/******************/
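/* The leak scan reuses the allocator's own metadata: the per-element
 * zend_mm_debug_info records of small runs, the trailing debug record of
 * each large run, and the dbg field of every huge-list node. Leaks that
 * share a filename/lineno with one already reported are folded into a
 * single "repeated" count instead of being printed again. */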
static zend_long zend_mm_find_leaks_small(zend_mm_chunk *p, uint32_t i, uint32_t j, zend_leak_info *leak)
{
    int empty = 1;
    zend_long count = 0;
    int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

    while (j < bin_elements[bin_num]) {
        if (dbg->size != 0) {
            if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
                count++;
                dbg->size = 0;
                dbg->filename = NULL;
                dbg->lineno = 0;
            } else {
                empty = 0;
            }
        }
        j++;
        dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
    }
    if (empty) {
        zend_mm_bitset_reset_range(p->free_map, i, bin_pages[bin_num]);
    }
    return count;
}

static zend_long zend_mm_find_leaks(zend_mm_heap *heap, zend_mm_chunk *p, uint32_t i, zend_leak_info *leak)
{
    zend_long count = 0;

    do {
        while (i < p->free_tail) {
            if (zend_mm_bitset_is_set(p->free_map, i)) {
                if (p->map[i] & ZEND_MM_IS_SRUN) {
                    int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
                    count += zend_mm_find_leaks_small(p, i, 0, leak);
                    i += bin_pages[bin_num];
                } else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
                    int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
                    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

                    if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
                        count++;
                    }
                    zend_mm_bitset_reset_range(p->free_map, i, pages_count);
                    i += pages_count;
                }
            } else {
                i++;
            }
        }
        p = p->next;
        i = ZEND_MM_FIRST_PAGE;
    } while (p != heap->main_chunk);
    return count;
}

static zend_long zend_mm_find_leaks_huge(zend_mm_heap *heap, zend_mm_huge_list *list)
{
    zend_long count = 0;
    zend_mm_huge_list *prev = list;
    zend_mm_huge_list *p = list->next;

    while (p) {
        if (p->dbg.filename == list->dbg.filename && p->dbg.lineno == list->dbg.lineno) {
            prev->next = p->next;
            zend_mm_chunk_free(heap, p->ptr, p->size);
            zend_mm_free_heap(heap, p, NULL, 0, NULL, 0);
            count++;
        } else {
            prev = p;
        }
        p = prev->next;
    }
    return count;
}

static void zend_mm_check_leaks(zend_mm_heap *heap)
{
    zend_mm_huge_list *list;
    zend_mm_chunk *p;
    zend_leak_info leak;
    zend_long repeated = 0;
    uint32_t total = 0;
    uint32_t i, j;

    /* find leaked huge blocks and free them */
    list = heap->huge_list;
    while (list) {
        zend_mm_huge_list *q = list;

        leak.addr = list->ptr;
        leak.size = list->dbg.size;
        leak.filename = list->dbg.filename;
        leak.orig_filename = list->dbg.orig_filename;
        leak.lineno = list->dbg.lineno;
        leak.orig_lineno = list->dbg.orig_lineno;

        zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
        zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
        repeated = zend_mm_find_leaks_huge(heap, list);
        total += 1 + repeated;
        if (repeated) {
            zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
        }

        heap->huge_list = list = list->next;
        zend_mm_chunk_free(heap, q->ptr, q->size);
        zend_mm_free_heap(heap, q, NULL, 0, NULL, 0);
    }

    /* for each chunk */
    p = heap->main_chunk;
    do {
        i = ZEND_MM_FIRST_PAGE;
        while (i < p->free_tail) {
            if (zend_mm_bitset_is_set(p->free_map, i)) {
                if (p->map[i] & ZEND_MM_IS_SRUN) {
                    int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
                    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

                    j = 0;
                    while (j < bin_elements[bin_num]) {
                        if (dbg->size != 0) {
                            leak.addr = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * j);
                            leak.size = dbg->size;
                            leak.filename = dbg->filename;
                            leak.orig_filename = dbg->orig_filename;
                            leak.lineno = dbg->lineno;
                            leak.orig_lineno = dbg->orig_lineno;

                            zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
                            zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);

                            dbg->size = 0;
                            dbg->filename = NULL;
                            dbg->lineno = 0;

                            repeated = zend_mm_find_leaks_small(p, i, j + 1, &leak) +
                                       zend_mm_find_leaks(heap, p, i + bin_pages[bin_num], &leak);
                            total += 1 + repeated;
                            if (repeated) {
                                zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
                            }
                        }
                        dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
                        j++;
                    }
                    i += bin_pages[bin_num];
                } else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
                    int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
                    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

                    leak.addr = (void*)((char*)p + ZEND_MM_PAGE_SIZE * i);
                    leak.size = dbg->size;
                    leak.filename = dbg->filename;
                    leak.orig_filename = dbg->orig_filename;
                    leak.lineno = dbg->lineno;
                    leak.orig_lineno = dbg->orig_lineno;

                    zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
                    zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);

                    zend_mm_bitset_reset_range(p->free_map, i, pages_count);

                    repeated = zend_mm_find_leaks(heap, p, i + pages_count, &leak);
                    total += 1 + repeated;
                    if (repeated) {
                        zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
                    }
                    i += pages_count;
                }
            } else {
                i++;
            }
        }
        p = p->next;
    } while (p != heap->main_chunk);
    if (total) {
        zend_message_dispatcher(ZMSG_MEMORY_LEAKS_GRAND_TOTAL, &total);
    }
}
#endif

#if ZEND_MM_CUSTOM
static void *tracked_malloc(size_t size);
static void tracked_free_all(void);
#endif
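/* zend_mm_shutdown() has three jobs: tear down a custom or tracked heap
 * when one is installed, release all huge blocks, and then either free
 * every chunk (full shutdown) or recycle chunks into the cache and
 * reinitialize the main chunk for the next request. */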
void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent)
{
    zend_mm_chunk *p;
    zend_mm_huge_list *list;

#if ZEND_MM_CUSTOM
    if (heap->use_custom_heap) {
        if (heap->custom_heap.std._malloc == tracked_malloc) {
            if (silent) {
                tracked_free_all();
            }
            zend_hash_clean(heap->tracked_allocs);
            if (full) {
                zend_hash_destroy(heap->tracked_allocs);
                free(heap->tracked_allocs);
                /* Make sure the heap free below does not use tracked_free(). */
                heap->custom_heap.std._free = free;
            }
        }

        if (full) {
            if (ZEND_DEBUG && heap->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
                heap->custom_heap.debug._free(heap ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
            } else {
                heap->custom_heap.std._free(heap);
            }
        }
        return;
    }
#endif

#if ZEND_DEBUG
    if (!silent) {
        zend_mm_check_leaks(heap);
    }
#endif

    /* free huge blocks */
    list = heap->huge_list;
    heap->huge_list = NULL;
    while (list) {
        zend_mm_huge_list *q = list;
        list = list->next;
        zend_mm_chunk_free(heap, q->ptr, q->size);
    }

    /* move all chunks except the first one into the cache */
    p = heap->main_chunk->next;
    while (p != heap->main_chunk) {
        zend_mm_chunk *q = p->next;
        p->next = heap->cached_chunks;
        heap->cached_chunks = p;
        p = q;
        heap->chunks_count--;
        heap->cached_chunks_count++;
    }

    if (full) {
        /* free all cached chunks */
        while (heap->cached_chunks) {
            p = heap->cached_chunks;
            heap->cached_chunks = p->next;
            zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
        }
        /* free the first chunk */
        zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
    } else {
        /* free some cached chunks to keep average count */
        heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
        while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
               heap->cached_chunks) {
            p = heap->cached_chunks;
            heap->cached_chunks = p->next;
            zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
            heap->cached_chunks_count--;
        }
        /* clear cached chunks */
        p = heap->cached_chunks;
        while (p != NULL) {
            zend_mm_chunk *q = p->next;
            memset(p, 0, sizeof(zend_mm_chunk));
            p->next = q;
            p = q;
        }

        /* reinitialize the first chunk and heap */
        p = heap->main_chunk;
        p->heap = &p->heap_slot;
        p->next = p;
        p->prev = p;
        p->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
        p->free_tail = ZEND_MM_FIRST_PAGE;
        p->num = 0;

#if ZEND_MM_STAT
        heap->size = heap->peak = 0;
#endif
        memset(heap->free_slot, 0, sizeof(heap->free_slot));
#if ZEND_MM_STAT || ZEND_MM_LIMIT
        heap->real_size = ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
        heap->real_peak = ZEND_MM_CHUNK_SIZE;
#endif
        heap->chunks_count = 1;
        heap->peak_chunks_count = 1;
        heap->last_chunks_delete_boundary = 0;
        heap->last_chunks_delete_count = 0;

        memset(p->free_map, 0, sizeof(p->free_map) + sizeof(p->map));
        p->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
        p->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
    }
}
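/* The trimming loop above keeps roughly the running average of per-request
 * peak chunk counts. Worked example (illustrative numbers only): with
 * avg_chunks_count == 1.0 and peak_chunks_count == 5, the new average is
 * (1.0 + 5.0) / 2 = 3.0, and cached chunks are freed until
 * cached_chunks_count + 0.9 <= 3.0, i.e. at most 2 chunks stay cached. */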
/**************/
/* PUBLIC API */
/**************/

ZEND_API void* ZEND_FASTCALL _zend_mm_alloc(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void ZEND_FASTCALL _zend_mm_free(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void* ZEND_FASTCALL _zend_mm_realloc(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    return zend_mm_realloc_heap(heap, ptr, size, 0, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void* ZEND_FASTCALL _zend_mm_realloc2(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    return zend_mm_realloc_heap(heap, ptr, size, 1, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API size_t ZEND_FASTCALL _zend_mm_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    return zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

/**********************/
/* Allocation Manager */
/**********************/

typedef struct _zend_alloc_globals {
    zend_mm_heap *mm_heap;
} zend_alloc_globals;

#ifdef ZTS
static int alloc_globals_id;
static size_t alloc_globals_offset;
# define AG(v) ZEND_TSRMG_FAST(alloc_globals_offset, zend_alloc_globals *, v)
#else
# define AG(v) (alloc_globals.v)
static zend_alloc_globals alloc_globals;
#endif
ZEND_API int is_zend_mm(void)
{
#if ZEND_MM_CUSTOM
    return !AG(mm_heap)->use_custom_heap;
#else
    return 1;
#endif
}

ZEND_API int is_zend_ptr(const void *ptr)
{
#if ZEND_MM_CUSTOM
    if (AG(mm_heap)->use_custom_heap) {
        return 0;
    }
#endif

    if (AG(mm_heap)->main_chunk) {
        zend_mm_chunk *chunk = AG(mm_heap)->main_chunk;

        do {
            if (ptr >= (void*)chunk
             && ptr < (void*)((char*)chunk + ZEND_MM_CHUNK_SIZE)) {
                return 1;
            }
            chunk = chunk->next;
        } while (chunk != AG(mm_heap)->main_chunk);
    }

    if (AG(mm_heap)->huge_list) {
        zend_mm_huge_list *block = AG(mm_heap)->huge_list;

        /* huge_list is a NULL-terminated singly linked list (unlike the
         * circular chunk ring above), and the mapped block lives at
         * block->ptr, not at the list node itself */
        do {
            if (ptr >= block->ptr
             && ptr < (void*)((char*)block->ptr + block->size)) {
                return 1;
            }
            block = block->next;
        } while (block != NULL);
    }
    return 0;
}
#if ZEND_MM_CUSTOM
static ZEND_COLD void* ZEND_FASTCALL _malloc_custom(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
        return AG(mm_heap)->custom_heap.debug._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    } else {
        return AG(mm_heap)->custom_heap.std._malloc(size);
    }
}

static ZEND_COLD void ZEND_FASTCALL _efree_custom(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
        AG(mm_heap)->custom_heap.debug._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    } else {
        AG(mm_heap)->custom_heap.std._free(ptr);
    }
}

static ZEND_COLD void* ZEND_FASTCALL _realloc_custom(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
        return AG(mm_heap)->custom_heap.debug._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    } else {
        return AG(mm_heap)->custom_heap.std._realloc(ptr, size);
    }
}
#endif

#if !ZEND_DEBUG && defined(HAVE_BUILTIN_CONSTANT_P)
#undef _emalloc

#if ZEND_MM_CUSTOM
# define ZEND_MM_CUSTOM_ALLOCATOR(size) do { \
        if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
            return _malloc_custom(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
        } \
    } while (0)
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) do { \
        if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
            _efree_custom(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
            return; \
        } \
    } while (0)
#else
# define ZEND_MM_CUSTOM_ALLOCATOR(size)
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr)
#endif
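/* ZEND_MM_BINS_INFO() expands the macro below once per bin, generating the
 * fixed-size entry points (_emalloc_8(), _emalloc_16(), ... up to
 * _emalloc_3072()), each of which calls zend_mm_alloc_small() with a
 * hard-coded bin number. */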
# define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, x, y) \
    ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \
        ZEND_MM_CUSTOM_ALLOCATOR(_size); \
        return zend_mm_alloc_small(AG(mm_heap), _num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
    }

ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, x, y)

ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    ZEND_MM_CUSTOM_ALLOCATOR(size);
    return zend_mm_alloc_large_ex(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
{
    ZEND_MM_CUSTOM_ALLOCATOR(size);
    return zend_mm_alloc_huge(AG(mm_heap), size);
}

#if ZEND_DEBUG
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
    ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
        ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
        { \
            size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); \
            zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
            int page_num = page_offset / ZEND_MM_PAGE_SIZE; \
            ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
            ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_SRUN); \
            ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(chunk->map[page_num]) == _num); \
            zend_mm_free_small(AG(mm_heap), ptr, _num); \
        } \
    }
#else
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
    ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
        ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
        { \
            zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
            ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
            zend_mm_free_small(AG(mm_heap), ptr, _num); \
        } \
    }
#endif

ZEND_MM_BINS_INFO(_ZEND_BIN_FREE, x, y)

ZEND_API void ZEND_FASTCALL _efree_large(void *ptr, size_t size)
{
    ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
    {
        size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
        zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
        int page_num = page_offset / ZEND_MM_PAGE_SIZE;
        uint32_t pages_count = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE) / ZEND_MM_PAGE_SIZE;

        ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
        ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN);
        ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count);
        zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count);
    }
}

ZEND_API void ZEND_FASTCALL _efree_huge(void *ptr, size_t size)
{
    ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
    zend_mm_free_huge(AG(mm_heap), ptr);
}
#endif
ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
        return _malloc_custom(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    }
#endif
    return zend_mm_alloc_heap(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
        _efree_custom(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
        return;
    }
#endif
    zend_mm_free_heap(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
        return _realloc_custom(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    }
#endif
    return zend_mm_realloc_heap(AG(mm_heap), ptr, size, 0, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void* ZEND_FASTCALL _erealloc2(void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
        return _realloc_custom(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    }
#endif
    return zend_mm_realloc_heap(AG(mm_heap), ptr, size, 1, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API size_t ZEND_FASTCALL _zend_mem_block_size(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
        return 0;
    }
#endif
    return zend_mm_size(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void* ZEND_FASTCALL _safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    return _emalloc(zend_safe_address_guarded(nmemb, size, offset) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void* ZEND_FASTCALL _safe_malloc(size_t nmemb, size_t size, size_t offset)
{
    return pemalloc(zend_safe_address_guarded(nmemb, size, offset), 1);
}

ZEND_API void* ZEND_FASTCALL _safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    return _erealloc(ptr, zend_safe_address_guarded(nmemb, size, offset) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void* ZEND_FASTCALL _safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset)
{
    return perealloc(ptr, zend_safe_address_guarded(nmemb, size, offset), 1);
}
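/* zend_safe_address_guarded(nmemb, size, offset) evaluates
 * nmemb * size + offset and raises a fatal error on overflow, so none of
 * the _safe_* wrappers above can be tricked into a short allocation.
 * Illustration (64-bit build): _safe_emalloc(SIZE_MAX / 8, 16, 0) dies
 * with the overflow error instead of silently wrapping to a tiny size. */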
ZEND_API void* ZEND_FASTCALL _ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    void *p;

    size = zend_safe_address_guarded(nmemb, size, 0);
    p = _emalloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    memset(p, 0, size);
    return p;
}

ZEND_API char* ZEND_FASTCALL _estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    size_t length;
    char *p;

    length = strlen(s);
    if (UNEXPECTED(length + 1 == 0)) {
        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
    }
    p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    memcpy(p, s, length + 1);
    return p;
}

ZEND_API char* ZEND_FASTCALL _estrndup(const char *s, size_t length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    char *p;

    if (UNEXPECTED(length + 1 == 0)) {
        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
    }
    p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    memcpy(p, s, length);
    p[length] = 0;
    return p;
}

ZEND_API char* ZEND_FASTCALL zend_strndup(const char *s, size_t length)
{
    char *p;

    if (UNEXPECTED(length + 1 == 0)) {
        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
    }
    p = (char *) malloc(length + 1);
    if (UNEXPECTED(p == NULL)) {
        return p;
    }
    if (EXPECTED(length)) {
        memcpy(p, s, length);
    }
    p[length] = 0;
    return p;
}

ZEND_API int zend_set_memory_limit(size_t memory_limit)
{
#if ZEND_MM_LIMIT
    AG(mm_heap)->limit = (memory_limit >= ZEND_MM_CHUNK_SIZE) ? memory_limit : ZEND_MM_CHUNK_SIZE;
#endif
    return SUCCESS;
}

ZEND_API size_t zend_memory_usage(int real_usage)
{
#if ZEND_MM_STAT
    if (real_usage) {
        return AG(mm_heap)->real_size;
    } else {
        return AG(mm_heap)->size;
    }
#endif
    return 0;
}

ZEND_API size_t zend_memory_peak_usage(int real_usage)
{
#if ZEND_MM_STAT
    if (real_usage) {
        return AG(mm_heap)->real_peak;
    } else {
        return AG(mm_heap)->peak;
    }
#endif
    return 0;
}

ZEND_API void shutdown_memory_manager(int silent, int full_shutdown)
{
    zend_mm_shutdown(AG(mm_heap), full_shutdown, silent);
}
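/* The "tracked" allocator below backs USE_TRACKED_ALLOC=1: it forwards to
 * the system allocator but records each live pointer in
 * heap->tracked_allocs, keyed by the pointer value shifted right by
 * ZEND_MM_ALIGNMENT_LOG2 so it fits a zend_ulong hash key. This lets
 * tracked_free_all() release whatever is still live at shutdown, e.g.
 * when running under an external leak checker. */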
#if ZEND_MM_CUSTOM
static void *tracked_malloc(size_t size)
{
    zend_mm_heap *heap = AG(mm_heap);
    if (size > heap->limit) {
#if ZEND_DEBUG
        zend_mm_safe_error(heap,
            "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)",
            heap->limit, "file", 0, size);
#else
        zend_mm_safe_error(heap,
            "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)",
            heap->limit, size);
#endif
    }

    void *ptr = __zend_malloc(size);
    zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
    ZEND_ASSERT((void *) (uintptr_t) (h << ZEND_MM_ALIGNMENT_LOG2) == ptr);
    zend_hash_index_add_empty_element(heap->tracked_allocs, h);
    return ptr;
}

static void tracked_free(void *ptr) {
    zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
    zend_hash_index_del(AG(mm_heap)->tracked_allocs, h);
    free(ptr);
}

static void *tracked_realloc(void *ptr, size_t new_size) {
    zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
    zend_hash_index_del(AG(mm_heap)->tracked_allocs, h);
    ptr = __zend_realloc(ptr, new_size);
    h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
    ZEND_ASSERT((void *) (uintptr_t) (h << ZEND_MM_ALIGNMENT_LOG2) == ptr);
    zend_hash_index_add_empty_element(AG(mm_heap)->tracked_allocs, h);
    return ptr;
}

static void tracked_free_all(void) {
    HashTable *tracked_allocs = AG(mm_heap)->tracked_allocs;
    zend_ulong h;
    ZEND_HASH_FOREACH_NUM_KEY(tracked_allocs, h) {
        void *ptr = (void *) (uintptr_t) (h << ZEND_MM_ALIGNMENT_LOG2);
        free(ptr);
    } ZEND_HASH_FOREACH_END();
}
#endif
static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
{
    char *tmp;

#if ZEND_MM_CUSTOM
    tmp = getenv("USE_ZEND_ALLOC");
    if (tmp && !zend_atoi(tmp, 0)) {
        zend_bool tracked = (tmp = getenv("USE_TRACKED_ALLOC")) && zend_atoi(tmp, 0);
        zend_mm_heap *mm_heap = alloc_globals->mm_heap = malloc(sizeof(zend_mm_heap));
        memset(mm_heap, 0, sizeof(zend_mm_heap));
        mm_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
        mm_heap->limit = ((size_t)Z_L(-1) >> (size_t)Z_L(1));
        mm_heap->overflow = 0;

        if (!tracked) {
            /* Use system allocator. */
            mm_heap->custom_heap.std._malloc = __zend_malloc;
            mm_heap->custom_heap.std._free = free;
            mm_heap->custom_heap.std._realloc = __zend_realloc;
        } else {
            /* Use system allocator and track allocations for auto-free. */
            mm_heap->custom_heap.std._malloc = tracked_malloc;
            mm_heap->custom_heap.std._free = tracked_free;
            mm_heap->custom_heap.std._realloc = tracked_realloc;
            mm_heap->tracked_allocs = malloc(sizeof(HashTable));
            zend_hash_init(mm_heap->tracked_allocs, 1024, NULL, NULL, 1);
        }
        return;
    }
#endif

    tmp = getenv("USE_ZEND_ALLOC_HUGE_PAGES");
    if (tmp && zend_atoi(tmp, 0)) {
        zend_mm_use_huge_pages = 1;
    }
    alloc_globals->mm_heap = zend_mm_init();
}
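/* Environment variables consulted above: USE_ZEND_ALLOC=0 replaces the
 * Zend allocator with the system one, optionally combined with
 * USE_TRACKED_ALLOC=1 for the tracking wrappers, while
 * USE_ZEND_ALLOC_HUGE_PAGES=1 opts into huge-page backed chunks.
 * A typical invocation for memory debugging (illustrative command line):
 *
 *   USE_ZEND_ALLOC=0 valgrind --leak-check=full php script.php
 */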
#ifdef ZTS
static void alloc_globals_dtor(zend_alloc_globals *alloc_globals)
{
    zend_mm_shutdown(alloc_globals->mm_heap, 1, 1);
}
#endif

ZEND_API void start_memory_manager(void)
{
#ifdef ZTS
    ts_allocate_fast_id(&alloc_globals_id, &alloc_globals_offset, sizeof(zend_alloc_globals), (ts_allocate_ctor) alloc_globals_ctor, (ts_allocate_dtor) alloc_globals_dtor);
#else
    alloc_globals_ctor(&alloc_globals);
#endif
#ifndef _WIN32
# if defined(_SC_PAGESIZE)
    REAL_PAGE_SIZE = sysconf(_SC_PAGESIZE);
# elif defined(_SC_PAGE_SIZE)
    REAL_PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
# endif
#endif
}

ZEND_API zend_mm_heap *zend_mm_set_heap(zend_mm_heap *new_heap)
{
    zend_mm_heap *old_heap;

    old_heap = AG(mm_heap);
    AG(mm_heap) = (zend_mm_heap*)new_heap;
    return (zend_mm_heap*)old_heap;
}

ZEND_API zend_mm_heap *zend_mm_get_heap(void)
{
    return AG(mm_heap);
}

ZEND_API int zend_mm_is_custom_heap(zend_mm_heap *new_heap)
{
#if ZEND_MM_CUSTOM
    return AG(mm_heap)->use_custom_heap;
#else
    return 0;
#endif
}

ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
                                          void* (*_malloc)(size_t),
                                          void  (*_free)(void*),
                                          void* (*_realloc)(void*, size_t))
{
#if ZEND_MM_CUSTOM
    zend_mm_heap *_heap = (zend_mm_heap*)heap;

    if (!_malloc && !_free && !_realloc) {
        _heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
    } else {
        _heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
        _heap->custom_heap.std._malloc = _malloc;
        _heap->custom_heap.std._free = _free;
        _heap->custom_heap.std._realloc = _realloc;
    }
#endif
}
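/* A minimal usage sketch for the handler hooks above; my_malloc, my_free
 * and my_realloc are hypothetical functions, not part of this file:
 *
 *   zend_mm_set_custom_handlers(zend_mm_get_heap(),
 *                               my_malloc, my_free, my_realloc);
 *
 * Passing NULL for all three handlers reverts the heap to the built-in
 * allocator (ZEND_MM_CUSTOM_HEAP_NONE). */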
ZEND_API void zend_mm_get_custom_handlers(zend_mm_heap *heap,
                                          void* (**_malloc)(size_t),
                                          void  (**_free)(void*),
                                          void* (**_realloc)(void*, size_t))
{
#if ZEND_MM_CUSTOM
    zend_mm_heap *_heap = (zend_mm_heap*)heap;

    if (heap->use_custom_heap) {
        *_malloc = _heap->custom_heap.std._malloc;
        *_free = _heap->custom_heap.std._free;
        *_realloc = _heap->custom_heap.std._realloc;
    } else {
        *_malloc = NULL;
        *_free = NULL;
        *_realloc = NULL;
    }
#else
    *_malloc = NULL;
    *_free = NULL;
    *_realloc = NULL;
#endif
}

#if ZEND_DEBUG
ZEND_API void zend_mm_set_custom_debug_handlers(zend_mm_heap *heap,
                                                void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
                                                void  (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
                                                void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC))
{
#if ZEND_MM_CUSTOM
    zend_mm_heap *_heap = (zend_mm_heap*)heap;

    _heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_DEBUG;
    _heap->custom_heap.debug._malloc = _malloc;
    _heap->custom_heap.debug._free = _free;
    _heap->custom_heap.debug._realloc = _realloc;
#endif
}
#endif

ZEND_API zend_mm_storage *zend_mm_get_storage(zend_mm_heap *heap)
{
#if ZEND_MM_STORAGE
    return heap->storage;
#else
    return NULL;
#endif
}
ZEND_API zend_mm_heap *zend_mm_startup(void)
{
    return zend_mm_init();
}

ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void *data, size_t data_size)
{
#if ZEND_MM_STORAGE
    zend_mm_storage tmp_storage, *storage;
    zend_mm_chunk *chunk;
    zend_mm_heap *heap;

    memcpy((zend_mm_handlers*)&tmp_storage.handlers, handlers, sizeof(zend_mm_handlers));
    tmp_storage.data = data;
    chunk = (zend_mm_chunk*)handlers->chunk_alloc(&tmp_storage, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
    if (UNEXPECTED(chunk == NULL)) {
#if ZEND_MM_ERROR
#ifdef _WIN32
        stderr_last_error("Can't initialize heap");
#else
        fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
        return NULL;
    }
    heap = &chunk->heap_slot;
    chunk->heap = heap;
    chunk->next = chunk;
    chunk->prev = chunk;
    chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
    chunk->free_tail = ZEND_MM_FIRST_PAGE;
    chunk->num = 0;
    chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
    chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
    heap->main_chunk = chunk;
    heap->cached_chunks = NULL;
    heap->chunks_count = 1;
    heap->peak_chunks_count = 1;
    heap->cached_chunks_count = 0;
    heap->avg_chunks_count = 1.0;
    heap->last_chunks_delete_boundary = 0;
    heap->last_chunks_delete_count = 0;
#if ZEND_MM_STAT || ZEND_MM_LIMIT
    heap->real_size = ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
    heap->real_peak = ZEND_MM_CHUNK_SIZE;
    heap->size = 0;
    heap->peak = 0;
#endif
#if ZEND_MM_LIMIT
    heap->limit = ((size_t)Z_L(-1) >> (size_t)Z_L(1));
    heap->overflow = 0;
#endif
#if ZEND_MM_CUSTOM
    heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
#endif
    heap->storage = &tmp_storage;
    heap->huge_list = NULL;
    memset(heap->free_slot, 0, sizeof(heap->free_slot));
    storage = _zend_mm_alloc(heap, sizeof(zend_mm_storage) + data_size ZEND_FILE_LINE_CC ZEND_FILE_LINE_CC);
    if (!storage) {
        handlers->chunk_free(&tmp_storage, chunk, ZEND_MM_CHUNK_SIZE);
#if ZEND_MM_ERROR
#ifdef _WIN32
        stderr_last_error("Can't initialize heap");
#else
        fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
        return NULL;
    }
    memcpy(storage, &tmp_storage, sizeof(zend_mm_storage));
    if (data) {
        storage->data = (void*)(((char*)storage + sizeof(zend_mm_storage)));
        memcpy(storage->data, data, data_size);
    }
    heap->storage = storage;
    return heap;
#else
    return NULL;
#endif
}
static ZEND_COLD ZEND_NORETURN void zend_out_of_memory(void)
{
    fprintf(stderr, "Out of memory\n");
    exit(1);
}

ZEND_API void * __zend_malloc(size_t len)
{
    void *tmp = malloc(len);

    if (EXPECTED(tmp || !len)) {
        return tmp;
    }
    zend_out_of_memory();
}

ZEND_API void * __zend_calloc(size_t nmemb, size_t len)
{
    void *tmp;

    len = zend_safe_address_guarded(nmemb, len, 0);
    tmp = __zend_malloc(len);
    memset(tmp, 0, len);
    return tmp;
}

ZEND_API void * __zend_realloc(void *p, size_t len)
{
    p = realloc(p, len);
    if (EXPECTED(p || !len)) {
        return p;
    }
    zend_out_of_memory();
}

#ifdef ZTS
size_t zend_mm_globals_size(void)
{
    return sizeof(zend_alloc_globals);
}
#endif