/src/memory/tlsf/tlsf.c

https://bitbucket.org/vivkin/gam3b00bs/

#include <assert.h>
#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "tlsf.h"
#include "tlsfbits.h"

#pragma warning(push)
#pragma warning(disable : 4127)

/*
** Constants.
*/

/* Public constants: may be modified. */
enum tlsf_public
{
    /* log2 of number of linear subdivisions of block sizes. */
    SL_INDEX_COUNT_LOG2 = 5,
};

/* Private constants: do not modify. */
enum tlsf_private
{
    /* All allocation sizes and addresses are aligned to 4 bytes. */
    ALIGN_SIZE_LOG2 = 2,
    ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),

    /*
    ** We support allocations of sizes up to (1 << FL_INDEX_MAX) bytes.
    ** However, because we linearly subdivide the second-level lists, and
    ** our minimum size granularity is 4 bytes, it doesn't make sense to
    ** create first-level lists for sizes smaller than SL_INDEX_COUNT * 4,
    ** or (1 << (SL_INDEX_COUNT_LOG2 + 2)) bytes, as there we will be
    ** trying to split size ranges into more slots than we have available.
    ** Instead, we calculate the minimum threshold size, and place all
    ** blocks below that size into the 0th first-level list.
    */
    FL_INDEX_MAX = 30,
    SL_INDEX_COUNT = (1 << SL_INDEX_COUNT_LOG2),
    FL_INDEX_SHIFT = (SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2),
    FL_INDEX_COUNT = (FL_INDEX_MAX - FL_INDEX_SHIFT + 1),
    SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT),
};
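/*
** With the values above, the derived constants work out as follows (a
** quick sanity check, not additional configuration): ALIGN_SIZE = 4,
** SL_INDEX_COUNT = 32, FL_INDEX_SHIFT = 7, FL_INDEX_COUNT = 24,
** SMALL_BLOCK_SIZE = 128, and the largest supported allocation is
** (1 << 30) bytes = 1 GiB.
*/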
/*
** Cast and min/max macros.
*/
#define tlsf_cast(t, exp)   ((t) (exp))
#define tlsf_min(a, b)      ((a) < (b) ? (a) : (b))
#define tlsf_max(a, b)      ((a) > (b) ? (a) : (b))

/*
** Set assert macro, if it has not been provided by the user.
*/
#if !defined (tlsf_assert)
#define tlsf_assert assert
#endif

/*
** Static assertion mechanism.
*/
#define _tlsf_glue2(x, y) x ## y
#define _tlsf_glue(x, y) _tlsf_glue2(x, y)
#define tlsf_static_assert(exp) \
    typedef char _tlsf_glue(static_assert, __LINE__) [(exp) ? 1 : -1]

/* FIXME: This code only currently supports 32-bit architectures. */
tlsf_static_assert(sizeof(size_t) * CHAR_BIT == 32);

/* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= SL_INDEX_COUNT);

/* sizeof fl_bitmap must be >= FL_INDEX_COUNT. */
tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= FL_INDEX_COUNT);

/* Ensure we've properly tuned our sizes. */
tlsf_static_assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT);

/*
** Data structures and associated constants.
*/

/*
** Block header structure.
**
** There are several implementation subtleties involved:
** - The prev_phys_block field is only valid if the previous block is free.
** - The prev_phys_block field is actually stored in the last word of the
**   previous block. It appears at the beginning of this structure only to
**   simplify the implementation.
** - The next_free / prev_free fields are only valid if the block is free.
*/
typedef struct block_header_t
{
    /* Points to the previous physical block. */
    struct block_header_t* prev_phys_block;

    /* The size of this block, excluding the block header. */
    size_t size;

    /* Next and previous free blocks. */
    struct block_header_t* next_free;
    struct block_header_t* prev_free;
} block_header_t;
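/*
** In memory, the header a used block actually pays for is just the size
** word: user data begins immediately after it, and the prev_phys_block
** word is written into the last word of the preceding block (hence it is
** only valid while that block is free). A free block additionally reuses
** the first two words of its data area for the next_free / prev_free
** list links.
*/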
/*
** Since block sizes are always a multiple of 4, the two least significant
** bits of the size field are used to store the block status:
** - bit 0: whether block is busy or free
** - bit 1: whether previous block is busy or free
*/
static const size_t block_header_free_bit = 1 << 0;
static const size_t block_header_prev_free_bit = 1 << 1;
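/*
** For example, a 64-byte free block whose previous physical block is in
** use stores 64 | block_header_free_bit = 0x41 in its size field;
** block_size() masks the two flag bits off again to recover 64.
*/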
/*
** The size of the block header exposed to used blocks is the size field.
** The prev_phys_block field is stored *inside* the previous free block.
*/
static const size_t block_header_overhead = sizeof(size_t);

/* User data starts directly after the size field in a used block. */
static const size_t block_start_offset =
    offsetof(block_header_t, size) + sizeof(size_t);

/*
** A free block must be large enough to store its header minus the size of
** the prev_phys_block field, and no larger than the number of addressable
** bits for FL_INDEX.
*/
static const size_t block_size_min =
    sizeof(block_header_t) - sizeof(block_header_t*);
static const size_t block_size_max = 1 << FL_INDEX_MAX;

/* Empty lists point at this block to indicate they are free. */
static block_header_t block_null;

/* The TLSF pool structure. */
typedef struct pool_t
{
    /* Bitmaps for free lists. */
    unsigned int fl_bitmap;
    unsigned int sl_bitmap[FL_INDEX_COUNT];

    /* Head of free lists. */
    block_header_t* blocks[FL_INDEX_COUNT][SL_INDEX_COUNT];

    /* Memory statistics. */
    size_t pool_size;
    size_t pool_free_size;
} pool_t;

/* A type used for casting when doing pointer arithmetic. */
typedef unsigned int tlsfptr_t;

/*
** block_header_t member functions.
*/

static size_t block_size(const block_header_t* block)
{
    return block->size & ~(block_header_free_bit | block_header_prev_free_bit);
}

static void block_set_size(block_header_t* block, size_t size)
{
    const size_t oldsize = block->size;
    block->size = size | (oldsize & (block_header_free_bit | block_header_prev_free_bit));
}

static int block_is_last(const block_header_t* block)
{
    return 0 == block_size(block);
}

static int block_is_free(const block_header_t* block)
{
    return block->size & block_header_free_bit;
}

static void block_set_free(block_header_t* block)
{
    block->size |= block_header_free_bit;
}

static void block_set_used(block_header_t* block)
{
    block->size &= ~block_header_free_bit;
}

static int block_is_prev_free(const block_header_t* block)
{
    return block->size & block_header_prev_free_bit;
}

static void block_set_prev_free(block_header_t* block)
{
    block->size |= block_header_prev_free_bit;
}

static void block_set_prev_used(block_header_t* block)
{
    block->size &= ~block_header_prev_free_bit;
}
static block_header_t* block_from_ptr(const void* ptr)
{
    return tlsf_cast(block_header_t*,
        tlsf_cast(unsigned char*, ptr) - block_start_offset);
}

static void* block_to_ptr(const block_header_t* block)
{
    return tlsf_cast(void*,
        tlsf_cast(unsigned char*, block) + block_start_offset);
}

/* Return location of next block after block of given size. */
static block_header_t* offset_to_block(const void* ptr, size_t size)
{
    return tlsf_cast(block_header_t*,
        tlsf_cast(unsigned char*, ptr) + size);
}

/* Return location of previous block. */
static block_header_t* block_prev(const block_header_t* block)
{
    return block->prev_phys_block;
}

/* Return location of next existing block. */
static block_header_t* block_next(const block_header_t* block)
{
    block_header_t* next = offset_to_block(block_to_ptr(block),
        block_size(block) - block_header_overhead);
    tlsf_assert(!block_is_last(block));
    return next;
}

/* Link a new block with its physical neighbor, return the neighbor. */
static block_header_t* block_link_next(block_header_t* block)
{
    block_header_t* next = block_next(block);
    next->prev_phys_block = block;
    return next;
}

static void block_mark_as_free(block_header_t* block)
{
    /* Link the block to the next block, first. */
    block_header_t* next = block_link_next(block);
    block_set_prev_free(next);
    block_set_free(block);
}

static void block_mark_as_used(block_header_t* block)
{
    block_header_t* next = block_next(block);
    block_set_prev_used(next);
    block_set_used(block);
}

static size_t align_up(size_t x, size_t align)
{
    tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
    return (x + (align - 1)) & ~(align - 1);
}

static size_t align_down(size_t x, size_t align)
{
    tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
    return x - (x & (align - 1));
}

static void* align_ptr(void* ptr, size_t align)
{
    const tlsfptr_t aligned =
        (tlsf_cast(tlsfptr_t, ptr) + (align - 1)) & ~(align - 1);
    tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
    return tlsf_cast(void*, aligned);
}
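/*
** A brief worked example of the helpers above (illustrative only):
** align_up(13, 8) == 16 and align_down(13, 8) == 8, while align_ptr
** rounds a pointer value up in the same way align_up rounds an integer.
*/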
/*
** Adjust an allocation size to be aligned to word size, and no smaller
** than internal minimum.
*/
static size_t adjust_request_size(size_t size, size_t align)
{
    size_t adjust = 0;
    if (size && size < block_size_max)
    {
        const size_t aligned = align_up(size, align);
        adjust = tlsf_max(aligned, block_size_min);
    }
    return adjust;
}

/*
** TLSF utility functions. In most cases, these are direct translations of
** the documentation found in the white paper.
*/

static void mapping_insert(size_t size, int* fli, int* sli)
{
    int fl, sl;
    if (size < SMALL_BLOCK_SIZE)
    {
        /* Store small blocks in first list. */
        fl = 0;
        sl = size / (SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
    }
    else
    {
        fl = tlsf_fls(size);
        sl = (size >> (fl - SL_INDEX_COUNT_LOG2)) ^ (1 << SL_INDEX_COUNT_LOG2);
        fl -= (FL_INDEX_SHIFT - 1);
    }
    *fli = fl;
    *sli = sl;
}
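/*
** Worked example (with the constants above, SMALL_BLOCK_SIZE = 128 and
** SL_INDEX_COUNT = 32): a free block of 460 bytes has its most significant
** bit at position 8, so sl = (460 >> 3) ^ 32 = 25 and fl = 8 - 6 = 2.
** That places it in the class covering [456, 464) bytes: first level 2
** spans [256, 512) and is cut into 32 sub-ranges of 8 bytes each.
*/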
/* This version rounds up to the next block size (for allocations). */
static void mapping_search(size_t size, int* fli, int* sli)
{
    if (size >= (1 << SL_INDEX_COUNT_LOG2))
    {
        const size_t round = (1 << (tlsf_fls(size) - SL_INDEX_COUNT_LOG2)) - 1;
        size += round;
    }
    mapping_insert(size, fli, sli);
}
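/*
** Continuing the example above: a request for 460 bytes is rounded up by
** (1 << (8 - 5)) - 1 = 7 to 467, which maps to fl = 2, sl = 26 (the class
** starting at 464 bytes). Any block found in that list or a higher one is
** therefore guaranteed to satisfy the request, at the cost of a little
** internal fragmentation.
*/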
static block_header_t* search_suitable_block(pool_t* pool, int* fli, int* sli)
{
    int fl = *fli;
    int sl = *sli;

    /*
    ** First, search for a block in the list associated with the given
    ** fl/sl index.
    */
    unsigned int sl_map = pool->sl_bitmap[fl] & (0xffffffff << sl);
    if (!sl_map)
    {
        /* No block exists. Search in the next largest first-level list. */
        const unsigned int fl_map = pool->fl_bitmap & (0xffffffff << (fl + 1));
        if (!fl_map)
        {
            /* No free blocks available, memory has been exhausted. */
            return 0;
        }

        fl = tlsf_ffs(fl_map);
        *fli = fl;
        sl_map = pool->sl_bitmap[fl];
    }
    tlsf_assert(sl_map && "internal error - second level bitmap is null");
    sl = tlsf_ffs(sl_map);
    *sli = sl;

    /* Return the first block in the free list. */
    return pool->blocks[fl][sl];
}

/* Remove a free block from the free list. */
static void remove_free_block(pool_t* pool, block_header_t* block, int fl, int sl)
{
    block_header_t* prev = block->prev_free;
    block_header_t* next = block->next_free;
    tlsf_assert(prev && "prev_free field can not be null");
    tlsf_assert(next && "next_free field can not be null");
    next->prev_free = prev;
    prev->next_free = next;

    /* If this block is the head of the free list, set new head. */
    if (pool->blocks[fl][sl] == block)
    {
        pool->blocks[fl][sl] = next;

        /* If the new head is null, clear the bitmap. */
        if (next == &block_null)
        {
            pool->sl_bitmap[fl] &= ~(1 << sl);

            /* If the second bitmap is now empty, clear the fl bitmap. */
            if (!pool->sl_bitmap[fl])
            {
                pool->fl_bitmap &= ~(1 << fl);
            }
        }
    }
}
/* Insert a free block into the free block list. */
static void insert_free_block(pool_t* pool, block_header_t* block, int fl, int sl)
{
    block_header_t* current = pool->blocks[fl][sl];
    tlsf_assert(current && "free list cannot have a null entry");
    tlsf_assert(block && "cannot insert a null entry into the free list");
    block->next_free = current;
    block->prev_free = &block_null;
    current->prev_free = block;

    /*
    ** Insert the new block at the head of the list, and mark the first-
    ** and second-level bitmaps appropriately.
    */
    pool->blocks[fl][sl] = block;
    pool->fl_bitmap |= (1 << fl);
    pool->sl_bitmap[fl] |= (1 << sl);
}

/* Remove a given block from the free list. */
static void block_remove(pool_t* pool, block_header_t* block)
{
    int fl, sl;
    mapping_insert(block_size(block), &fl, &sl);
    remove_free_block(pool, block, fl, sl);
}

/* Insert a given block into the free list. */
static void block_insert(pool_t* pool, block_header_t* block)
{
    int fl, sl;
    mapping_insert(block_size(block), &fl, &sl);
    insert_free_block(pool, block, fl, sl);
}

static int block_can_split(block_header_t* block, size_t size)
{
    return block_size(block) >= sizeof(block_header_t) + size;
}

/* Split a block into two, the second of which is free. */
static block_header_t* block_split(block_header_t* block, size_t size)
{
    /* Calculate the amount of space left in the remaining block. */
    block_header_t* remaining =
        offset_to_block(block_to_ptr(block), size - block_header_overhead);
    const size_t remain_size = block_size(block) - (size + block_header_overhead);
    tlsf_assert(block_size(block) == remain_size + size + block_header_overhead);
    block_set_size(remaining, remain_size);
    tlsf_assert(block_size(remaining) >= block_size_min && "block split with invalid size");
    block_set_size(block, size);
    block_mark_as_free(remaining);
    return remaining;
}
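/*
** For example, splitting a 200-byte free block at size = 100 places the
** header of the remainder 96 bytes past the user pointer (100 minus the
** 4-byte size word) and leaves it with 200 - (100 + 4) = 96 bytes, which
** comfortably exceeds block_size_min.
*/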
/* Absorb a free block's storage into an adjacent previous free block. */
static block_header_t* block_absorb(block_header_t* prev, block_header_t* block)
{
    tlsf_assert(!block_is_last(prev) && "previous block can't be last!");
    /* Note: Leaves flags untouched. */
    prev->size += block_size(block) + block_header_overhead;
    block_link_next(prev);
    return prev;
}

/* Merge a just-freed block with an adjacent previous free block. */
static block_header_t* block_merge_prev(pool_t* pool, block_header_t* block)
{
    if (block_is_prev_free(block))
    {
        block_header_t* prev = block_prev(block);
        tlsf_assert(prev && "prev physical block can't be null");
        tlsf_assert(block_is_free(prev) && "prev block is not free though marked as such");
        block_remove(pool, prev);
        block = block_absorb(prev, block);
    }
    return block;
}

/* Merge a just-freed block with an adjacent free block. */
static block_header_t* block_merge_next(pool_t* pool, block_header_t* block)
{
    block_header_t* next = block_next(block);
    tlsf_assert(next && "next physical block can't be null");
    if (block_is_free(next))
    {
        tlsf_assert(!block_is_last(block) && "previous block can't be last!");
        block_remove(pool, next);
        block = block_absorb(block, next);
    }
    return block;
}
/* Trim any trailing block space off the end of a block, return to pool. */
static void block_trim_free(pool_t* pool, block_header_t* block, size_t size)
{
    tlsf_assert(block_is_free(block) && "block must be free");
    if (block_can_split(block, size))
    {
        block_header_t* remaining_block = block_split(block, size);
        block_link_next(block);
        block_set_prev_free(remaining_block);
        block_insert(pool, remaining_block);
    }
}

/* Trim any trailing block space off the end of a used block, return to pool. */
static void block_trim_used(pool_t* pool, block_header_t* block, size_t size)
{
    tlsf_assert(!block_is_free(block) && "block must be used");
    if (block_can_split(block, size))
    {
        /* If the next block is free, we must coalesce. */
        block_header_t* remaining_block = block_split(block, size);
        block_set_prev_used(remaining_block);
        remaining_block = block_merge_next(pool, remaining_block);
        block_insert(pool, remaining_block);
    }
}

static block_header_t* block_trim_free_leading(pool_t* pool, block_header_t* block, size_t size)
{
    block_header_t* remaining_block = block;
    if (block_can_split(block, size))
    {
        /* We want the 2nd block. */
        remaining_block = block_split(block, size - block_header_overhead);
        block_set_prev_free(remaining_block);
        block_link_next(block);
        block_insert(pool, block);
    }
    return remaining_block;
}

static block_header_t* block_locate_free(pool_t* pool, size_t size)
{
    int fl = 0;
    int sl = 0;
    block_header_t* block = 0;
    if (size)
    {
        mapping_search(size, &fl, &sl);
        block = search_suitable_block(pool, &fl, &sl);
    }
    if (block)
    {
        tlsf_assert(block_size(block) >= size);
        remove_free_block(pool, block, fl, sl);
    }
    return block;
}

static void* block_prepare_used(pool_t* pool, block_header_t* block, size_t size)
{
    void* p = 0;
    if (block)
    {
        block_trim_free(pool, block, size);
        block_mark_as_used(block);
        pool->pool_free_size -= block_size(block);
        p = block_to_ptr(block);
    }
    return p;
}
/* Clear structure and point all empty lists at the null block. */
static void pool_construct(pool_t* pool)
{
    int i, j;

    block_null.next_free = &block_null;
    block_null.prev_free = &block_null;

    pool->fl_bitmap = 0;
    for (i = 0; i < FL_INDEX_COUNT; ++i)
    {
        pool->sl_bitmap[i] = 0;
        for (j = 0; j < SL_INDEX_COUNT; ++j)
        {
            pool->blocks[i][j] = &block_null;
        }
    }

    pool->pool_size = pool->pool_free_size = 0;
}

/*
** Debugging utilities.
*/

typedef struct integrity_t
{
    int prev_status;
    int status;
} integrity_t;

#define tlsf_insist(x) { tlsf_assert(x); if (!(x)) { status--; } }

static void integrity_walker(void* ptr, size_t size, int used, void* user)
{
    block_header_t* block = block_from_ptr(ptr);
    integrity_t* integ = tlsf_cast(integrity_t*, user);
    const int this_prev_status = block_is_prev_free(block) ? 1 : 0;
    const int this_status = block_is_free(block) ? 1 : 0;
    const size_t this_block_size = block_size(block);

    int status = 0;
    tlsf_insist(integ->prev_status == this_prev_status && "prev status incorrect");
    tlsf_insist(size == this_block_size && "block size incorrect");

    integ->prev_status = this_status;
    integ->status += status;

    (void)used;
}
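/*
** Walk the entire heap and cross-check the free lists and bitmaps against
** the physical block chain. Returns 0 when the heap is consistent; each
** failed check decrements the returned value (see tlsf_insist above).
*/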
int tlsf_check_heap(tlsf_pool tlsf)
{
    int i, j;

    pool_t* pool = tlsf_cast(pool_t*, tlsf);
    int status = 0;

    /* Check that the blocks are physically correct. */
    integrity_t integ = { 0, 0 };
    tlsf_walk_heap(tlsf, integrity_walker, &integ);
    status = integ.status;

    /* Check that the free lists and bitmaps are accurate. */
    for (i = 0; i < FL_INDEX_COUNT; ++i)
    {
        for (j = 0; j < SL_INDEX_COUNT; ++j)
        {
            const int fl_map = pool->fl_bitmap & (1 << i);
            const int sl_list = pool->sl_bitmap[i];
            const int sl_map = sl_list & (1 << j);
            const block_header_t* block = pool->blocks[i][j];

            /* Check that first- and second-level lists agree. */
            if (!fl_map)
            {
                tlsf_insist(!sl_map && "second-level map must be null");
            }

            if (!sl_map)
            {
                tlsf_insist(block == &block_null && "block list must be null");
                continue;
            }

            /* Check that there is at least one free block. */
            tlsf_insist(sl_list && "no free blocks in second-level map");
            tlsf_insist(block != &block_null && "block should not be null");

            while (block != &block_null)
            {
                int fli, sli;
                tlsf_insist(block_is_free(block) && "block should be free");
                tlsf_insist(!block_is_prev_free(block) && "blocks should have coalesced");
                tlsf_insist(!block_is_free(block_next(block)) && "blocks should have coalesced");
                tlsf_insist(block_is_prev_free(block_next(block)) && "block should be free");
                tlsf_insist(block_size(block) >= block_size_min && "block not minimum size");

                mapping_insert(block_size(block), &fli, &sli);
                tlsf_insist(fli == i && sli == j && "block size indexed in wrong list");
                block = block->next_free;
            }
        }
    }

    return status;
}
#undef tlsf_insist

static void default_walker(void* ptr, size_t size, int used, void* user)
{
    (void)user;
    printf("\t%p %s size: %x\n", ptr, used ? "used" : "free",
        tlsf_cast(unsigned int, size));
}

void tlsf_walk_heap(tlsf_pool pool, tlsf_walker walker, void* user)
{
    tlsf_walker heap_walker = walker ? walker : default_walker;
    block_header_t* block =
        offset_to_block(pool, sizeof(pool_t) - block_header_overhead);
    while (block && !block_is_last(block))
    {
        heap_walker(
            block_to_ptr(block),
            block_size(block),
            !block_is_free(block),
            user);
        block = block_next(block);
    }
}
size_t tlsf_block_size(void* ptr)
{
    size_t size = 0;
    if (ptr)
    {
        const block_header_t* block = block_from_ptr(ptr);
        size = block_size(block);
    }
    return size;
}

/*
** Overhead of the TLSF structures in a given memory block passed to
** tlsf_create, equal to the size of a pool_t plus overhead of the initial
** free block and the sentinel block.
*/
size_t tlsf_overhead()
{
    const size_t pool_overhead = sizeof(pool_t) + 2 * block_header_overhead;
    return pool_overhead;
}
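/*
** With the constants above (FL_INDEX_COUNT = 24, SL_INDEX_COUNT = 32) and
** 4-byte pointers, pool_t is dominated by the 24 x 32 table of list heads,
** so the per-pool overhead is roughly 3 KB. The exact figure depends on
** the target ABI; treat this as an estimate, not a guarantee.
*/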
/*
** TLSF main interface. Right out of the white paper.
*/

tlsf_pool tlsf_create(void* mem, size_t bytes)
{
    block_header_t* block;
    block_header_t* next;

    const size_t pool_overhead = tlsf_overhead();
    const size_t pool_bytes = align_down(bytes - pool_overhead, ALIGN_SIZE);
    pool_t* pool = tlsf_cast(pool_t*, mem);

    if (pool_bytes < block_size_min || pool_bytes > block_size_max)
    {
        printf("tlsf_create: Pool size must be between %u and %u bytes.\n",
            tlsf_cast(unsigned int, pool_overhead + block_size_min),
            tlsf_cast(unsigned int, pool_overhead + block_size_max));
        return 0;
    }

    /* Construct a valid pool object. */
    pool_construct(pool);

    /*
    ** Create the main free block. Offset the start of the block slightly
    ** so that the prev_phys_block field falls inside of the pool
    ** structure - it will never be used.
    */
    block = offset_to_block(
        tlsf_cast(void*, pool), sizeof(pool_t) - block_header_overhead);
    block_set_size(block, align_down(pool_bytes, ALIGN_SIZE));
    block_set_free(block);
    block_set_prev_used(block);
    block_insert(pool, block);

    /* Split the block to create a zero-size pool sentinel block. */
    next = block_link_next(block);
    block_set_size(next, 0);
    block_set_used(next);
    block_set_prev_free(next);

    pool->pool_size = pool->pool_free_size = pool_bytes;

    return tlsf_cast(tlsf_pool, pool);
}

void tlsf_destroy(tlsf_pool pool)
{
    /* Nothing to do. */
    (void)pool;
}
void* tlsf_malloc(tlsf_pool tlsf, size_t size)
{
    pool_t* pool = tlsf_cast(pool_t*, tlsf);
    const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
    block_header_t* block = block_locate_free(pool, adjust);
    return block_prepare_used(pool, block, adjust);
}

void* tlsf_memalign(tlsf_pool tlsf, size_t align, size_t size)
{
    pool_t* pool = tlsf_cast(pool_t*, tlsf);
    const size_t adjust = adjust_request_size(size, ALIGN_SIZE);

    /*
    ** We must allocate an additional minimum block size bytes so that if
    ** our free block will leave an alignment gap which is smaller, we can
    ** trim a leading free block and release it back to the heap. We must
    ** do this because the previous physical block is in use, therefore
    ** the prev_phys_block field is not valid, and we can't simply adjust
    ** the size of that block.
    */
    const ptrdiff_t gap_minimum = tlsf_cast(ptrdiff_t, sizeof(block_header_t));
    const size_t size_with_gap = adjust_request_size(adjust + align + gap_minimum, align);

    /* If alignment is less than or equal to base alignment, we're done. */
    const size_t aligned_size = (align <= ALIGN_SIZE) ? adjust : size_with_gap;

    block_header_t* block = block_locate_free(pool, aligned_size);

    /* This can't be a static assert. */
    tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_overhead);

    if (block)
    {
        void* ptr = block_to_ptr(block);
        void* aligned = align_ptr(ptr, align);
        ptrdiff_t gap = tlsf_cast(ptrdiff_t,
            tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));

        /* If gap size is too small, offset to next aligned boundary. */
        if (gap && gap < gap_minimum)
        {
            const ptrdiff_t gap_remain = gap_minimum - gap;
            const ptrdiff_t offset = tlsf_max(gap_remain, tlsf_cast(ptrdiff_t, align));
            void* next_aligned = tlsf_cast(void*,
                tlsf_cast(tlsfptr_t, aligned) + tlsf_cast(tlsfptr_t, offset));
            aligned = align_ptr(next_aligned, align);
            gap = tlsf_cast(ptrdiff_t,
                tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
        }

        if (gap)
        {
            tlsf_assert(gap >= gap_minimum && "gap size too small");
            block = block_trim_free_leading(pool, block, gap);
        }
    }

    return block_prepare_used(pool, block, adjust);
}
void tlsf_free(tlsf_pool tlsf, void* ptr)
{
    /* Don't attempt to free a NULL pointer. */
    if (ptr)
    {
        pool_t* pool = tlsf_cast(pool_t*, tlsf);
        block_header_t* block = block_from_ptr(ptr);
        tlsf_assert(!block_is_free(block) && "block already marked as free");
        pool->pool_free_size += block_size(block);
        block_mark_as_free(block);
        block = block_merge_prev(pool, block);
        block = block_merge_next(pool, block);
        block_insert(pool, block);
    }
}
/*
** The TLSF block information provides us with enough information to
** provide a reasonably intelligent implementation of realloc, growing or
** shrinking the currently allocated block as required.
**
** This routine handles the somewhat esoteric edge cases of realloc:
** - a non-zero size with a null pointer will behave like malloc
** - a zero size with a non-null pointer will behave like free
** - a request that cannot be satisfied will leave the original buffer
**   untouched
** - an extended buffer size will leave the newly-allocated area with
**   contents undefined
*/
void* tlsf_realloc(tlsf_pool tlsf, void* ptr, size_t size)
{
    pool_t* pool = tlsf_cast(pool_t*, tlsf);
    void* p = 0;

    /* Zero-size requests are treated as free. */
    if (ptr && size == 0)
    {
        tlsf_free(tlsf, ptr);
    }
    /* Requests with NULL pointers are treated as malloc. */
    else if (!ptr)
    {
        p = tlsf_malloc(tlsf, size);
    }
    else
    {
        block_header_t* block = block_from_ptr(ptr);
        block_header_t* next = block_next(block);
        const size_t cursize = block_size(block);
        const size_t combined = cursize + block_size(next) + block_header_overhead;
        const size_t adjust = adjust_request_size(size, ALIGN_SIZE);

        /*
        ** If the next block is used, or when combined with the current
        ** block, does not offer enough space, we must reallocate and copy.
        */
        if (adjust > cursize && (!block_is_free(next) || adjust > combined))
        {
            p = tlsf_malloc(tlsf, size);
            if (p)
            {
                const size_t minsize = tlsf_min(cursize, size);
                memcpy(p, ptr, minsize);
                tlsf_free(tlsf, ptr);
            }
        }
        else
        {
            /* Do we need to expand to the next block? */
            if (adjust > cursize)
            {
                block_merge_next(pool, block);
                block_mark_as_used(block);
            }

            /* Trim the resulting block and return the original pointer. */
            block_trim_used(pool, block, adjust);
            p = ptr;
        }
    }

    return p;
}

void tlsf_statistics(tlsf_pool tlsf, size_t* total_size, size_t* free_size)
{
    pool_t* pool = tlsf_cast(pool_t*, tlsf);
    *total_size = pool->pool_size;
    *free_size = pool->pool_free_size;
}
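/*
** A minimal usage sketch (not part of the allocator itself): it assumes a
** statically provided buffer and a hypothetical TLSF_EXAMPLE_USAGE guard,
** and shows the expected call sequence through the public interface above.
*/
#if defined (TLSF_EXAMPLE_USAGE)
static char example_arena[64 * 1024];

int tlsf_example_usage(void)
{
    size_t total = 0, avail = 0;
    int status;
    void* p;

    /* The arena must cover tlsf_overhead() plus the blocks we allocate. */
    tlsf_pool pool = tlsf_create(example_arena, sizeof(example_arena));
    if (!pool)
    {
        return -1;
    }

    /* Allocate, grow, and release a block from the pool. */
    p = tlsf_malloc(pool, 100);
    p = tlsf_realloc(pool, p, 500);
    tlsf_free(pool, p);

    /* Query statistics and verify heap consistency (0 means consistent). */
    tlsf_statistics(pool, &total, &avail);
    printf("pool: %u bytes total, %u bytes free\n",
        tlsf_cast(unsigned int, total), tlsf_cast(unsigned int, avail));
    status = tlsf_check_heap(pool);

    tlsf_destroy(pool);
    return status;
}
#endif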
#pragma warning(pop)