
/src/memory/tlsf/tlsf.c

https://bitbucket.org/vivkin/gam3b00bs/
C | 938 lines | 622 code | 145 blank | 171 comment | 92 complexity | 2fa059899d3c3d15b48e05666beceb97 MD5
  1#include <assert.h>
  2#include <limits.h>
  3#include <stddef.h>
  4#include <stdio.h>
  5#include <stdlib.h>
  6#include <string.h>
  7
  8#include "tlsf.h"
  9#include "tlsfbits.h"
 10
 11#pragma warning(push)
 12#pragma warning(disable : 4127)
 13
 14/*
 15** Constants.
 16*/
 17
 18/* Public constants: may be modified. */
 19enum tlsf_public
 20{
 21	/* log2 of number of linear subdivisions of block sizes. */
 22	SL_INDEX_COUNT_LOG2 = 5,
 23};
 24
 25/* Private constants: do not modify. */
 26enum tlsf_private
 27{
 28	/* All allocation sizes and addresses are aligned to 4 bytes. */
 29	ALIGN_SIZE_LOG2 = 2,
 30	ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),
 31
 32	/*
 33	** We support allocations of sizes up to (1 << FL_INDEX_MAX) bytes.
 34	** However, because we linearly subdivide the second-level lists, and
 35	** our minimum size granularity is 4 bytes, it doesn't make sense to
 36	** create first-level lists for sizes smaller than SL_INDEX_COUNT * 4,
 37	** or (1 << (SL_INDEX_COUNT_LOG2 + 2)) bytes, since below that we would
 38	** be trying to split size ranges into more slots than we have available.
 39	** Instead, we calculate the minimum threshold size, and place all
 40	** blocks below that size into the 0th first-level list.
 41	*/
 42	FL_INDEX_MAX = 30,
 43	SL_INDEX_COUNT = (1 << SL_INDEX_COUNT_LOG2),
 44	FL_INDEX_SHIFT = (SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2),
 45	FL_INDEX_COUNT = (FL_INDEX_MAX - FL_INDEX_SHIFT + 1),
 46
 47	SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT),
 48};
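
/*
** For reference, the values above work out as follows: ALIGN_SIZE = 4,
** SL_INDEX_COUNT = 32, FL_INDEX_SHIFT = 7, FL_INDEX_COUNT = 24, and
** SMALL_BLOCK_SIZE = 128. First-level index 0 holds all blocks smaller
** than 128 bytes in 32 linear lists of 4 bytes each; first-level index
** fl >= 1 covers sizes [1 << (fl + 6), 1 << (fl + 7)), again split into
** 32 lists. The largest supported allocation is 1 << 30 bytes.
*/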
 49
 50/*
 51** Cast and min/max macros.
 52*/
 53
 54#define tlsf_cast(t, exp)	((t) (exp))
 55#define tlsf_min(a, b)		((a) < (b) ? (a) : (b))
 56#define tlsf_max(a, b)		((a) > (b) ? (a) : (b))
 57
 58/*
 59** Set assert macro, if it has not been provided by the user.
 60*/
 61#if !defined (tlsf_assert)
 62#define tlsf_assert assert
 63#endif
 64
 65/*
 66** Static assertion mechanism.
 67*/
 68
 69#define _tlsf_glue2(x, y) x ## y
 70#define _tlsf_glue(x, y) _tlsf_glue2(x, y)
 71#define tlsf_static_assert(exp) \
 72	typedef char _tlsf_glue(static_assert, __LINE__) [(exp) ? 1 : -1]
 73
 74/* FIXME: This code only currently supports 32-bit architectures. */
 75tlsf_static_assert(sizeof(size_t) * CHAR_BIT == 32);
 76
 77/* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
 78tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= SL_INDEX_COUNT);
 79
 80/* FL_INDEX_COUNT must be <= number of bits in fl_bitmap's storage type. */
 81tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= FL_INDEX_COUNT);
 82
 83/* Ensure we've properly tuned our sizes. */
 84tlsf_static_assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
 85
 86/*
 87** Data structures and associated constants.
 88*/
 89
 90/*
 91** Block header structure.
 92**
 93** There are several implementation subtleties involved:
 94** - The prev_phys_block field is only valid if the previous block is free.
 95** - The prev_phys_block field is actually stored in the last word of the
 96**   previous block. It appears at the beginning of this structure only to
 97**   simplify the implementation.
 98** - The next_free / prev_free fields are only valid if the block is free.
 99*/
100typedef struct block_header_t
101{
102	/* Points to the previous physical block. */
103	struct block_header_t* prev_phys_block;
104
105	/* The size of this block, excluding the block header. */
106	size_t size;
107
108	/* Next and previous free blocks. */
109	struct block_header_t* next_free;
110	struct block_header_t* prev_free;
111} block_header_t;
112
113/*
114** Since block sizes are always a multiple of 4, the two least significant
115** bits of the size field are used to store the block status:
116** - bit 0: whether block is busy or free
117** - bit 1: whether previous block is busy or free
118*/
119static const size_t block_header_free_bit = 1 << 0;
120static const size_t block_header_prev_free_bit = 1 << 1;
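
/*
** For example, a 64-byte free block whose previous physical block is in
** use stores (64 | block_header_free_bit) == 0x41 in its size field;
** block_size() masks off both status bits and returns 64.
*/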
121
122/*
123** The size of the block header exposed to used blocks is the size field.
124** The prev_phys_block field is stored *inside* the previous free block.
125*/
126static const size_t block_header_overhead = sizeof(size_t);
127
128/* User data starts directly after the size field in a used block. */
129static const size_t block_start_offset =
130	offsetof(block_header_t, size) + sizeof(size_t);
131
132/*
133** A free block must be large enough to store its header minus the size of
134** the prev_phys_block field, and no larger than the maximum block size,
135** (1 << FL_INDEX_MAX) bytes.
136*/
137static const size_t block_size_min = 
138	sizeof(block_header_t) - sizeof(block_header_t*);
139static const size_t block_size_max = 1 << FL_INDEX_MAX;
140
141/* Empty lists point at this block to indicate they are free. */
142static block_header_t block_null;
143
144/* The TLSF pool structure. */
145typedef struct pool_t
146{
147	/* Bitmaps for free lists. */
148	unsigned int fl_bitmap;
149	unsigned int sl_bitmap[FL_INDEX_COUNT];
150
151	/* Head of free lists. */
152	block_header_t* blocks[FL_INDEX_COUNT][SL_INDEX_COUNT];
153
154	/* Memory statistics */
155	size_t pool_size;
156	size_t pool_free_size;
157} pool_t;
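
/*
** Invariant (illustrative): bit fl of fl_bitmap is set exactly when
** sl_bitmap[fl] is non-zero, and bit sl of sl_bitmap[fl] is set exactly
** when blocks[fl][sl] is not &block_null. For example, if the only free
** block is 300 bytes, mapping_insert() places it at fl = 2, sl = 5, so
** fl_bitmap == 0x4, sl_bitmap[2] == 0x20, and every other list head
** points at block_null.
*/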
158
159/* A type used for casting when doing pointer arithmetic. */
160typedef unsigned int tlsfptr_t;
161
162/*
163** block_header_t member functions.
164*/
165
166static size_t block_size(const block_header_t* block)
167{
168	return block->size & ~(block_header_free_bit | block_header_prev_free_bit);
169}
170
171static void block_set_size(block_header_t* block, size_t size)
172{
173	const size_t oldsize = block->size;
174	block->size = size | (oldsize & (block_header_free_bit | block_header_prev_free_bit));
175}
176
177static int block_is_last(const block_header_t* block)
178{
179	return 0 == block_size(block);
180}
181
182static int block_is_free(const block_header_t* block)
183{
184	return block->size & block_header_free_bit;
185}
186
187static void block_set_free(block_header_t* block)
188{
189	block->size |= block_header_free_bit;
190}
191
192static void block_set_used(block_header_t* block)
193{
194	block->size &= ~block_header_free_bit;
195}
196
197static int block_is_prev_free(const block_header_t* block)
198{
199	return block->size & block_header_prev_free_bit;
200}
201
202static void block_set_prev_free(block_header_t* block)
203{
204	block->size |= block_header_prev_free_bit;
205}
206
207static void block_set_prev_used(block_header_t* block)
208{
209	block->size &= ~block_header_prev_free_bit;
210}
211
212static block_header_t* block_from_ptr(const void* ptr)
213{
214	return tlsf_cast(block_header_t*,
215		tlsf_cast(unsigned char*, ptr) - block_start_offset);
216}
217
218static void* block_to_ptr(const block_header_t* block)
219{
220	return tlsf_cast(void*,
221		tlsf_cast(unsigned char*, block) + block_start_offset);
222}
223
224/* Return location of next block after block of given size. */
225static block_header_t* offset_to_block(const void* ptr, size_t size)
226{
227	return tlsf_cast(block_header_t*,
228		tlsf_cast(unsigned char*, ptr) + size);
229}
230
231/* Return location of previous block. */
232static block_header_t* block_prev(const block_header_t* block)
233{
234	return block->prev_phys_block;
235}
236
237/* Return location of next existing block. */
238static block_header_t* block_next(const block_header_t* block)
239{
240	block_header_t* next = offset_to_block(block_to_ptr(block),
241		block_size(block) - block_header_overhead);
242	tlsf_assert(!block_is_last(block));
243	return next;
244}
245
246/* Link a new block with its physical neighbor, return the neighbor. */
247static block_header_t* block_link_next(block_header_t* block)
248{
249	block_header_t* next = block_next(block);
250	next->prev_phys_block = block;
251	return next;
252}
253
254static void block_mark_as_free(block_header_t* block)
255{
256	/* Link the block to the next block, first. */
257	block_header_t* next = block_link_next(block);
258	block_set_prev_free(next);
259	block_set_free(block);
260}
261
262static void block_mark_as_used(block_header_t* block)
263{
264	block_header_t* next = block_next(block);
265	block_set_prev_used(next);
266	block_set_used(block);
267}
268
269static size_t align_up(size_t x, size_t align)
270{
271	tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
272	return (x + (align - 1)) & ~(align - 1);
273}
274
275static size_t align_down(size_t x, size_t align)
276{
277	tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
278	return x - (x & (align - 1));
279}
280
281static void* align_ptr(void* ptr, size_t align)
282{
283	const tlsfptr_t aligned =
284		(tlsf_cast(tlsfptr_t, ptr) + (align - 1)) & ~(align - 1);
285	tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
286	return tlsf_cast(void*, aligned);
287}
288
289/*
290** Adjust an allocation size to be aligned to word size, and no smaller
291** than internal minimum.
292*/
293static size_t adjust_request_size(size_t size, size_t align)
294{
295	size_t adjust = 0;
296	if (size && size < block_size_max)
297	{
298		const size_t aligned = align_up(size, align);
299		adjust = tlsf_max(aligned, block_size_min);
300	}
301	return adjust;
302}
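
/*
** For example (assuming this 32-bit layout with no struct padding, so
** block_size_min == 12): a 1-byte request is aligned up to 4 and then
** raised to 12; a 100-byte request is already aligned and stays 100; a
** zero-byte or oversized request yields 0, which block_locate_free()
** treats as "nothing to allocate".
*/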
303
304/*
305** TLSF utility functions. In most cases, these are direct translations of
306** the documentation found in the white paper.
307*/
308
309static void mapping_insert(size_t size, int* fli, int* sli)
310{
311	int fl, sl;
312	if (size < SMALL_BLOCK_SIZE)
313	{
314		/* Store small blocks in first list. */
315		fl = 0;
316		sl = size / (SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
317	}
318	else
319	{
320		fl = tlsf_fls(size);
321		sl = (size >> (fl - SL_INDEX_COUNT_LOG2)) ^ (1 << SL_INDEX_COUNT_LOG2);
322		fl -= (FL_INDEX_SHIFT - 1);
323	}
324	*fli = fl;
325	*sli = sl;
326}
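
/*
** Worked example: size == 460 takes the large-block path. tlsf_fls(460)
** is 8, so sl = (460 >> 3) ^ (1 << 5) = 57 ^ 32 = 25 and fl = 8 - 6 = 2;
** the block belongs to blocks[2][25], which spans sizes [456, 464). A
** small request such as 60 maps to fl = 0, sl = 60 / 4 = 15.
*/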
327
328/* This version rounds up to the next block size (for allocations) */
329static void mapping_search(size_t size, int* fli, int* sli)
330{
331	if (size >= (1 << SL_INDEX_COUNT_LOG2))
332	{
333		const size_t round = (1 << (tlsf_fls(size) - SL_INDEX_COUNT_LOG2)) - 1;
334		size += round;
335	}
336	mapping_insert(size, fli, sli);
337}
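
/*
** Worked example: searching for 460 bytes adds round = (1 << (8 - 5)) - 1
** = 7, giving 467, which maps to fl = 2, sl = 26 (sizes [464, 472)).
** Every block found in or above that list is therefore at least 460
** bytes; without the rounding, the search would start at a list whose
** smallest member (456) could be too small.
*/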
338
339static block_header_t* search_suitable_block(pool_t* pool, int* fli, int* sli)
340{
341	int fl = *fli;
342	int sl = *sli;
343
344	/*
345	** First, search for a block in the list associated with the given
346	** fl/sl index.
347	*/
348	unsigned int sl_map = pool->sl_bitmap[fl] & (0xffffffff << sl);
349	if (!sl_map)
350	{
351		/* No block exists. Search in the next largest first-level list. */
352		const unsigned int fl_map = pool->fl_bitmap & (0xffffffff << (fl + 1));
353		if (!fl_map)
354		{
355			/* No free blocks available, memory has been exhausted. */
356			return 0;
357		}
358
359		fl = tlsf_ffs(fl_map);
360		*fli = fl;
361		sl_map = pool->sl_bitmap[fl];
362	}
363	tlsf_assert(sl_map && "internal error - second level bitmap is null");
364	sl = tlsf_ffs(sl_map);
365	*sli = sl;
366
367	/* Return the first block in the free list. */
368	return pool->blocks[fl][sl];
369}
370
371/* Remove a free block from the free list. */
372static void remove_free_block(pool_t* pool, block_header_t* block, int fl, int sl)
373{
374	block_header_t* prev = block->prev_free;
375	block_header_t* next = block->next_free;
376	tlsf_assert(prev && "prev_free field can not be null");
377	tlsf_assert(next && "next_free field can not be null");
378	next->prev_free = prev;
379	prev->next_free = next;
380
381	/* If this block is the head of the free list, set new head. */
382	if (pool->blocks[fl][sl] == block)
383	{
384		pool->blocks[fl][sl] = next;
385
386		/* If the new head is null, clear the bitmap. */
387		if (next == &block_null)
388		{
389			pool->sl_bitmap[fl] &= ~(1 << sl);
390
391			/* If the second bitmap is now empty, clear the fl bitmap. */
392			if (!pool->sl_bitmap[fl])
393			{
394				pool->fl_bitmap &= ~(1 << fl);
395			}
396		}
397	}
398}
399
400/* Insert a free block into the free block list. */
401static void insert_free_block(pool_t* pool, block_header_t* block, int fl, int sl)
402{
403	block_header_t* current = pool->blocks[fl][sl];
404	tlsf_assert(current && "free list cannot have a null entry");
405	tlsf_assert(block && "cannot insert a null entry into the free list");
406	block->next_free = current;
407	block->prev_free = &block_null;
408	current->prev_free = block;
409
410	/*
411	** Insert the new block at the head of the list, and mark the first-
412	** and second-level bitmaps appropriately.
413	*/
414	pool->blocks[fl][sl] = block;
415	pool->fl_bitmap |= (1 << fl);
416	pool->sl_bitmap[fl] |= (1 << sl);
417}
418
419/* Remove a given block from the free list. */
420static void block_remove(pool_t* pool, block_header_t* block)
421{
422	int fl, sl;
423	mapping_insert(block_size(block), &fl, &sl);
424	remove_free_block(pool, block, fl, sl);
425}
426
427/* Insert a given block into the free list. */
428static void block_insert(pool_t* pool, block_header_t* block)
429{
430	int fl, sl;
431	mapping_insert(block_size(block), &fl, &sl);
432	insert_free_block(pool, block, fl, sl);
433}
434
435static int block_can_split(block_header_t* block, size_t size)
436{
437	return block_size(block) >= sizeof(block_header_t) + size;
438}
439
440/* Split a block into two, the second of which is free. */
441static block_header_t* block_split(block_header_t* block, size_t size)
442{
443	/* Calculate the amount of space left in the remaining block. */
444	block_header_t* remaining =
445		offset_to_block(block_to_ptr(block), size - block_header_overhead);
446
447	const size_t remain_size = block_size(block) - (size + block_header_overhead);
448	tlsf_assert(block_size(block) == remain_size + size + block_header_overhead);
449	block_set_size(remaining, remain_size);
450	tlsf_assert(block_size(remaining) >= block_size_min && "block split with invalid size");
451
452	block_set_size(block, size);
453	block_mark_as_free(remaining);
454
455	return remaining;
456}
457
458/* Absorb a free block's storage into an adjacent previous free block. */
459static block_header_t* block_absorb(block_header_t* prev, block_header_t* block)
460{
461	tlsf_assert(!block_is_last(prev) && "previous block can't be last!");
462	/* Note: Leaves flags untouched. */
463	prev->size += block_size(block) + block_header_overhead;
464	block_link_next(prev);
465	return prev;
466}
467
468/* Merge a just-freed block with an adjacent previous free block. */
469static block_header_t* block_merge_prev(pool_t* pool, block_header_t* block)
470{
471	if (block_is_prev_free(block))
472	{
473		block_header_t* prev = block_prev(block);
474		tlsf_assert(prev && "prev physical block can't be null");
475		tlsf_assert(block_is_free(prev) && "prev block is not free though marked as such");
476		block_remove(pool, prev);
477		block = block_absorb(prev, block);
478	}
479
480	return block;
481}
482
483/* Merge a just-freed block with an adjacent free block. */
484static block_header_t* block_merge_next(pool_t* pool, block_header_t* block)
485{
486	block_header_t* next = block_next(block);
487	tlsf_assert(next && "next physical block can't be null");
488
489	if (block_is_free(next))
490	{
491		tlsf_assert(!block_is_last(block) && "previous block can't be last!");
492		block_remove(pool, next);
493		block = block_absorb(block, next);
494	}
495
496	return block;
497}
498
499/* Trim any trailing block space off the end of a block, return to pool. */
500static void block_trim_free(pool_t* pool, block_header_t* block, size_t size)
501{
502	tlsf_assert(block_is_free(block) && "block must be free");
503	if (block_can_split(block, size))
504	{
505		block_header_t* remaining_block = block_split(block, size);
506		block_link_next(block);
507		block_set_prev_free(remaining_block);
508		block_insert(pool, remaining_block);
509	}
510}
511
512/* Trim any trailing block space off the end of a used block, return to pool. */
513static void block_trim_used(pool_t* pool, block_header_t* block, size_t size)
514{
515	tlsf_assert(!block_is_free(block) && "block must be used");
516	if (block_can_split(block, size))
517	{
518		/* If the next block is free, we must coalesce. */
519		block_header_t* remaining_block = block_split(block, size);
520		block_set_prev_used(remaining_block);
521
522		remaining_block = block_merge_next(pool, remaining_block);
523		block_insert(pool, remaining_block);
524	}
525}
526
527static block_header_t* block_trim_free_leading(pool_t* pool, block_header_t* block, size_t size)
528{
529	block_header_t* remaining_block = block;
530	if (block_can_split(block, size))
531	{
532		/* We want the 2nd block. */
533		remaining_block = block_split(block, size - block_header_overhead);
534		block_set_prev_free(remaining_block);
535
536		block_link_next(block);
537		block_insert(pool, block);
538	}
539
540	return remaining_block;
541}
542
543static block_header_t* block_locate_free(pool_t* pool, size_t size)
544{
545	int fl = 0;
546	int sl = 0;
547	block_header_t* block = 0;
548
549	if (size)
550	{
551		mapping_search(size, &fl, &sl);
552		block = search_suitable_block(pool, &fl, &sl);
553	}
554
555	if (block)
556	{
557		tlsf_assert(block_size(block) >= size);
558		remove_free_block(pool, block, fl, sl);
559	}
560
561	return block;
562}
563
564static void* block_prepare_used(pool_t* pool, block_header_t* block, size_t size)
565{
566	void* p = 0;
567	if (block)
568	{
569		block_trim_free(pool, block, size);
570		block_mark_as_used(block);
571		pool->pool_free_size -= block_size(block);	
572
573		p = block_to_ptr(block);
574	}
575	return p;
576}
577
578/* Clear structure and point all empty lists at the null block. */
579static void pool_construct(pool_t* pool)
580{
581	int i, j;
582
583	block_null.next_free = &block_null;
584	block_null.prev_free = &block_null;
585
586	pool->fl_bitmap = 0;
587	for (i = 0; i < FL_INDEX_COUNT; ++i)
588	{
589		pool->sl_bitmap[i] = 0;
590		for (j = 0; j < SL_INDEX_COUNT; ++j)
591		{
592			pool->blocks[i][j] = &block_null;
593		}
594	}
595
596	pool->pool_size = pool->pool_free_size = 0;
597}
598
599/*
600** Debugging utilities.
601*/
602
603typedef struct integrity_t
604{
605	int prev_status;
606	int status;
607} integrity_t;
608
609#define tlsf_insist(x) { tlsf_assert(x); if (!(x)) { status--; } }
610
611static void integrity_walker(void* ptr, size_t size, int used, void* user)
612{
613	block_header_t* block = block_from_ptr(ptr);
614	integrity_t* integ = tlsf_cast(integrity_t*, user);
615	const int this_prev_status = block_is_prev_free(block) ? 1 : 0;
616	const int this_status = block_is_free(block) ? 1 : 0;
617	const size_t this_block_size = block_size(block);
618
619	int status = 0;
620	tlsf_insist(integ->prev_status == this_prev_status && "prev status incorrect");
621	tlsf_insist(size == this_block_size && "block size incorrect");
622
623	integ->prev_status = this_status;
624	integ->status += status;
625	used = 0;
626}
627
628int tlsf_check_heap(tlsf_pool tlsf)
629{
630	int i, j;
631
632	pool_t* pool = tlsf_cast(pool_t*, tlsf);
633	int status = 0;
634
635	/* Check that the blocks are physically correct. */
636	integrity_t integ = { 0, 0 };
637	tlsf_walk_heap(tlsf, integrity_walker, &integ);
638	status = integ.status;
639
640	/* Check that the free lists and bitmaps are accurate. */
641	for (i = 0; i < FL_INDEX_COUNT; ++i)
642	{
643		for (j = 0; j < SL_INDEX_COUNT; ++j)
644		{
645			const int fl_map = pool->fl_bitmap & (1 << i);
646			const int sl_list = pool->sl_bitmap[i];
647			const int sl_map = sl_list & (1 << j);
648			const block_header_t* block = pool->blocks[i][j];
649
650			/* Check that first- and second-level lists agree. */
651			if (!fl_map)
652			{
653				tlsf_insist(!sl_map && "second-level map must be null");
654			}
655
656			if (!sl_map)
657			{
658				tlsf_insist(block == &block_null && "block list must be null");
659				continue;
660			}
661
662			/* Check that there is at least one free block. */
663			tlsf_insist(sl_list && "no free blocks in second-level map");
664			tlsf_insist(block != &block_null && "block should not be null");
665
666			while (block != &block_null)
667			{
668				int fli, sli;
669				tlsf_insist(block_is_free(block) && "block should be free");
670				tlsf_insist(!block_is_prev_free(block) && "blocks should have coalesced");
671				tlsf_insist(!block_is_free(block_next(block)) && "blocks should have coalesced");
672				tlsf_insist(block_is_prev_free(block_next(block)) && "block should be free");
673				tlsf_insist(block_size(block) >= block_size_min && "block not minimum size");
674
675				mapping_insert(block_size(block), &fli, &sli);
676				tlsf_insist(fli == i && sli == j && "block size indexed in wrong list");
677				block = block->next_free;
678			}
679		}
680	}
681
682	return status;
683}
684
685#undef tlsf_insist
686
687static void default_walker(void* ptr, size_t size, int used, void* user)
688{
689	(void)user;
690	printf("\t%p %s size: %x\n", ptr, used ? "used" : "free", tlsf_cast(unsigned int, size));
691}
692
693void tlsf_walk_heap(tlsf_pool pool, tlsf_walker walker, void* user)
694{
695	tlsf_walker heap_walker = walker ? walker : default_walker;
696	block_header_t* block =
697		offset_to_block(pool, sizeof(pool_t) - block_header_overhead);
698
699	while (block && !block_is_last(block))
700	{
701		heap_walker(
702			block_to_ptr(block),
703			block_size(block),
704			!block_is_free(block),
705			user);
706		block = block_next(block);
707	}
708}
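
/*
** Illustrative sketch (not part of the allocator): a caller-supplied
** walker that tallies used blocks and bytes. heap_usage_t and count_used
** are hypothetical names.
**
**	typedef struct heap_usage_t
**	{
**		size_t used_blocks;
**		size_t used_bytes;
**	} heap_usage_t;
**
**	static void count_used(void* ptr, size_t size, int used, void* user)
**	{
**		heap_usage_t* usage = tlsf_cast(heap_usage_t*, user);
**		(void)ptr;
**		if (used)
**		{
**			usage->used_blocks += 1;
**			usage->used_bytes += size;
**		}
**	}
**
**	Usage:
**		heap_usage_t usage = { 0, 0 };
**		tlsf_walk_heap(pool, count_used, &usage);
*/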
709
710size_t tlsf_block_size(void* ptr)
711{
712	size_t size = 0;
713	if (ptr)
714	{
715		const block_header_t* block = block_from_ptr(ptr);
716		size = block_size(block);
717	}
718	return size;
719}
720
721/*
722** Overhead of the TLSF structures in a given memory block passed to
723** tlsf_create, equal to the size of a pool_t plus overhead of the initial
724** free block and the sentinel block.
725*/
726size_t tlsf_overhead(void)
727{
728	const size_t pool_overhead = sizeof(pool_t) + 2 * block_header_overhead;
729	return pool_overhead;
730}
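
/*
** On this 32-bit target, and assuming no struct padding, sizeof(pool_t)
** is 4 + 24*4 + 24*32*4 + 2*4 = 3180 bytes, so tlsf_overhead() returns
** 3188. A buffer handed to tlsf_create must be at least that much larger
** than the usable space it is expected to provide.
*/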
731
732/*
733** TLSF main interface. Right out of the white paper.
734*/
735
736tlsf_pool tlsf_create(void* mem, size_t bytes)
737{
738	block_header_t* block;
739	block_header_t* next;
740
741	const size_t pool_overhead = tlsf_overhead();
742	const size_t pool_bytes = align_down(bytes - pool_overhead, ALIGN_SIZE);
743	pool_t* pool = tlsf_cast(pool_t*, mem);
744
745	if (pool_bytes < block_size_min || pool_bytes > block_size_max)
746	{
747		printf("tlsf_create: Pool size must be between %u and %u bytes.\n",
748			tlsf_cast(unsigned int, pool_overhead + block_size_min), tlsf_cast(unsigned int, pool_overhead + block_size_max));
749		return 0;
750	}
751
752	/* Construct a valid pool object. */
753	pool_construct(pool);
754
755	/*
756	** Create the main free block. Offset the start of the block slightly
757	** so that the prev_phys_block field falls inside of the pool
758	** structure - it will never be used.
759	*/
760	block = offset_to_block(
761		tlsf_cast(void*, pool), sizeof(pool_t) - block_header_overhead);
762	block_set_size(block, align_down(pool_bytes, ALIGN_SIZE));
763	block_set_free(block);
764	block_set_prev_used(block);
765	block_insert(pool, block);
766
767	/* Split the block to create a zero-size pool sentinel block. */
768	next = block_link_next(block);
769	block_set_size(next, 0);
770	block_set_used(next);
771	block_set_prev_free(next);
772
773	pool->pool_size = pool->pool_free_size = pool_bytes;
774
775	return tlsf_cast(tlsf_pool, pool);
776}
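
/*
** Illustrative usage sketch (not part of the allocator); the backing
** buffer and its size are arbitrary.
**
**	static char heap_mem[1024 * 1024];
**
**	tlsf_pool pool = tlsf_create(heap_mem, sizeof(heap_mem));
**	if (pool)
**	{
**		void* p = tlsf_malloc(pool, 256);
**		tlsf_free(pool, p);
**		tlsf_destroy(pool);
**	}
*/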
777
778void tlsf_destroy(tlsf_pool pool)
779{
780	/* Nothing to do. */
781	(void)pool;
782}
783
784void* tlsf_malloc(tlsf_pool tlsf, size_t size)
785{
786	pool_t* pool = tlsf_cast(pool_t*, tlsf);
787	const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
788	block_header_t* block = block_locate_free(pool, adjust);
789	return block_prepare_used(pool, block, adjust);
790}
791
792void* tlsf_memalign(tlsf_pool tlsf, size_t align, size_t size)
793{
794	pool_t* pool = tlsf_cast(pool_t*, tlsf);
795	const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
796
797	/*
798	** We must allocate an additional minimum block size bytes so that if
799	** our free block will leave an alignment gap which is smaller, we can
800	** trim a leading free block and release it back to the heap. We must
801	** do this because the previous physical block is in use, therefore
802	** the prev_phys_block field is not valid, and we can't simply adjust
803	** the size of that block.
804	*/
805	const ptrdiff_t gap_minimum = tlsf_cast(ptrdiff_t, sizeof(block_header_t));
806	const size_t size_with_gap = adjust_request_size(adjust + align + gap_minimum, align);
807
808	/* If alignment is less than or equal to the base alignment, we're done. */
809	const size_t aligned_size = (align <= ALIGN_SIZE) ? adjust : size_with_gap;
810
811	block_header_t* block = block_locate_free(pool, aligned_size);
812
813	/* This can't be a static assert. */
814	tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_overhead);
815
816	if (block)
817	{
818		void* ptr = block_to_ptr(block);
819		void* aligned = align_ptr(ptr, align);
820		ptrdiff_t gap = tlsf_cast(ptrdiff_t,
821			tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
822
823		/* If gap size is too small, offset to next aligned boundary. */
824		if (gap && gap < gap_minimum)
825		{
826			const ptrdiff_t gap_remain = gap_minimum - gap;
827			const ptrdiff_t offset = tlsf_max(gap_remain, tlsf_cast(ptrdiff_t, align));
828			void* next_aligned = tlsf_cast(void*,
829				tlsf_cast(tlsfptr_t, aligned) + tlsf_cast(tlsfptr_t, offset));
830			aligned = align_ptr(next_aligned, align);
831			gap = tlsf_cast(ptrdiff_t,
832				tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
833		}
834
835		if (gap)
836		{
837			tlsf_assert(gap >= gap_minimum && "gap size too small");
838			block = block_trim_free_leading(pool, block, gap);
839		}
840	}
841
842	return block_prepare_used(pool, block, adjust);
843}
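
/*
** Illustrative sketch: a 64-byte-aligned allocation. The alignment must
** be a power of two; block sizes themselves stay 4-byte granular.
**
**	void* p = tlsf_memalign(pool, 64, 256);
**	tlsf_assert(0 == (tlsf_cast(tlsfptr_t, p) & 63));
**	tlsf_free(pool, p);
*/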
844
845void tlsf_free(tlsf_pool tlsf, void* ptr)
846{
847	/* Don't attempt to free a NULL pointer. */
848	if (ptr)
849	{
850		pool_t* pool = tlsf_cast(pool_t*, tlsf);
851		block_header_t* block = block_from_ptr(ptr);
852		tlsf_assert(!block_is_free(block) && "block already marked as free");
853		pool->pool_free_size += block_size(block);
854		block_mark_as_free(block);
855		block = block_merge_prev(pool, block);
856		block = block_merge_next(pool, block);
857		block_insert(pool, block);
858	}
859}
860
861/*
862** The TLSF block information provides us with enough information to
863** provide a reasonably intelligent implementation of realloc, growing or
864** shrinking the currently allocated block as required.
865**
866** This routine handles the somewhat esoteric edge cases of realloc:
867** - a non-zero size with a null pointer will behave like malloc
868** - a zero size with a non-null pointer will behave like free
869** - a request that cannot be satisfied will leave the original buffer
870**   untouched
871** - an extended buffer size will leave the newly-allocated area with
872**   contents undefined
873*/
874void* tlsf_realloc(tlsf_pool tlsf, void* ptr, size_t size)
875{
876	pool_t* pool = tlsf_cast(pool_t*, tlsf);
877	void* p = 0;
878
879	/* Zero-size requests are treated as free. */
880	if (ptr && size == 0)
881	{
882		tlsf_free(tlsf, ptr);
883	}
884	/* Requests with NULL pointers are treated as malloc. */
885	else if (!ptr)
886	{
887		p = tlsf_malloc(tlsf, size);
888	}
889	else
890	{
891		block_header_t* block = block_from_ptr(ptr);
892		block_header_t* next = block_next(block);
893
894		const size_t cursize = block_size(block);
895		const size_t combined = cursize + block_size(next) + block_header_overhead;
896		const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
897
898		/*
899		** If the next block is used, or if combining it with the current
900		** block still does not offer enough space, we must reallocate and copy.
901		*/
902		if (adjust > cursize && (!block_is_free(next) || adjust > combined))
903		{
904			p = tlsf_malloc(tlsf, size);
905			if (p)
906			{
907				const size_t minsize = tlsf_min(cursize, size);
908				memcpy(p, ptr, minsize);
909				tlsf_free(tlsf, ptr);
910			}
911		}
912		else
913		{
914			/* Do we need to expand to the next block? */
915			if (adjust > cursize)
916			{
917				block_merge_next(pool, block);
918				block_mark_as_used(block);
919			}
920
921			/* Trim the resulting block and return the original pointer. */
922			block_trim_used(pool, block, adjust);
923			p = ptr;
924		}
925	}
926
927	return p;
928}
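
/*
** Illustrative sketch of the edge cases documented above:
**
**	void* p = tlsf_realloc(pool, 0, 64);   behaves like tlsf_malloc
**	p = tlsf_realloc(pool, p, 256);        grows in place if the next
**	                                       physical block is free and
**	                                       large enough, else copies
**	p = tlsf_realloc(pool, p, 0);          behaves like tlsf_free and
**	                                       returns 0
*/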
929
930void tlsf_statistics(tlsf_pool tlsf, size_t* total_size, size_t* free_size)
931{
932	pool_t* pool = tlsf_cast(pool_t*, tlsf);
933
934	*total_size = pool->pool_size;
935	*free_size = pool->pool_free_size;
936}
937
938#pragma warning(pop)