
/sys/kern/subr_alist.c

http://github.com/davshao/dflygsocdrm
/*
 * ALIST.C - Bitmap allocator/deallocator, using a radix tree with hinting.
 *           Unlimited-size allocations, power-of-2 only, power-of-2
 *           aligned results only.
 *
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module has been adapted from the BLIST module, which was written
 * by Matthew Dillon many years ago.
 *
 * This module implements a general power-of-2 bitmap allocator/deallocator.
 * All allocations must be in powers of 2 and will return similarly aligned
 * results.  The module does not try to interpret the meaning of a 'block'
 * other than to return ALIST_BLOCK_NONE on an allocation failure.
 *
 * A maximum of 2 billion blocks is supported so, for example, if one block
 * represented 64 bytes a maximally sized ALIST would represent
 * 128 gigabytes.
 *
 * A radix tree is used to maintain the bitmap and is laid out in a manner
 * similar to the blist code.  Meta nodes use a radix of 16 and 2 bits per
 * block while leaf nodes use a radix of 32 and 1 bit per block (stored in
 * a 32 bit bitmap field).  Both meta and leaf nodes have a hint field.
 * This field gives us a hint as to the largest free contiguous range of
 * blocks under the node.  It may contain a value that is too high, but
 * will never contain a value that is too low.  When the radix tree is
 * searched, allocation failures in subtrees update the hint.
 *
 * The radix tree is laid out recursively using a linear array.  Each meta
 * node is immediately followed (laid out sequentially in memory) by
 * ALIST_META_RADIX lower-level nodes.  This is a recursive structure but one
 * that can be easily scanned through a very simple 'skip' calculation.  In
 * order to support large radixes, portions of the tree may reside outside our
 * memory allocation.  We handle this with an early-terminate optimization
 * in the meta-node.  The memory allocation is only large enough to cover
 * the number of blocks requested at creation time even if it must be
 * encompassed in a larger root-node radix.
 *
 * This code can be compiled stand-alone for debugging.
 */

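/*
 * For example, with ALIST_BMAP_RADIX == 32 and ALIST_META_RADIX == 16 as
 * described above, an alist created for 1024 blocks is sized by the loop
 * in alist_create() below: radix grows 32 -> 512 -> 8192 and skip grows
 * 0 -> 16 -> 272.  The root is then a meta node whose 16 children each
 * cover 512 blocks; only the first two children are backed by populated
 * subtrees, the remaining fourteen are early-terminated.
 */
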
#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/alist.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#else

#ifndef ALIST_NO_DEBUG
#define ALIST_DEBUG
#endif

#include <sys/types.h>
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>

#define kmalloc(a,b,c)  malloc(a)
#define kfree(a,b)      free(a)
#define kprintf         printf
#define KKASSERT(exp)   assert(exp)

struct malloc_type;

#include <sys/alist.h>

void panic(const char *ctl, ...);

#endif

/*
 * static support functions
 */
static alist_blk_t alst_leaf_alloc(almeta_t *scan, alist_blk_t blk,
                                alist_blk_t start, alist_blk_t count);
static alist_blk_t alst_meta_alloc(almeta_t *scan, alist_blk_t blk,
                                alist_blk_t start, alist_blk_t count,
                                alist_blk_t radix, alist_blk_t skip);
static void alst_leaf_free(almeta_t *scan, alist_blk_t relblk,
                                alist_blk_t count);
static void alst_meta_free(almeta_t *scan, alist_blk_t freeBlk,
                                alist_blk_t count, alist_blk_t radix,
                                alist_blk_t skip, alist_blk_t blk);
static alist_blk_t alst_radix_init(almeta_t *scan, alist_blk_t blk,
                                alist_blk_t radix, alist_blk_t skip,
                                alist_blk_t count);
#ifndef _KERNEL
static void alst_radix_print(almeta_t *scan, alist_blk_t blk,
                                alist_blk_t radix, alist_blk_t skip,
                                int tab);
#endif

/*
 * Create an alist capable of handling up to the specified number of blocks.
 * The number of blocks must be greater than 0 but does not have to be a
 * power of 2.
 *
 * The smallest alist consists of a single leaf node capable of
 * managing ALIST_BMAP_RADIX blocks.
 */
alist_t
alist_create(alist_blk_t blocks, struct malloc_type *mtype)
{
        alist_t bl;
        alist_blk_t radix;
        alist_blk_t skip = 0;

        /*
         * Calculate radix and skip field used for scanning.
         */
        radix = ALIST_BMAP_RADIX;
        while (radix < blocks) {
                radix *= ALIST_META_RADIX;
                skip = (skip + 1) * ALIST_META_RADIX;
        }

        bl = kmalloc(sizeof(struct alist), mtype, M_WAITOK | M_ZERO);

        bl->bl_blocks = blocks;
        bl->bl_radix = radix;
        bl->bl_skip = skip;
        bl->bl_rootblks = 1 + alst_radix_init(NULL, 0, bl->bl_radix,
                                              bl->bl_skip, blocks);
        bl->bl_root = kmalloc(sizeof(almeta_t) * bl->bl_rootblks,
                              mtype, M_WAITOK);

#if defined(ALIST_DEBUG)
        kprintf(
                "ALIST representing %d blocks (%d MB of swap)"
                ", requiring %dK (%d bytes) of ram\n",
                bl->bl_blocks,
                bl->bl_blocks * 4 / 1024,
                (bl->bl_rootblks * sizeof(almeta_t) + 1023) / 1024,
                (bl->bl_rootblks * sizeof(almeta_t))
        );
        kprintf("ALIST raw radix tree contains %d records\n", bl->bl_rootblks);
#endif
        alst_radix_init(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, blocks);

        return(bl);
}

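/*
 * Minimal usage sketch (illustrative only, not compiled; the malloc type
 * M_TEMP is assumed for the example).  Note that alst_radix_init() leaves
 * every block marked allocated, so a freshly created alist must have its
 * managed range freed before the first allocation -- the stand-alone test
 * harness at the bottom of this file does exactly that.
 */
#if 0
static void
alist_example(void)
{
        alist_t al;
        alist_blk_t blk;

        al = alist_create(1024, M_TEMP);        /* manage 1024 blocks */
        alist_free(al, 0, 1024);                /* everything starts allocated */
        blk = alist_alloc(al, 0, 16);           /* 16 blocks, 16-block aligned */
        if (blk != ALIST_BLOCK_NONE)
                alist_free(al, blk, 16);
        alist_destroy(al, M_TEMP);
}
#endif
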
void
alist_init(alist_t bl, alist_blk_t blocks,
           almeta_t *records, alist_blk_t nrecords)
{
        alist_blk_t radix;
        alist_blk_t skip = 0;

        /*
         * Calculate radix and skip field used for scanning.
         */
        radix = ALIST_BMAP_RADIX;
        while (radix < blocks) {
                radix *= ALIST_META_RADIX;
                skip = (skip + 1) * ALIST_META_RADIX;
        }
        bzero(bl, sizeof(*bl));

        bl->bl_blocks = blocks;
        bl->bl_radix = radix;
        bl->bl_skip = skip;
        bl->bl_rootblks = 1 + alst_radix_init(NULL, 0, bl->bl_radix,
                                              bl->bl_skip, blocks);
        KKASSERT(bl->bl_rootblks <= nrecords);
        bl->bl_root = records;

#if defined(ALIST_DEBUG)
        kprintf(
                "ALIST representing %d blocks (%d MB of swap)"
                ", requiring %dK (%d bytes) of ram\n",
                bl->bl_blocks,
                bl->bl_blocks * 4 / 1024,
                (bl->bl_rootblks * sizeof(almeta_t) + 1023) / 1024,
                (bl->bl_rootblks * sizeof(almeta_t))
        );
        kprintf("ALIST raw radix tree contains %d records\n", bl->bl_rootblks);
#endif
        alst_radix_init(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, blocks);
}

void
alist_destroy(alist_t bl, struct malloc_type *mtype)
{
        kfree(bl->bl_root, mtype);
        kfree(bl, mtype);
}

/*
 * Reserve space in the block bitmap.  Return the base of a contiguous
 * region or ALIST_BLOCK_NONE if space could not be allocated.
 *
 * This nominally allocates a power-of-2 number of blocks.  However,
 * non-powers of 2 are accepted and implemented by first allocating
 * the nearest power of 2 and then freeing the remainder.
 */
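/*
 * For example, a request for 5 blocks first allocates the next power of
 * 2 (ncount == 8) and then frees the trailing 3 blocks, so the caller
 * gets a 5-block region whose base is 8-block aligned.
 */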
alist_blk_t
alist_alloc(alist_t bl, alist_blk_t start, alist_blk_t count)
{
        alist_blk_t blk = ALIST_BLOCK_NONE;

        /*
         * Check non power-of-2
         */
        KKASSERT(count);
        if ((count | (count - 1)) != (count << 1) - 1) {
                alist_blk_t ncount = (count < 256) ? 1 : 256;
                while (ncount < count)
                        ncount <<= 1;
                blk = alist_alloc(bl, start, ncount);
                if (blk != ALIST_BLOCK_NONE)
                        alist_free(bl, blk + count, ncount - count);
                return (blk);
        }

        /*
         * Power of 2
         */
        if (bl && count < bl->bl_radix) {
                if (bl->bl_radix == ALIST_BMAP_RADIX) {
                        blk = alst_leaf_alloc(bl->bl_root, 0, start, count);
                } else {
                        blk = alst_meta_alloc(bl->bl_root, 0, start, count,
                                              bl->bl_radix, bl->bl_skip);
                }
                if (blk != ALIST_BLOCK_NONE)
                        bl->bl_free -= count;
        }
        return(blk);
}

/*
 * Free up space in the block bitmap.  The starting block and count do not
 * need to be power-of-2 aligned.  The related blocks must be in an allocated
 * state.
 */
void
alist_free(alist_t bl, alist_blk_t blkno, alist_blk_t count)
{
        if (bl) {
                KKASSERT(blkno + count <= bl->bl_blocks);
                if (bl->bl_radix == ALIST_BMAP_RADIX) {
                        alst_leaf_free(bl->bl_root, blkno, count);
                } else {
                        alst_meta_free(bl->bl_root, blkno, count,
                                       bl->bl_radix, bl->bl_skip, 0);
                }
                bl->bl_free += count;
        }
}

/*
 * Returns the current total number of free blocks and the
 * approximate trailing largest contiguous free block available.
 */
alist_blk_t
alist_free_info(alist_t bl, alist_blk_t *startp, alist_blk_t *countp)
{
        alist_blk_t radix = bl->bl_radix;
        alist_blk_t skip = bl->bl_skip;
        alist_blk_t next_skip;
        alist_blk_t i;
        alist_bmap_t mask;
        almeta_t *scan = bl->bl_root;

        *startp = 0;
        *countp = 0;

        while (radix != ALIST_BMAP_RADIX) {
                radix /= ALIST_META_RADIX;
                next_skip = skip / ALIST_META_RADIX;

                /*
                 * Find the biggest fully allocated chunk.
                 */
                for (i = ALIST_META_RADIX - 1; i != ALIST_BLOCK_NONE; --i) {
                        mask = (scan->bm_bitmap >> (i * 2)) & 3;
                        if (mask == 0) {
                                /*
                                 * All allocated, continue the loop
                                 */
                                continue;
                        }
                        if (mask == 1) {
                                /*
                                 * Partially allocated, push into this guy
                                 */
                                break;
                        }
                        if (mask == 2) {
                                /*
                                 * Unknown state
                                 */
                                return(bl->bl_free);
                        }
                        /*
                         * All free, we can return the chunk.
                         */
                        *startp += i * radix;
                        *countp = radix;
                        return(bl->bl_free);
                }

                /*
                 * If we failed to find anything stop here, otherwise push
                 * in.
                 */
                if (i == ALIST_BLOCK_NONE)
                        return(bl->bl_free);
                *startp += i * radix;
                scan += 1 + next_skip * i;
                skip = next_skip - 1;
        }

        /*
         * If we got all the way down to a leaf node locate the last block,
         * power-of-2 aligned and power-of-2 sized.  Well, the easiest way
         * to deal with this is to just return 1 block.
         */
        if (radix == ALIST_BMAP_RADIX) {
                mask = scan->bm_bitmap;
                for (i = ALIST_BMAP_RADIX - 1; i != ALIST_BLOCK_NONE; --i) {
                        if ((mask & ((alist_bmap_t)1U << i)))
                                break;
                }

                /*
                 * did not find free entry
                 */
                if (i == ALIST_BLOCK_NONE)
                        return(bl->bl_free);

                /*
                 * Return one block.
                 */
                *startp += i;
                *countp = 1;
                return(bl->bl_free);
        }
        return(bl->bl_free);
}

#ifdef ALIST_DEBUG

/*
 * alist_print() - dump radix tree
 */
void
alist_print(alist_t bl)
{
        kprintf("ALIST {\n");
        alst_radix_print(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, 4);
        kprintf("}\n");
}

#endif

/************************************************************************
 *                      ALLOCATION SUPPORT FUNCTIONS                   *
 ************************************************************************
 *
 *      These support functions do all the actual work.  They may seem
 *      rather longish, but that's because I've commented them up.  The
 *      actual code is straightforward.
 *
 */

/*
 * alst_leaf_alloc() - allocate at a leaf in the radix tree (a bitmap).
 *
 *      This is the core of the allocator and is optimized for the 1 block
 *      and the ALIST_BMAP_RADIX block allocation cases.  Other cases are
 *      somewhat slower.  The 1 block allocation case is log2 and extremely
 *      quick.
 *
 *      mask bit is 0   allocated, not available
 *      mask bit is 1   free, available for allocation
 */
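/*
 * For example, if the leaf bitmap is 0xFFFFFF00 (blocks 0-7 allocated,
 * blocks 8-31 free), a count == 4 request walks the 4-bit masks
 * 0x0000000F, 0x000000F0, 0x00000F00, ... and succeeds on the third,
 * clearing bits 8-11 and returning blk + 8.
 */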
static alist_blk_t
alst_leaf_alloc(almeta_t *scan, alist_blk_t blk, alist_blk_t start,
                alist_blk_t count)
{
        alist_bmap_t orig = scan->bm_bitmap;

        /*
         * Allocate only beyond the start point.  Mask to 0 the low bits
         * below start.  If start == blk no bits get cleared so don't
         * bother.
         */
        if (start >= blk + ALIST_BMAP_RADIX)
                return(ALIST_BLOCK_NONE);

        if (start > blk && start < blk + ALIST_BMAP_RADIX)
                orig &= ~(((alist_bmap_t)1U << (start - blk)) - 1);

        start &= ALIST_BMAP_RADIX - 1;

        /*
         * Optimize bitmap all-allocated case.  Also, count = 1
         * case assumes at least 1 bit is free in the bitmap, so
         * we have to take care of this case here.
         */
        if (orig == 0) {
                if (start <= blk)
                        scan->bm_bighint = 0;
                return(ALIST_BLOCK_NONE);
        }

        /*
         * Optimized code to allocate one bit out of the bitmap
         */
        if (count == 1) {
                alist_bmap_t mask;
                alist_blk_t j = ALIST_BMAP_RADIX/2;
                alist_blk_t r = 0;

                mask = (alist_bmap_t)-1 >> (ALIST_BMAP_RADIX/2);

                while (j) {
                        if ((orig & mask) == 0) {
                                r += j;
                                orig >>= j;
                        }
                        j >>= 1;
                        mask >>= j;
                }
                scan->bm_bitmap &= ~(1 << r);
                return(blk + r);
        }

        /*
         * non-optimized code to allocate N bits out of the bitmap.
         * The more bits, the faster the code runs.  It will run
         * the slowest allocating 2 bits, but since there aren't any
         * memory ops in the core loop (or shouldn't be, anyway),
         * you probably won't notice the difference.
         *
         * Similar to the blist case, the alist code also requires
         * allocations to be power-of-2 sized and aligned to the
         * size of the allocation, which simplifies the algorithm.
         */
        {
                alist_blk_t j;
                alist_blk_t n = ALIST_BMAP_RADIX - count;
                alist_bmap_t mask;

                mask = (alist_bmap_t)-1 >> n;

                for (j = 0; j <= n; j += count) {
                        if ((orig & mask) == mask) {
                                scan->bm_bitmap &= ~mask;
                                return(blk + j);
                        }
                        mask = mask << count;
                }
        }

        /*
         * We couldn't allocate count in this subtree, update bighint
         * if we were able to check the entire node.
         */
        if (start <= blk)
                scan->bm_bighint = count - 1;
        return(ALIST_BLOCK_NONE);
}

/*
 * Attempt to allocate at a meta node.  If we can't, we update
 * bighint and return a failure.  Updating bighint optimizes future
 * calls that hit this node.  We have to check for our collapse cases
 * and we have a few optimizations strewn in as well.
 */
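/*
 * For example, using the 2-bit state table documented below, a meta
 * bitmap whose low byte is 0x34 (binary 00 11 01 00) says child 0 is
 * all-allocated, child 1 is partially free, child 2 is all-free and
 * child 3 is all-allocated; child k always occupies bits 2k and 2k+1.
 */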
static alist_blk_t
alst_meta_alloc(almeta_t *scan, alist_blk_t blk, alist_blk_t start,
                alist_blk_t count, alist_blk_t radix, alist_blk_t skip)
{
        alist_blk_t i;
        alist_bmap_t mask;
        alist_bmap_t pmask;
        alist_blk_t next_skip = ((u_int)skip / ALIST_META_RADIX);
        alist_blk_t orig_blk;

        /*
         * ALL-ALLOCATED special case
         */
        if (scan->bm_bitmap == 0) {
                scan->bm_bighint = 0;
                return(ALIST_BLOCK_NONE);
        }

        radix /= ALIST_META_RADIX;

        /*
         * Radix now represents each bitmap entry for this meta node.  If
         * the number of blocks being allocated can be fully represented,
         * we allocate directly out of this meta node.
         *
         * Meta node bitmaps use 2 bits per block.
         *
         *      00      ALL-ALLOCATED
         *      01      PARTIALLY-FREE/PARTIALLY-ALLOCATED
         *      10      (RESERVED)
         *      11      ALL-FREE
         */
        if (count >= radix) {
                alist_blk_t n = count / radix * 2;      /* number of bits */
                alist_blk_t j;

                mask = (alist_bmap_t)-1 >> (ALIST_BMAP_RADIX - n);
                for (j = 0; j < ALIST_META_RADIX; j += n / 2) {
                        if ((scan->bm_bitmap & mask) == mask &&
                            blk + j * radix >= start) {
                                scan->bm_bitmap &= ~mask;
                                return(blk + j * radix);
                        }
                        mask <<= n;
                }
                if (scan->bm_bighint >= count && start <= blk)
                        scan->bm_bighint = count >> 1;
                return(ALIST_BLOCK_NONE);
        }

        /*
         * If not we have to recurse.
         */
        mask = 0x00000003;
        pmask = 0x00000001;
        orig_blk = blk;
        for (i = 1; i <= skip; i += next_skip) {
                if (scan[i].bm_bighint == (alist_blk_t)-1) {
                        /*
                         * Terminator
                         */
                        break;
                }

                /*
                 * If the element is marked completely free (11), initialize
                 * the recursion.
                 */
                if ((scan->bm_bitmap & mask) == mask) {
                        scan[i].bm_bitmap = (alist_bmap_t)-1;
                        scan[i].bm_bighint = radix;
                }

                if ((scan->bm_bitmap & mask) == 0) {
                        /*
                         * Object marked completely allocated, recursion
                         * contains garbage.
                         */
                        /* Skip it */
                } else if (blk + radix <= start) {
                        /*
                         * Object does not contain or is not beyond our
                         * start point.
                         */
                        /* Skip it */
                } else if (count <= scan[i].bm_bighint) {
                        /*
                         * count fits in object.  If successful and the
                         * deeper level becomes all allocated, mark our
                         * level as all-allocated.
                         */
                        alist_blk_t r;
                        if (next_skip == 1) {
                                r = alst_leaf_alloc(&scan[i], blk, start,
                                                    count);
                        } else {
                                r = alst_meta_alloc(&scan[i], blk, start,
                                                    count,
                                                    radix, next_skip - 1);
                        }
                        if (r != ALIST_BLOCK_NONE) {
                                if (scan[i].bm_bitmap == 0) {
                                        scan->bm_bitmap &= ~mask;
                                } else {
                                        scan->bm_bitmap &= ~mask;
                                        scan->bm_bitmap |= pmask;
                                }
                                return(r);
                        }
                }
                blk += radix;
                mask <<= 2;
                pmask <<= 2;
        }

        /*
         * We couldn't allocate count in this subtree, update bighint
         * if we were able to check the entire node.
         */
        if (scan->bm_bighint >= count && start <= orig_blk)
                scan->bm_bighint = count >> 1;
        return(ALIST_BLOCK_NONE);
}

/*
 * Free allocated block from leaf bitmap
 */
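/*
 * For example, freeing count == 3 blocks at offset n == 4 within the
 * leaf builds mask == 0xFFFFFFF0 & 0x0000007F == 0x00000070 (bits 4-6),
 * which must currently read as allocated and is then set back to free.
 */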
static void
alst_leaf_free(almeta_t *scan, alist_blk_t blk, alist_blk_t count)
{
        /*
         * free some data in this bitmap
         *
         * e.g.
         *      0000111111111110000
         *          \_________/\__/
         *               v      n
         */
        alist_blk_t n = blk & (ALIST_BMAP_RADIX - 1);
        alist_bmap_t mask;

        mask = ((alist_bmap_t)-1 << n) &
               ((alist_bmap_t)-1 >> (ALIST_BMAP_RADIX - count - n));

        if (scan->bm_bitmap & mask)
                panic("alst_radix_free: freeing free block");
        scan->bm_bitmap |= mask;

        /*
         * We could probably do a better job here.  We are required to make
         * bighint at least as large as the biggest contiguous block of
         * data.  If we just shoehorn it, a little extra overhead will
         * be incurred on the next allocation (but only that one typically).
         */
        scan->bm_bighint = ALIST_BMAP_RADIX;
}

/*
 * Free allocated blocks from radix tree meta info.  This support routine
 * frees a range of blocks from the bitmap.  The range must be entirely
 * enclosed by this radix node.  If a meta node, we break the range down
 * recursively to free blocks in subnodes (which means that this code
 * can free an arbitrary range whereas the allocation code cannot allocate
 * an arbitrary range).
 */
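/*
 * For example, with 32-block children under this meta node, freeing 50
 * blocks starting at block 40 is broken into a 24-block free in child 1
 * (blocks 40-63) followed by a 26-block free in child 2 (blocks 64-89).
 */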
static void
alst_meta_free(almeta_t *scan, alist_blk_t freeBlk, alist_blk_t count,
               alist_blk_t radix, alist_blk_t skip, alist_blk_t blk)
{
        alist_blk_t next_skip = ((u_int)skip / ALIST_META_RADIX);
        alist_bmap_t mask;
        alist_bmap_t pmask;
        alist_blk_t i;

        /*
         * Break the free down into its components.  Because it is so easy
         * to implement, frees are not limited to power-of-2 sizes.
         *
         * Each block in a meta-node bitmap takes two bits.
         */
        radix /= ALIST_META_RADIX;

        i = (freeBlk - blk) / radix;
        blk += i * radix;
        mask = 0x00000003 << (i * 2);
        pmask = 0x00000001 << (i * 2);

        i = i * next_skip + 1;

        while (i <= skip && blk < freeBlk + count) {
                alist_blk_t v;

                v = blk + radix - freeBlk;
                if (v > count)
                        v = count;

                if (scan->bm_bighint == (alist_blk_t)-1)
                        panic("alst_meta_free: freeing unexpected range");

                /*
                 * WARNING on bighint updates.  When we free an element in
                 * a chunk, if the chunk becomes wholly free it is possible
                 * that the whole node is now free, so bighint must be set
                 * to cover the whole node.  Otherwise address-specific
                 * allocations may fail.
                 *
                 * We don't bother figuring out how much of the node is
                 * actually free in this case.
                 */
                if (freeBlk == blk && count >= radix) {
                        /*
                         * The area being freed covers the entire block,
                         * assert that we are marked all-allocated and
                         * then mark it all-free.
                         */
                        KKASSERT((scan->bm_bitmap & mask) == 0);
                        scan->bm_bitmap |= mask;
                        scan->bm_bighint = radix * ALIST_META_RADIX;
                } else {
                        /*
                         * If we were previously marked all-allocated, fix-up
                         * the next layer so we can recurse down into it.
                         */
                        if ((scan->bm_bitmap & mask) == 0) {
                                scan[i].bm_bitmap = (alist_bmap_t)0;
                                scan[i].bm_bighint = 0;
                        }

                        /*
                         * Recursion case, then either mark all-free or
                         * partially free.
                         */
                        if (next_skip == 1) {
                                alst_leaf_free(&scan[i], freeBlk, v);
                        } else {
                                alst_meta_free(&scan[i], freeBlk, v,
                                               radix, next_skip - 1, blk);
                        }
                        if (scan[i].bm_bitmap == (alist_bmap_t)-1) {
                                scan->bm_bitmap |= mask;
                                scan->bm_bighint = radix * ALIST_META_RADIX;
                        } else {
                                scan->bm_bitmap &= ~mask;
                                scan->bm_bitmap |= pmask;
                                if (scan->bm_bighint < scan[i].bm_bighint)
                                        scan->bm_bighint = scan[i].bm_bighint;
                        }
                }
                mask <<= 2;
                pmask <<= 2;
                count -= v;
                freeBlk += v;
                blk += radix;
                i += next_skip;
        }
}

/*
 * Initialize our meta structures and bitmaps and calculate the exact
 * amount of space required to manage 'count' blocks - this space may
 * be considerably less than the calculated radix due to the large
 * RADIX values we use.
 */
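/*
 * For example, the 1024-block layout described at the top of this file
 * needs 1 root record, 16 second-level records (14 of them terminators)
 * and 2 * 16 leaf records, i.e. bl_rootblks == 49 almeta_t structures,
 * far fewer than a fully populated radix-8192 tree would require.
 */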
static alist_blk_t
alst_radix_init(almeta_t *scan, alist_blk_t blk, alist_blk_t radix,
                alist_blk_t skip, alist_blk_t count)
{
        alist_blk_t i;
        alist_blk_t next_skip;
        alist_bmap_t mask;
        alist_bmap_t pmask;
        alist_blk_t memindex;

        /*
         * Leaf node
         */
        if (radix == ALIST_BMAP_RADIX) {
                if (scan) {
                        scan->bm_bighint = 0;
                        scan->bm_bitmap = 0;
                }
                return(0);
        }

        /*
         * Meta node.  If allocating the entire object we can special
         * case it.  However, we need to figure out how much memory
         * is required to manage 'count' blocks, so we continue on anyway.
         */
        if (scan) {
                scan->bm_bighint = 0;
                scan->bm_bitmap = 0;
        }
        memindex = 0;

        radix /= ALIST_META_RADIX;
        next_skip = skip / ALIST_META_RADIX;
        mask = 0x00000003;
        pmask = 0x00000001;

        for (i = 1; i <= skip; i += next_skip) {
                if (count >= blk + radix) {
                        /*
                         * Allocate the entire object
                         */
                        memindex += alst_radix_init(((scan) ? &scan[i] : NULL),
                                                    blk, radix,
                                                    next_skip - 1, count);
                        /* already marked as wholly allocated */
                } else if (count > blk) {
                        /*
                         * Allocate a partial object, well it's really
                         * all-allocated, we just aren't allowed to
                         * free the whole thing.
                         */
                        memindex += alst_radix_init(((scan) ? &scan[i] : NULL),
                                                    blk, radix,
                                                    next_skip - 1, count);
                        /* already marked as wholly allocated */
                } else {
                        /*
                         * Add terminator but continue the loop.  Populate
                         * all terminators.
                         */
                        if (scan) {
                                scan[i].bm_bighint = (alist_blk_t)-1;
                                scan[i].bm_bitmap = 0;
                        }
                        /* already marked as wholly allocated */
                }
                mask <<= 2;
                pmask <<= 2;
                blk += radix;
        }
        memindex += ALIST_META_RADIX;
        return(memindex);
}

#ifdef ALIST_DEBUG

static void
alst_radix_print(almeta_t *scan, alist_blk_t blk, alist_blk_t radix,
                 alist_blk_t skip, int tab)
{
        alist_blk_t i;
        alist_blk_t next_skip;
        alist_bmap_t mask;

        if (radix == ALIST_BMAP_RADIX) {
                kprintf(
                    "%*.*s(%04x,%d): bitmap %08x big=%d\n",
                    tab, tab, "",
                    blk, radix,
                    scan->bm_bitmap,
                    scan->bm_bighint
                );
                return;
        }

        if (scan->bm_bitmap == 0) {
                kprintf(
                    "%*.*s(%04x,%d) ALL ALLOCATED\n",
                    tab, tab, "",
                    blk,
                    radix
                );
                return;
        }
        if (scan->bm_bitmap == (alist_bmap_t)-1) {
                kprintf(
                    "%*.*s(%04x,%d) ALL FREE\n",
                    tab, tab, "",
                    blk,
                    radix
                );
                return;
        }

        kprintf(
            "%*.*s(%04x,%d): subtree (%d) bitmap=%08x big=%d {\n",
            tab, tab, "",
            blk, radix,
            radix,
            scan->bm_bitmap,
            scan->bm_bighint
        );

        radix /= ALIST_META_RADIX;
        next_skip = skip / ALIST_META_RADIX;
        tab += 4;
        mask = 0x00000003;

        for (i = 1; i <= skip; i += next_skip) {
                if (scan[i].bm_bighint == (alist_blk_t)-1) {
                        kprintf(
                            "%*.*s(%04x,%d): Terminator\n",
                            tab, tab, "",
                            blk, radix
                        );
                        break;
                }
                if ((scan->bm_bitmap & mask) == mask) {
                        kprintf(
                            "%*.*s(%04x,%d): ALL FREE\n",
                            tab, tab, "",
                            blk, radix
                        );
                } else if ((scan->bm_bitmap & mask) == 0) {
                        kprintf(
                            "%*.*s(%04x,%d): ALL ALLOCATED\n",
                            tab, tab, "",
                            blk, radix
                        );
                } else {
                        alst_radix_print(
                            &scan[i],
                            blk,
                            radix,
                            next_skip - 1,
                            tab
                        );
                }
                blk += radix;
                mask <<= 2;
        }
        tab -= 4;

        kprintf("%*.*s}\n", tab, tab, "");
}

#endif

#ifdef ALIST_DEBUG

int
main(int ac, char **av)
{
        alist_blk_t size = 1024;
        alist_blk_t da = 0;
        alist_blk_t count = 0;
        alist_t bl;
        int i;

        for (i = 1; i < ac; ++i) {
                const char *ptr = av[i];
                if (*ptr != '-') {
                        size = strtol(ptr, NULL, 0);
                        continue;
                }
                ptr += 2;
                fprintf(stderr, "Bad option: %s\n", ptr - 2);
                exit(1);
        }
        bl = alist_create(size, NULL);
        alist_free(bl, 0, size);

        for (;;) {
                char buf[1024];
                alist_blk_t bfree;

                kprintf("%d/%d/%d> ", bl->bl_free, size, bl->bl_radix);
                fflush(stdout);
                if (fgets(buf, sizeof(buf), stdin) == NULL)
                        break;
                switch(buf[0]) {
                case 'p':
                        alist_print(bl);
                        break;
                case 'a':
                        if (sscanf(buf + 1, "%x %d", &da, &count) == 2) {
                                da = alist_alloc(bl, da, count);
                                kprintf("    R=%04x\n", da);
                        } else if (sscanf(buf + 1, "%d", &count) == 1) {
                                da = alist_alloc(bl, 0, count);
                                kprintf("    R=%04x\n", da);
                        } else if (count) {
                                kprintf("alloc 0x%04x/%d\n", da, count);
                                alist_blk_t blk = alist_alloc(bl, da, count);
                                kprintf("    R=%04x\n", blk);
                        } else {
                                kprintf("?\n");
                        }
                        break;
                case 'f':
                        if (sscanf(buf + 1, "%x %d", &da, &count) == 2) {
                                alist_free(bl, da, count);
                        } else if (count) {
                                kprintf("free 0x%04x/%d\n", da, count);
                                alist_free(bl, da, count);
                        } else {
                                kprintf("?\n");
                        }
                        break;
                case '?':
                case 'h':
                        puts("p          -print\n"
                             "a %d       -allocate\n"
                             "f %x %d    -free\n"
                             "h/?        -help");
                        break;
                case 'i':
                        bfree = alist_free_info(bl, &da, &count);
                        kprintf("info: %d free trailing: 0x%04x/%d\n",
                                bfree, da, count);
                        break;
                default:
                        kprintf("?\n");
                        break;
                }
        }
        return(0);
}

void
panic(const char *ctl, ...)
{
        __va_list va;

        __va_start(va, ctl);
        vfprintf(stderr, ctl, va);
        fprintf(stderr, "\n");
        __va_end(va);
        exit(1);
}

#endif