
/libgc/reclaim.c

https://bitbucket.org/danipen/mono
C | 1061 lines | 797 code | 93 blank | 171 comment
  1. /*
  2. * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
  3. * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
  4. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
  5. * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
  6. *
  7. * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
  8. * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
  9. *
  10. * Permission is hereby granted to use or copy this program
  11. * for any purpose, provided the above notices are retained on all copies.
  12. * Permission to modify the code and to distribute modified code is granted,
  13. * provided the above notices are retained, and a notice that the code was
  14. * modified is included with the above copyright notice.
  15. */
  16. #include <stdio.h>
  17. #include "private/gc_priv.h"
  18. signed_word GC_mem_found = 0;
  19. /* Number of words of memory reclaimed */
  20. #if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
  21. word GC_fl_builder_count = 0;
  22. /* Number of threads currently building free lists without */
23. /* holding the GC lock. It is not safe to collect if this is */
  24. /* nonzero. */
25. #endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
  26. /* We defer printing of leaked objects until we're done with the GC */
  27. /* cycle, since the routine for printing objects needs to run outside */
  28. /* the collector, e.g. without the allocation lock. */
  29. #define MAX_LEAKED 40
  30. ptr_t GC_leaked[MAX_LEAKED];
  31. unsigned GC_n_leaked = 0;
  32. GC_bool GC_have_errors = FALSE;
  33. void GC_add_leaked(leaked)
  34. ptr_t leaked;
  35. {
  36. if (GC_n_leaked < MAX_LEAKED) {
  37. GC_have_errors = TRUE;
  38. GC_leaked[GC_n_leaked++] = leaked;
  39. /* Make sure it's not reclaimed this cycle */
  40. GC_set_mark_bit(leaked);
  41. }
  42. }
  43. static GC_bool printing_errors = FALSE;
  44. /* Print all objects on the list after printing any smashed objs. */
  45. /* Clear both lists. */
  46. void GC_print_all_errors ()
  47. {
  48. unsigned i;
  49. LOCK();
  50. if (printing_errors) {
  51. UNLOCK();
  52. return;
  53. }
  54. printing_errors = TRUE;
  55. UNLOCK();
  56. if (GC_debugging_started) GC_print_all_smashed();
  57. for (i = 0; i < GC_n_leaked; ++i) {
  58. ptr_t p = GC_leaked[i];
  59. if (HDR(p) -> hb_obj_kind == PTRFREE) {
  60. GC_err_printf0("Leaked atomic object at ");
  61. } else {
  62. GC_err_printf0("Leaked composite object at ");
  63. }
  64. GC_print_heap_obj(p);
  65. GC_err_printf0("\n");
  66. GC_free(p);
  67. GC_leaked[i] = 0;
  68. }
  69. GC_n_leaked = 0;
  70. printing_errors = FALSE;
  71. }
  72. # define FOUND_FREE(hblk, word_no) \
  73. { \
  74. GC_add_leaked((ptr_t)hblk + WORDS_TO_BYTES(word_no)); \
  75. }
  76. /*
  77. * reclaim phase
  78. *
  79. */
  80. /*
  81. * Test whether a block is completely empty, i.e. contains no marked
  82. * objects. This does not require the block to be in physical
  83. * memory.
  84. */
  85. GC_bool GC_block_empty(hhdr)
  86. register hdr * hhdr;
  87. {
  88. /* We treat hb_marks as an array of words here, even if it is */
  89. /* actually an array of bytes. Since we only check for zero, there */
  90. /* are no endian-ness issues. */
  91. register word *p = (word *)(&(hhdr -> hb_marks[0]));
  92. register word * plim =
  93. (word *)(&(hhdr -> hb_marks[MARK_BITS_SZ]));
  94. while (p < plim) {
  95. if (*p++) return(FALSE);
  96. }
  97. return(TRUE);
  98. }
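/* [Editorial note, not part of the original file] A concrete reading of */
/* the zero test above, assuming 32-bit words and one mark bit per word: */
/* a block of 4-word objects that is fully marked has every mark word    */
/* equal to 0x11111111, while a block with no surviving objects has all  */
/* mark words zero, so the loop above returns FALSE at the first nonzero */
/* word and TRUE only if the whole mark array is clear.                  */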
  99. /* The following functions sometimes return a DONT_KNOW value. */
  100. #define DONT_KNOW 2
  101. #ifdef SMALL_CONFIG
  102. # define GC_block_nearly_full1(hhdr, pat1) DONT_KNOW
  103. # define GC_block_nearly_full3(hhdr, pat1, pat2) DONT_KNOW
  104. # define GC_block_nearly_full(hhdr) DONT_KNOW
  105. #endif
  106. #if !defined(SMALL_CONFIG) && defined(USE_MARK_BYTES)
  107. # define GC_block_nearly_full1(hhdr, pat1) GC_block_nearly_full(hhdr)
  108. # define GC_block_nearly_full3(hhdr, pat1, pat2) GC_block_nearly_full(hhdr)
  109. GC_bool GC_block_nearly_full(hhdr)
  110. register hdr * hhdr;
  111. {
  112. /* We again treat hb_marks as an array of words, even though it */
  113. /* isn't. We first sum up all the words, resulting in a word */
114. /* containing 4 or 8 separate partial sums. */
115. /* We then sum the bytes in the word of partial sums. */
116. /* This is still endian independent. This fails if the partial */
117. /* sums can overflow. */
  118. # if (BYTES_TO_WORDS(MARK_BITS_SZ)) >= 256
  119. --> potential overflow; fix the code
  120. # endif
  121. register word *p = (word *)(&(hhdr -> hb_marks[0]));
  122. register word * plim =
  123. (word *)(&(hhdr -> hb_marks[MARK_BITS_SZ]));
  124. word sum_vector = 0;
  125. unsigned sum;
  126. while (p < plim) {
  127. sum_vector += *p;
  128. ++p;
  129. }
  130. sum = 0;
  131. while (sum_vector > 0) {
  132. sum += sum_vector & 0xff;
  133. sum_vector >>= 8;
  134. }
  135. return (sum > BYTES_TO_WORDS(7*HBLKSIZE/8)/(hhdr -> hb_sz));
  136. }
  137. #endif /* USE_MARK_BYTES */
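/* [Editorial sketch, not part of the original file] With USE_MARK_BYTES */
/* each hb_marks entry is assumed to be a byte holding 0 or 1. Reading   */
/* the array a word at a time, a 32-bit word holding the bytes 1,0,1,1   */
/* adds one to three of the four byte lanes of sum_vector. At most       */
/* BYTES_TO_WORDS(MARK_BITS_SZ) words are accumulated, so each lane      */
/* stays below 256 (the #if above enforces this) and no carry crosses    */
/* lane boundaries. Folding the lanes then yields the number of marked   */
/* objects, which is compared against roughly 7/8 of the number of       */
/* objects of size hb_sz that fit in a block.                            */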
  138. #if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
  139. /*
  140. * Test whether nearly all of the mark words consist of the same
  141. * repeating pattern.
  142. */
  143. #define FULL_THRESHOLD (MARK_BITS_SZ/16)
  144. GC_bool GC_block_nearly_full1(hhdr, pat1)
  145. hdr *hhdr;
  146. word pat1;
  147. {
  148. unsigned i;
  149. unsigned misses = 0;
  150. GC_ASSERT((MARK_BITS_SZ & 1) == 0);
  151. for (i = 0; i < MARK_BITS_SZ; ++i) {
  152. if ((hhdr -> hb_marks[i] | ~pat1) != ONES) {
  153. if (++misses > FULL_THRESHOLD) return FALSE;
  154. }
  155. }
  156. return TRUE;
  157. }
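/* [Editorial example, not part of the original file] The test           */
/* (hb_marks[i] | ~pat1) != ONES counts a miss exactly when some bit     */
/* that is set in pat1 is clear in the mark word. For instance, for      */
/* 2-word objects on a 32-bit target, pat1 is 0x55555555; a mark word    */
/* of 0x55555545 (one object unmarked) gives                             */
/*     0x55555545 | ~0x55555555 == 0xffffffef != ONES                    */
/* and is counted as a miss. Up to FULL_THRESHOLD such words are         */
/* tolerated before the block stops counting as nearly full.             */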
  158. /*
  159. * Test whether the same repeating 3 word pattern occurs in nearly
  160. * all the mark bit slots.
  161. * This is used as a heuristic, so we're a bit sloppy and ignore
  162. * the last one or two words.
  163. */
  164. GC_bool GC_block_nearly_full3(hhdr, pat1, pat2, pat3)
  165. hdr *hhdr;
  166. word pat1, pat2, pat3;
  167. {
  168. unsigned i;
  169. unsigned misses = 0;
  170. if (MARK_BITS_SZ < 4) {
  171. return DONT_KNOW;
  172. }
  173. for (i = 0; i < MARK_BITS_SZ - 2; i += 3) {
  174. if ((hhdr -> hb_marks[i] | ~pat1) != ONES) {
  175. if (++misses > FULL_THRESHOLD) return FALSE;
  176. }
  177. if ((hhdr -> hb_marks[i+1] | ~pat2) != ONES) {
  178. if (++misses > FULL_THRESHOLD) return FALSE;
  179. }
  180. if ((hhdr -> hb_marks[i+2] | ~pat3) != ONES) {
  181. if (++misses > FULL_THRESHOLD) return FALSE;
  182. }
  183. }
  184. return TRUE;
  185. }
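/* [Editorial example, not part of the original file] Why the pattern    */
/* has period 3: for 6-word objects on a 32-bit target, objects start at */
/* word offsets 0, 6, 12, ..., so the mark bits in three consecutive     */
/* 32-bit mark words fall at bit positions {0,6,12,18,24,30},            */
/* {4,10,16,22,28} and {2,8,14,20,26}, i.e. the constants 0x41041041,    */
/* 0x10410410 and 0x04104104 passed by GC_block_nearly_full below.       */
/* After 96 bits (a multiple of 6) the three-word pattern repeats.       */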
  186. /* Check whether a small object block is nearly full by looking at only */
  187. /* the mark bits. */
  188. /* We manually precomputed the mark bit patterns that need to be */
  189. /* checked for, and we give up on the ones that are unlikely to occur, */
  190. /* or have period > 3. */
  191. /* This would be a lot easier with a mark bit per object instead of per */
192. /* word, but that would require computing object numbers in the mark */
  193. /* loop, which would require different data structures ... */
  194. GC_bool GC_block_nearly_full(hhdr)
  195. hdr *hhdr;
  196. {
  197. int sz = hhdr -> hb_sz;
  198. # if CPP_WORDSZ != 32 && CPP_WORDSZ != 64
  199. return DONT_KNOW; /* Shouldn't be used in any standard config. */
  200. # endif
  201. # if CPP_WORDSZ == 32
  202. switch(sz) {
  203. case 1:
  204. return GC_block_nearly_full1(hhdr, 0xffffffffl);
  205. case 2:
  206. return GC_block_nearly_full1(hhdr, 0x55555555l);
  207. case 4:
  208. return GC_block_nearly_full1(hhdr, 0x11111111l);
  209. case 6:
  210. return GC_block_nearly_full3(hhdr, 0x41041041l,
  211. 0x10410410l,
  212. 0x04104104l);
  213. case 8:
  214. return GC_block_nearly_full1(hhdr, 0x01010101l);
  215. case 12:
  216. return GC_block_nearly_full3(hhdr, 0x01001001l,
  217. 0x10010010l,
  218. 0x00100100l);
  219. case 16:
  220. return GC_block_nearly_full1(hhdr, 0x00010001l);
  221. case 32:
  222. return GC_block_nearly_full1(hhdr, 0x00000001l);
  223. default:
  224. return DONT_KNOW;
  225. }
  226. # endif
  227. # if CPP_WORDSZ == 64
  228. switch(sz) {
  229. case 1:
  230. return GC_block_nearly_full1(hhdr, 0xffffffffffffffffl);
  231. case 2:
  232. return GC_block_nearly_full1(hhdr, 0x5555555555555555l);
  233. case 4:
  234. return GC_block_nearly_full1(hhdr, 0x1111111111111111l);
  235. case 6:
  236. return GC_block_nearly_full3(hhdr, 0x1041041041041041l,
  237. 0x4104104104104104l,
  238. 0x0410410410410410l);
  239. case 8:
  240. return GC_block_nearly_full1(hhdr, 0x0101010101010101l);
  241. case 12:
  242. return GC_block_nearly_full3(hhdr, 0x1001001001001001l,
  243. 0x0100100100100100l,
  244. 0x0010010010010010l);
  245. case 16:
  246. return GC_block_nearly_full1(hhdr, 0x0001000100010001l);
  247. case 32:
  248. return GC_block_nearly_full1(hhdr, 0x0000000100000001l);
  249. default:
  250. return DONT_KNOW;
  251. }
  252. # endif
  253. }
  254. #endif /* !SMALL_CONFIG && !USE_MARK_BYTES */
  255. /* We keep track of reclaimed memory if we are either asked to, or */
  256. /* we are using the parallel marker. In the latter case, we assume */
  257. /* that most allocation goes through GC_malloc_many for scalability. */
  258. /* GC_malloc_many needs the count anyway. */
  259. # if defined(GATHERSTATS) || defined(PARALLEL_MARK)
  260. # define INCR_WORDS(sz) n_words_found += (sz)
  261. # define COUNT_PARAM , count
  262. # define COUNT_ARG , count
  263. # define COUNT_DECL signed_word * count;
  264. # define NWORDS_DECL signed_word n_words_found = 0;
  265. # define COUNT_UPDATE *count += n_words_found;
  266. # define MEM_FOUND_ADDR , &GC_mem_found
  267. # else
  268. # define INCR_WORDS(sz)
  269. # define COUNT_PARAM
  270. # define COUNT_ARG
  271. # define COUNT_DECL
  272. # define NWORDS_DECL
  273. # define COUNT_UPDATE
  274. # define MEM_FOUND_ADDR
  275. # endif
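/* [Editorial note, not part of the original file] With GATHERSTATS or   */
/* PARALLEL_MARK defined, the macros above make each reclaim routine     */
/* take a trailing "signed_word *count" argument and add the number of   */
/* words it recovered to *count; MEM_FOUND_ADDR passes &GC_mem_found at  */
/* the call sites in this file. Otherwise all of this machinery expands  */
/* to nothing and the extra parameter disappears.                        */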
  276. /*
  277. * Restore unmarked small objects in h of size sz to the object
  278. * free list. Returns the new list.
  279. * Clears unmarked objects.
  280. */
  281. /*ARGSUSED*/
  282. ptr_t GC_reclaim_clear(hbp, hhdr, sz, list COUNT_PARAM)
  283. register struct hblk *hbp; /* ptr to current heap block */
  284. register hdr * hhdr;
  285. register ptr_t list;
  286. register word sz;
  287. COUNT_DECL
  288. {
  289. register int word_no;
  290. register word *p, *q, *plim;
  291. NWORDS_DECL
  292. GC_ASSERT(hhdr == GC_find_header((ptr_t)hbp));
  293. p = (word *)(hbp->hb_body);
  294. word_no = 0;
  295. plim = (word *)((((word)hbp) + HBLKSIZE)
  296. - WORDS_TO_BYTES(sz));
  297. /* go through all words in block */
  298. while( p <= plim ) {
  299. if( mark_bit_from_hdr(hhdr, word_no) ) {
  300. p += sz;
  301. } else {
  302. INCR_WORDS(sz);
  303. /* object is available - put on list */
  304. obj_link(p) = list;
  305. list = ((ptr_t)p);
  306. /* Clear object, advance p to next object in the process */
  307. q = p + sz;
  308. # ifdef USE_MARK_BYTES
  309. GC_ASSERT(!(sz & 1)
  310. && !((word)p & (2 * sizeof(word) - 1)));
  311. p[1] = 0;
  312. p += 2;
  313. while (p < q) {
  314. CLEAR_DOUBLE(p);
  315. p += 2;
  316. }
  317. # else
  318. p++; /* Skip link field */
  319. while (p < q) {
  320. *p++ = 0;
  321. }
  322. # endif
  323. }
  324. word_no += sz;
  325. }
  326. COUNT_UPDATE
  327. return(list);
  328. }
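/* [Editorial sketch, not part of the original file] The free list is    */
/* threaded through the reclaimed objects themselves: for each unmarked  */
/* object p, obj_link(p) = list stores the old head in p's first word    */
/* and p becomes the new head, so the rebuilt list is LIFO, e.g.         */
/*                                                                       */
/*     before:  list -> A -> B        after:  list -> p -> A -> B        */
/*                                                                       */
/* with the words of p after the link field cleared by the loop above.   */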
  329. #if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
  330. /*
  331. * A special case for 2 word composite objects (e.g. cons cells):
  332. */
  333. /*ARGSUSED*/
  334. ptr_t GC_reclaim_clear2(hbp, hhdr, list COUNT_PARAM)
  335. register struct hblk *hbp; /* ptr to current heap block */
  336. hdr * hhdr;
  337. register ptr_t list;
  338. COUNT_DECL
  339. {
  340. register word * mark_word_addr = &(hhdr->hb_marks[0]);
  341. register word *p, *plim;
  342. register word mark_word;
  343. register int i;
  344. NWORDS_DECL
  345. # define DO_OBJ(start_displ) \
  346. if (!(mark_word & ((word)1 << start_displ))) { \
  347. p[start_displ] = (word)list; \
  348. list = (ptr_t)(p+start_displ); \
  349. p[start_displ+1] = 0; \
  350. INCR_WORDS(2); \
  351. }
  352. p = (word *)(hbp->hb_body);
  353. plim = (word *)(((word)hbp) + HBLKSIZE);
  354. /* go through all words in block */
  355. while( p < plim ) {
  356. mark_word = *mark_word_addr++;
  357. for (i = 0; i < WORDSZ; i += 8) {
  358. DO_OBJ(0);
  359. DO_OBJ(2);
  360. DO_OBJ(4);
  361. DO_OBJ(6);
  362. p += 8;
  363. mark_word >>= 8;
  364. }
  365. }
  366. COUNT_UPDATE
  367. return(list);
  368. # undef DO_OBJ
  369. }
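/* [Editorial note, not part of the original file] The inner loop above  */
/* consumes the mark word 8 bits at a time: for 2-word objects those 8   */
/* bits cover the four objects at word offsets 0, 2, 4 and 6 of the      */
/* current group (only the even bits are ever tested), which is why      */
/* there are four DO_OBJ steps before p advances by 8 and mark_word is   */
/* shifted right by 8.                                                   */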
  370. /*
  371. * Another special case for 4 word composite objects:
  372. */
  373. /*ARGSUSED*/
  374. ptr_t GC_reclaim_clear4(hbp, hhdr, list COUNT_PARAM)
  375. register struct hblk *hbp; /* ptr to current heap block */
  376. hdr * hhdr;
  377. register ptr_t list;
  378. COUNT_DECL
  379. {
  380. register word * mark_word_addr = &(hhdr->hb_marks[0]);
  381. register word *p, *plim;
  382. register word mark_word;
  383. NWORDS_DECL
  384. # define DO_OBJ(start_displ) \
  385. if (!(mark_word & ((word)1 << start_displ))) { \
  386. p[start_displ] = (word)list; \
  387. list = (ptr_t)(p+start_displ); \
  388. p[start_displ+1] = 0; \
  389. CLEAR_DOUBLE(p + start_displ + 2); \
  390. INCR_WORDS(4); \
  391. }
  392. p = (word *)(hbp->hb_body);
  393. plim = (word *)(((word)hbp) + HBLKSIZE);
  394. /* go through all words in block */
  395. while( p < plim ) {
  396. mark_word = *mark_word_addr++;
  397. DO_OBJ(0);
  398. DO_OBJ(4);
  399. DO_OBJ(8);
  400. DO_OBJ(12);
  401. DO_OBJ(16);
  402. DO_OBJ(20);
  403. DO_OBJ(24);
  404. DO_OBJ(28);
  405. # if CPP_WORDSZ == 64
  406. DO_OBJ(32);
  407. DO_OBJ(36);
  408. DO_OBJ(40);
  409. DO_OBJ(44);
  410. DO_OBJ(48);
  411. DO_OBJ(52);
  412. DO_OBJ(56);
  413. DO_OBJ(60);
  414. # endif
  415. p += WORDSZ;
  416. }
  417. COUNT_UPDATE
  418. return(list);
  419. # undef DO_OBJ
  420. }
  421. #endif /* !SMALL_CONFIG && !USE_MARK_BYTES */
  422. /* The same thing, but don't clear objects: */
  423. /*ARGSUSED*/
  424. ptr_t GC_reclaim_uninit(hbp, hhdr, sz, list COUNT_PARAM)
  425. register struct hblk *hbp; /* ptr to current heap block */
  426. register hdr * hhdr;
  427. register ptr_t list;
  428. register word sz;
  429. COUNT_DECL
  430. {
  431. register int word_no = 0;
  432. register word *p, *plim;
  433. NWORDS_DECL
  434. p = (word *)(hbp->hb_body);
  435. plim = (word *)((((word)hbp) + HBLKSIZE)
  436. - WORDS_TO_BYTES(sz));
  437. /* go through all words in block */
  438. while( p <= plim ) {
  439. if( !mark_bit_from_hdr(hhdr, word_no) ) {
  440. INCR_WORDS(sz);
  441. /* object is available - put on list */
  442. obj_link(p) = list;
  443. list = ((ptr_t)p);
  444. }
  445. p += sz;
  446. word_no += sz;
  447. }
  448. COUNT_UPDATE
  449. return(list);
  450. }
  451. /* Don't really reclaim objects, just check for unmarked ones: */
  452. /*ARGSUSED*/
  453. void GC_reclaim_check(hbp, hhdr, sz)
  454. register struct hblk *hbp; /* ptr to current heap block */
  455. register hdr * hhdr;
  456. register word sz;
  457. {
  458. register int word_no = 0;
  459. register word *p, *plim;
  460. # ifdef GATHERSTATS
  461. register int n_words_found = 0;
  462. # endif
  463. p = (word *)(hbp->hb_body);
  464. plim = (word *)((((word)hbp) + HBLKSIZE)
  465. - WORDS_TO_BYTES(sz));
  466. /* go through all words in block */
  467. while( p <= plim ) {
  468. if( !mark_bit_from_hdr(hhdr, word_no) ) {
  469. FOUND_FREE(hbp, word_no);
  470. }
  471. p += sz;
  472. word_no += sz;
  473. }
  474. }
  475. #if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
  476. /*
  477. * Another special case for 2 word atomic objects:
  478. */
  479. /*ARGSUSED*/
  480. ptr_t GC_reclaim_uninit2(hbp, hhdr, list COUNT_PARAM)
  481. register struct hblk *hbp; /* ptr to current heap block */
  482. hdr * hhdr;
  483. register ptr_t list;
  484. COUNT_DECL
  485. {
  486. register word * mark_word_addr = &(hhdr->hb_marks[0]);
  487. register word *p, *plim;
  488. register word mark_word;
  489. register int i;
  490. NWORDS_DECL
  491. # define DO_OBJ(start_displ) \
  492. if (!(mark_word & ((word)1 << start_displ))) { \
  493. p[start_displ] = (word)list; \
  494. list = (ptr_t)(p+start_displ); \
  495. INCR_WORDS(2); \
  496. }
  497. p = (word *)(hbp->hb_body);
  498. plim = (word *)(((word)hbp) + HBLKSIZE);
  499. /* go through all words in block */
  500. while( p < plim ) {
  501. mark_word = *mark_word_addr++;
  502. for (i = 0; i < WORDSZ; i += 8) {
  503. DO_OBJ(0);
  504. DO_OBJ(2);
  505. DO_OBJ(4);
  506. DO_OBJ(6);
  507. p += 8;
  508. mark_word >>= 8;
  509. }
  510. }
  511. COUNT_UPDATE
  512. return(list);
  513. # undef DO_OBJ
  514. }
  515. /*
  516. * Another special case for 4 word atomic objects:
  517. */
  518. /*ARGSUSED*/
  519. ptr_t GC_reclaim_uninit4(hbp, hhdr, list COUNT_PARAM)
  520. register struct hblk *hbp; /* ptr to current heap block */
  521. hdr * hhdr;
  522. register ptr_t list;
  523. COUNT_DECL
  524. {
  525. register word * mark_word_addr = &(hhdr->hb_marks[0]);
  526. register word *p, *plim;
  527. register word mark_word;
  528. NWORDS_DECL
  529. # define DO_OBJ(start_displ) \
  530. if (!(mark_word & ((word)1 << start_displ))) { \
  531. p[start_displ] = (word)list; \
  532. list = (ptr_t)(p+start_displ); \
  533. INCR_WORDS(4); \
  534. }
  535. p = (word *)(hbp->hb_body);
  536. plim = (word *)(((word)hbp) + HBLKSIZE);
  537. /* go through all words in block */
  538. while( p < plim ) {
  539. mark_word = *mark_word_addr++;
  540. DO_OBJ(0);
  541. DO_OBJ(4);
  542. DO_OBJ(8);
  543. DO_OBJ(12);
  544. DO_OBJ(16);
  545. DO_OBJ(20);
  546. DO_OBJ(24);
  547. DO_OBJ(28);
  548. # if CPP_WORDSZ == 64
  549. DO_OBJ(32);
  550. DO_OBJ(36);
  551. DO_OBJ(40);
  552. DO_OBJ(44);
  553. DO_OBJ(48);
  554. DO_OBJ(52);
  555. DO_OBJ(56);
  556. DO_OBJ(60);
  557. # endif
  558. p += WORDSZ;
  559. }
  560. COUNT_UPDATE
  561. return(list);
  562. # undef DO_OBJ
  563. }
  564. /* Finally the one word case, which never requires any clearing: */
  565. /*ARGSUSED*/
  566. ptr_t GC_reclaim1(hbp, hhdr, list COUNT_PARAM)
  567. register struct hblk *hbp; /* ptr to current heap block */
  568. hdr * hhdr;
  569. register ptr_t list;
  570. COUNT_DECL
  571. {
  572. register word * mark_word_addr = &(hhdr->hb_marks[0]);
  573. register word *p, *plim;
  574. register word mark_word;
  575. register int i;
  576. NWORDS_DECL
  577. # define DO_OBJ(start_displ) \
  578. if (!(mark_word & ((word)1 << start_displ))) { \
  579. p[start_displ] = (word)list; \
  580. list = (ptr_t)(p+start_displ); \
  581. INCR_WORDS(1); \
  582. }
  583. p = (word *)(hbp->hb_body);
  584. plim = (word *)(((word)hbp) + HBLKSIZE);
  585. /* go through all words in block */
  586. while( p < plim ) {
  587. mark_word = *mark_word_addr++;
  588. for (i = 0; i < WORDSZ; i += 4) {
  589. DO_OBJ(0);
  590. DO_OBJ(1);
  591. DO_OBJ(2);
  592. DO_OBJ(3);
  593. p += 4;
  594. mark_word >>= 4;
  595. }
  596. }
  597. COUNT_UPDATE
  598. return(list);
  599. # undef DO_OBJ
  600. }
  601. #endif /* !SMALL_CONFIG && !USE_MARK_BYTES */
  602. /*
  603. * Generic procedure to rebuild a free list in hbp.
  604. * Also called directly from GC_malloc_many.
  605. */
  606. ptr_t GC_reclaim_generic(hbp, hhdr, sz, init, list COUNT_PARAM)
  607. struct hblk *hbp; /* ptr to current heap block */
  608. hdr * hhdr;
  609. GC_bool init;
  610. ptr_t list;
  611. word sz;
  612. COUNT_DECL
  613. {
  614. ptr_t result = list;
  615. GC_ASSERT(GC_find_header((ptr_t)hbp) == hhdr);
  616. GC_remove_protection(hbp, 1, (hhdr)->hb_descr == 0 /* Pointer-free? */);
  617. if (init) {
  618. switch(sz) {
  619. # if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
  620. case 1:
  621. /* We now issue the hint even if GC_nearly_full returned */
  622. /* DONT_KNOW. */
  623. result = GC_reclaim1(hbp, hhdr, list COUNT_ARG);
  624. break;
  625. case 2:
  626. result = GC_reclaim_clear2(hbp, hhdr, list COUNT_ARG);
  627. break;
  628. case 4:
  629. result = GC_reclaim_clear4(hbp, hhdr, list COUNT_ARG);
  630. break;
  631. # endif /* !SMALL_CONFIG && !USE_MARK_BYTES */
  632. default:
  633. result = GC_reclaim_clear(hbp, hhdr, sz, list COUNT_ARG);
  634. break;
  635. }
  636. } else {
  637. GC_ASSERT((hhdr)->hb_descr == 0 /* Pointer-free block */);
  638. switch(sz) {
  639. # if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
  640. case 1:
  641. result = GC_reclaim1(hbp, hhdr, list COUNT_ARG);
  642. break;
  643. case 2:
  644. result = GC_reclaim_uninit2(hbp, hhdr, list COUNT_ARG);
  645. break;
  646. case 4:
  647. result = GC_reclaim_uninit4(hbp, hhdr, list COUNT_ARG);
  648. break;
  649. # endif /* !SMALL_CONFIG && !USE_MARK_BYTES */
  650. default:
  651. result = GC_reclaim_uninit(hbp, hhdr, sz, list COUNT_ARG);
  652. break;
  653. }
  654. }
  655. if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) GC_set_hdr_marks(hhdr);
  656. return result;
  657. }
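/* [Editorial note, not part of the original file] sz is measured in     */
/* words, so on a 32-bit target the case 2 and case 4 fast paths above   */
/* handle 8- and 16-byte objects. init is TRUE when the object kind      */
/* requests initialized memory or the debugging allocator is active      */
/* (see GC_reclaim_small_nonempty_block below); when it is FALSE the     */
/* block must be pointer-free, as the GC_ASSERT checks.                  */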
  658. /*
  659. * Restore unmarked small objects in the block pointed to by hbp
  660. * to the appropriate object free list.
  661. * If entirely empty blocks are to be completely deallocated, then
  662. * caller should perform that check.
  663. */
  664. void GC_reclaim_small_nonempty_block(hbp, report_if_found COUNT_PARAM)
  665. register struct hblk *hbp; /* ptr to current heap block */
  666. int report_if_found; /* Abort if a reclaimable object is found */
  667. COUNT_DECL
  668. {
  669. hdr *hhdr = HDR(hbp);
  670. word sz = hhdr -> hb_sz;
  671. int kind = hhdr -> hb_obj_kind;
  672. struct obj_kind * ok = &GC_obj_kinds[kind];
  673. ptr_t * flh = &(ok -> ok_freelist[sz]);
  674. hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
  675. if (report_if_found) {
  676. GC_reclaim_check(hbp, hhdr, sz);
  677. } else {
  678. *flh = GC_reclaim_generic(hbp, hhdr, sz,
  679. (ok -> ok_init || GC_debugging_started),
  680. *flh MEM_FOUND_ADDR);
  681. }
  682. }
  683. /*
684. * Restore an unmarked large object or an entirely empty block of small objects
  685. * to the heap block free list.
  686. * Otherwise enqueue the block for later processing
  687. * by GC_reclaim_small_nonempty_block.
  688. * If report_if_found is TRUE, then process any block immediately, and
  689. * simply report free objects; do not actually reclaim them.
  690. */
  691. # if defined(__STDC__) || defined(__cplusplus)
  692. void GC_reclaim_block(register struct hblk *hbp, word report_if_found)
  693. # else
  694. void GC_reclaim_block(hbp, report_if_found)
  695. register struct hblk *hbp; /* ptr to current heap block */
  696. word report_if_found; /* Abort if a reclaimable object is found */
  697. # endif
  698. {
  699. register hdr * hhdr;
  700. register word sz; /* size of objects in current block */
  701. register struct obj_kind * ok;
  702. struct hblk ** rlh;
  703. hhdr = HDR(hbp);
  704. sz = hhdr -> hb_sz;
  705. ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
  706. if( sz > MAXOBJSZ ) { /* 1 big object */
  707. if( !mark_bit_from_hdr(hhdr, 0) ) {
  708. if (report_if_found) {
  709. FOUND_FREE(hbp, 0);
  710. } else {
  711. word blocks = OBJ_SZ_TO_BLOCKS(sz);
  712. if (blocks > 1) {
  713. GC_large_allocd_bytes -= blocks * HBLKSIZE;
  714. }
  715. # ifdef GATHERSTATS
  716. GC_mem_found += sz;
  717. # endif
  718. GC_freehblk(hbp);
  719. }
  720. }
  721. } else {
  722. GC_bool empty = GC_block_empty(hhdr);
  723. if (report_if_found) {
  724. GC_reclaim_small_nonempty_block(hbp, (int)report_if_found
  725. MEM_FOUND_ADDR);
  726. } else if (empty) {
  727. # ifdef GATHERSTATS
  728. GC_mem_found += BYTES_TO_WORDS(HBLKSIZE);
  729. # endif
  730. GC_freehblk(hbp);
  731. } else if (TRUE != GC_block_nearly_full(hhdr)){
  732. /* group of smaller objects, enqueue the real work */
  733. rlh = &(ok -> ok_reclaim_list[sz]);
  734. hhdr -> hb_next = *rlh;
  735. *rlh = hbp;
  736. } /* else not worth salvaging. */
  737. /* We used to do the nearly_full check later, but we */
  738. /* already have the right cache context here. Also */
  739. /* doing it here avoids some silly lock contention in */
  740. /* GC_malloc_many. */
  741. }
  742. }
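/* [Editorial summary, not part of the original file] A block with       */
/* sz > MAXOBJSZ holds a single object, so a clear mark bit 0 returns    */
/* the whole block to the heap block free list. Small-object blocks are  */
/* freed outright when completely empty, skipped when nearly full, and   */
/* otherwise queued on the per-size reclaim list for                     */
/* GC_reclaim_small_nonempty_block to sweep lazily on demand.            */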
  743. #if !defined(NO_DEBUGGING)
  744. /* Routines to gather and print heap block info */
  745. /* intended for debugging. Otherwise should be called */
  746. /* with lock. */
  747. struct Print_stats
  748. {
  749. size_t number_of_blocks;
  750. size_t total_bytes;
  751. };
  752. #ifdef USE_MARK_BYTES
  753. /* Return the number of set mark bits in the given header */
  754. int GC_n_set_marks(hhdr)
  755. hdr * hhdr;
  756. {
  757. register int result = 0;
  758. register int i;
  759. for (i = 0; i < MARK_BITS_SZ; i++) {
  760. result += hhdr -> hb_marks[i];
  761. }
  762. return(result);
  763. }
  764. #else
  765. /* Number of set bits in a word. Not performance critical. */
  766. static int set_bits(n)
  767. word n;
  768. {
  769. register word m = n;
  770. register int result = 0;
  771. while (m > 0) {
  772. if (m & 1) result++;
  773. m >>= 1;
  774. }
  775. return(result);
  776. }
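/* [Editorial aside, not part of the original file] Since this is not    */
/* performance critical the linear scan above is fine; an equivalent     */
/* alternative that loops once per set bit is the classic trick          */
/*                                                                       */
/*     for (result = 0; m != 0; m &= m - 1) result++;                    */
/*                                                                       */
/* shown here only for illustration, not as a change to the code.        */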
  777. /* Return the number of set mark bits in the given header */
  778. int GC_n_set_marks(hhdr)
  779. hdr * hhdr;
  780. {
  781. register int result = 0;
  782. register int i;
  783. for (i = 0; i < MARK_BITS_SZ; i++) {
  784. result += set_bits(hhdr -> hb_marks[i]);
  785. }
  786. return(result);
  787. }
  788. #endif /* !USE_MARK_BYTES */
  789. /*ARGSUSED*/
  790. # if defined(__STDC__) || defined(__cplusplus)
  791. void GC_print_block_descr(struct hblk *h, word dummy)
  792. # else
  793. void GC_print_block_descr(h, dummy)
  794. struct hblk *h;
  795. word dummy;
  796. # endif
  797. {
  798. register hdr * hhdr = HDR(h);
  799. register size_t bytes = WORDS_TO_BYTES(hhdr -> hb_sz);
  800. struct Print_stats *ps;
  801. GC_printf3("(%lu:%lu,%lu)", (unsigned long)(hhdr -> hb_obj_kind),
  802. (unsigned long)bytes,
  803. (unsigned long)(GC_n_set_marks(hhdr)));
  804. bytes += HBLKSIZE-1;
  805. bytes &= ~(HBLKSIZE-1);
  806. ps = (struct Print_stats *)dummy;
  807. ps->total_bytes += bytes;
  808. ps->number_of_blocks++;
  809. }
  810. void GC_print_block_list()
  811. {
  812. struct Print_stats pstats;
  813. GC_printf1("(kind(0=ptrfree,1=normal,2=unc.,%lu=stubborn):size_in_bytes, #_marks_set)\n", STUBBORN);
  814. pstats.number_of_blocks = 0;
  815. pstats.total_bytes = 0;
  816. GC_apply_to_all_blocks(GC_print_block_descr, (word)&pstats);
  817. GC_printf2("\nblocks = %lu, bytes = %lu\n",
  818. (unsigned long)pstats.number_of_blocks,
  819. (unsigned long)pstats.total_bytes);
  820. }
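/* [Editorial note, not part of the original file] Each block prints as  */
/* (kind:size_in_bytes,marks), so a hypothetical entry (1:16,42) would   */
/* describe a NORMAL-kind block of 16-byte objects with 42 mark bits     */
/* set. The final totals round each block's size up to a multiple of     */
/* HBLKSIZE, as done in GC_print_block_descr above.                      */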
  821. #endif /* NO_DEBUGGING */
  822. /*
  823. * Clear all obj_link pointers in the list of free objects *flp.
  824. * Clear *flp.
  825. * This must be done before dropping a list of free gcj-style objects,
826. * since we may otherwise end up with dangling "descriptor" pointers.
  827. * It may help for other pointer-containing objects.
  828. */
  829. void GC_clear_fl_links(flp)
  830. ptr_t *flp;
  831. {
  832. ptr_t next = *flp;
  833. while (0 != next) {
  834. *flp = 0;
  835. flp = &(obj_link(next));
  836. next = *flp;
  837. }
  838. }
  839. /*
  840. * Perform GC_reclaim_block on the entire heap, after first clearing
  841. * small object free lists (if we are not just looking for leaks).
  842. */
  843. void GC_start_reclaim(report_if_found)
  844. int report_if_found; /* Abort if a GC_reclaimable object is found */
  845. {
  846. int kind;
  847. # if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
  848. GC_ASSERT(0 == GC_fl_builder_count);
  849. # endif
  850. /* Clear reclaim- and free-lists */
  851. for (kind = 0; kind < GC_n_kinds; kind++) {
  852. ptr_t *fop;
  853. ptr_t *lim;
  854. struct hblk ** rlp;
  855. struct hblk ** rlim;
  856. struct hblk ** rlist = GC_obj_kinds[kind].ok_reclaim_list;
  857. GC_bool should_clobber = (GC_obj_kinds[kind].ok_descriptor != 0);
  858. if (rlist == 0) continue; /* This kind not used. */
  859. if (!report_if_found) {
  860. lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJSZ+1]);
  861. for( fop = GC_obj_kinds[kind].ok_freelist; fop < lim; fop++ ) {
  862. if (*fop != 0) {
  863. if (should_clobber) {
  864. GC_clear_fl_links(fop);
  865. } else {
  866. *fop = 0;
  867. }
  868. }
  869. }
  870. } /* otherwise free list objects are marked, */
871. /* and it's safe to leave them */
  872. rlim = rlist + MAXOBJSZ+1;
  873. for( rlp = rlist; rlp < rlim; rlp++ ) {
  874. *rlp = 0;
  875. }
  876. }
  877. # ifdef PRINTBLOCKS
  878. GC_printf0("GC_reclaim: current block sizes:\n");
  879. GC_print_block_list();
  880. # endif
  881. /* Go through all heap blocks (in hblklist) and reclaim unmarked objects */
  882. /* or enqueue the block for later processing. */
  883. GC_apply_to_all_blocks(GC_reclaim_block, (word)report_if_found);
  884. # ifdef EAGER_SWEEP
  885. /* This is a very stupid thing to do. We make it possible anyway, */
  886. /* so that you can convince yourself that it really is very stupid. */
  887. GC_reclaim_all((GC_stop_func)0, FALSE);
  888. # endif
  889. # if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
  890. GC_ASSERT(0 == GC_fl_builder_count);
  891. # endif
  892. }
  893. /*
  894. * Sweep blocks of the indicated object size and kind until either the
  895. * appropriate free list is nonempty, or there are no more blocks to
  896. * sweep.
  897. */
  898. void GC_continue_reclaim(sz, kind)
  899. word sz; /* words */
  900. int kind;
  901. {
  902. register hdr * hhdr;
  903. register struct hblk * hbp;
  904. register struct obj_kind * ok = &(GC_obj_kinds[kind]);
  905. struct hblk ** rlh = ok -> ok_reclaim_list;
  906. ptr_t *flh = &(ok -> ok_freelist[sz]);
  907. if (rlh == 0) return; /* No blocks of this kind. */
  908. rlh += sz;
  909. while ((hbp = *rlh) != 0) {
  910. hhdr = HDR(hbp);
  911. *rlh = hhdr -> hb_next;
  912. GC_reclaim_small_nonempty_block(hbp, FALSE MEM_FOUND_ADDR);
  913. if (*flh != 0) break;
  914. }
  915. }
  916. /*
  917. * Reclaim all small blocks waiting to be reclaimed.
  918. * Abort and return FALSE when/if (*stop_func)() returns TRUE.
  919. * If this returns TRUE, then it's safe to restart the world
  920. * with incorrectly cleared mark bits.
  921. * If ignore_old is TRUE, then reclaim only blocks that have been
  922. * recently reclaimed, and discard the rest.
  923. * Stop_func may be 0.
  924. */
  925. GC_bool GC_reclaim_all(stop_func, ignore_old)
  926. GC_stop_func stop_func;
  927. GC_bool ignore_old;
  928. {
  929. register word sz;
  930. register int kind;
  931. register hdr * hhdr;
  932. register struct hblk * hbp;
  933. register struct obj_kind * ok;
  934. struct hblk ** rlp;
  935. struct hblk ** rlh;
  936. # ifdef PRINTTIMES
  937. CLOCK_TYPE start_time;
  938. CLOCK_TYPE done_time;
  939. GET_TIME(start_time);
  940. # endif
  941. for (kind = 0; kind < GC_n_kinds; kind++) {
  942. ok = &(GC_obj_kinds[kind]);
  943. rlp = ok -> ok_reclaim_list;
  944. if (rlp == 0) continue;
  945. for (sz = 1; sz <= MAXOBJSZ; sz++) {
  946. rlh = rlp + sz;
  947. while ((hbp = *rlh) != 0) {
  948. if (stop_func != (GC_stop_func)0 && (*stop_func)()) {
  949. return(FALSE);
  950. }
  951. hhdr = HDR(hbp);
  952. *rlh = hhdr -> hb_next;
  953. if (!ignore_old || hhdr -> hb_last_reclaimed == GC_gc_no - 1) {
  954. /* It's likely we'll need it this time, too */
  955. /* It's been touched recently, so this */
  956. /* shouldn't trigger paging. */
  957. GC_reclaim_small_nonempty_block(hbp, FALSE MEM_FOUND_ADDR);
  958. }
  959. }
  960. }
  961. }
  962. # ifdef PRINTTIMES
  963. GET_TIME(done_time);
  964. GC_printf1("Disposing of reclaim lists took %lu msecs\n",
  965. MS_TIME_DIFF(done_time,start_time));
  966. # endif
  967. return(TRUE);
  968. }