
/vendor/gc/mark.c

http://github.com/feyeleanor/RubyGoLightly
  1. /*
  2. * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
  3. * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
  4. * Copyright (c) 2000 by Hewlett-Packard Company. All rights reserved.
  5. *
  6. * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
  7. * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
  8. *
  9. * Permission is hereby granted to use or copy this program
  10. * for any purpose, provided the above notices are retained on all copies.
  11. * Permission to modify the code and to distribute modified code is granted,
  12. * provided the above notices are retained, and a notice that the code was
  13. * modified is included with the above copyright notice.
  14. *
  15. */
  16. # include <stdio.h>
  17. # include "private/gc_pmark.h"
  18. #if defined(MSWIN32) && defined(__GNUC__)
  19. # include <excpt.h>
  20. #endif
  21. /* We put this here to minimize the risk of inlining. */
  22. /*VARARGS*/
  23. #ifdef __WATCOMC__
  24. void GC_noop(void *p, ...) {}
  25. #else
  26. void GC_noop() {}
  27. #endif
  28. /* Single argument version, robust against whole program analysis. */
  29. void GC_noop1(word x)
  30. {
  31. static volatile word sink;
  32. sink = x;
  33. }
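/* Illustrative sketch (not compiled): the usual client-side use of      */
/* GC_noop1.  Passing a pointer through it keeps the pointer visibly     */
/* live across a call, defeating dead-variable elimination; some gc.h    */
/* releases expose this pattern as a GC_reachable_here() macro.  The     */
/* function name below is hypothetical.                                  */
#if 0
static void example_keep_alive(void)
{
    void *obj = GC_MALLOC(64);
    /* ... hand the underlying storage to code the collector can't scan ... */
    GC_noop1((word)obj);   /* obj stays reachable at least until here */
}
#endif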
  34. /* mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0} -- declared in gc_priv.h */
  35. unsigned GC_n_mark_procs = GC_RESERVED_MARK_PROCS;
  36. /* Initialize GC_obj_kinds properly and standard free lists properly. */
  37. /* This must be done statically since they may be accessed before */
  38. /* GC_init is called. */
  39. /* It's done here, since we need to deal with mark descriptors. */
  40. struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
  41. /* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
  42. 0 | GC_DS_LENGTH, FALSE, FALSE },
  43. /* NORMAL */ { &GC_objfreelist[0], 0,
  44. 0 | GC_DS_LENGTH, /* Adjusted in GC_init_inner for EXTRA_BYTES */
  45. TRUE /* add length to descr */, TRUE },
  46. /* UNCOLLECTABLE */
  47. { &GC_uobjfreelist[0], 0,
  48. 0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
  49. # ifdef ATOMIC_UNCOLLECTABLE
  50. /* AUNCOLLECTABLE */
  51. { &GC_auobjfreelist[0], 0,
  52. 0 | GC_DS_LENGTH, FALSE /* add length to descr */, FALSE },
  53. # endif
  54. # ifdef STUBBORN_ALLOC
  55. /*STUBBORN*/ { &GC_sobjfreelist[0], 0,
  56. 0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
  57. # endif
  58. };
  59. # ifdef ATOMIC_UNCOLLECTABLE
  60. # ifdef STUBBORN_ALLOC
  61. unsigned GC_n_kinds = 5;
  62. # else
  63. unsigned GC_n_kinds = 4;
  64. # endif
  65. # else
  66. # ifdef STUBBORN_ALLOC
  67. unsigned GC_n_kinds = 4;
  68. # else
  69. unsigned GC_n_kinds = 3;
  70. # endif
  71. # endif
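/* Illustrative sketch (not compiled): the kind table above pairs each   */
/* free list with a mark descriptor (all simple GC_DS_LENGTH ones here). */
/* Client code normally obtains richer bitmap descriptors through the    */
/* typed-allocation interface, along the lines of the example in         */
/* gc_typed.h.  The struct and function names below are made up.         */
#if 0
#include "gc_typed.h"

typedef struct { int tag; char *str; } tagged_string;

static GC_descr ts_descr;

static void example_init_typed_alloc(void)
{
    GC_word bm[GC_BITMAP_SIZE(tagged_string)] = { 0 };

    /* Only the "str" word may hold a pointer. */
    GC_set_bit(bm, GC_WORD_OFFSET(tagged_string, str));
    ts_descr = GC_make_descriptor(bm, GC_WORD_LEN(tagged_string));
}

static tagged_string *example_alloc(void)
{
    return (tagged_string *)
        GC_malloc_explicitly_typed(sizeof(tagged_string), ts_descr);
}
#endif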
  72. # ifndef INITIAL_MARK_STACK_SIZE
  73. # define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
  74. /* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a */
  75. /* multiple of HBLKSIZE. */
  76. /* The incremental collector actually likes a larger */
  77. /* size, since it wants to push all marked dirty objs */
  78. /* before marking anything new. Currently we let it */
  79. /* grow dynamically. */
  80. # endif
  81. /*
  82. * Limits of stack for GC_mark routine.
  83. * All ranges between GC_mark_stack(incl.) and GC_mark_stack_top(incl.) still
  84. * need to be marked from.
  85. */
  86. word GC_n_rescuing_pages; /* Number of dirty pages we marked from */
  87. /* excludes ptrfree pages, etc. */
  88. mse * GC_mark_stack;
  89. mse * GC_mark_stack_limit;
  90. size_t GC_mark_stack_size = 0;
  91. #ifdef PARALLEL_MARK
  92. # include "atomic_ops.h"
  93. mse * volatile GC_mark_stack_top;
  94. /* Updated only with mark lock held, but read asynchronously. */
  95. volatile AO_t GC_first_nonempty;
  96. /* Lowest entry on mark stack */
  97. /* that may be nonempty. */
  98. /* Updated only by initiating */
  99. /* thread. */
  100. #else
  101. mse * GC_mark_stack_top;
  102. #endif
  103. static struct hblk * scan_ptr;
  104. mark_state_t GC_mark_state = MS_NONE;
  105. GC_bool GC_mark_stack_too_small = FALSE;
  106. GC_bool GC_objects_are_marked = FALSE; /* Are there collectable marked */
  107. /* objects in the heap? */
  108. /* Is a collection in progress? Note that this can return true in the */
  109. /* nonincremental case, if a collection has been abandoned and the */
  110. /* mark state is now MS_INVALID. */
  111. GC_bool GC_collection_in_progress(void)
  112. {
  113. return(GC_mark_state != MS_NONE);
  114. }
  115. /* clear all mark bits in the header */
  116. void GC_clear_hdr_marks(hdr *hhdr)
  117. {
  118. size_t last_bit = FINAL_MARK_BIT(hhdr -> hb_sz);
  119. # ifdef USE_MARK_BYTES
  120. BZERO(hhdr -> hb_marks, MARK_BITS_SZ);
  121. hhdr -> hb_marks[last_bit] = 1;
  122. # else
  123. BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word));
  124. set_mark_bit_from_hdr(hhdr, last_bit);
  125. # endif
  126. hhdr -> hb_n_marks = 0;
  127. }
  128. /* Set all mark bits in the header. Used for uncollectable blocks. */
  129. void GC_set_hdr_marks(hdr *hhdr)
  130. {
  131. unsigned i;
  132. size_t sz = hhdr -> hb_sz;
  133. size_t n_marks = FINAL_MARK_BIT(sz);
  134. # ifdef USE_MARK_BYTES
  135. for (i = 0; i <= n_marks; i += MARK_BIT_OFFSET(sz)) {
  136. hhdr -> hb_marks[i] = 1;
  137. }
  138. # else
  139. for (i = 0; i < divWORDSZ(n_marks + WORDSZ); ++i) {
  140. hhdr -> hb_marks[i] = ONES;
  141. }
  142. # endif
  143. # ifdef MARK_BIT_PER_OBJ
  144. hhdr -> hb_n_marks = n_marks - 1;
  145. # else
  146. hhdr -> hb_n_marks = HBLK_OBJS(sz);
  147. # endif
  148. }
  149. /*
  150. * Clear all mark bits associated with block h.
  151. */
  152. /*ARGSUSED*/
  153. static void clear_marks_for_block(struct hblk *h, word dummy)
  154. {
  155. register hdr * hhdr = HDR(h);
  156. if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return;
  157. /* Mark bit for these is cleared only once the object is */
  158. /* explicitly deallocated. This either frees the block, or */
  159. /* the bit is cleared once the object is on the free list. */
  160. GC_clear_hdr_marks(hhdr);
  161. }
  162. /* Slow but general routines for setting/clearing/asking about mark bits */
  163. void GC_set_mark_bit(ptr_t p)
  164. {
  165. struct hblk *h = HBLKPTR(p);
  166. hdr * hhdr = HDR(h);
  167. word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);
  168. if (!mark_bit_from_hdr(hhdr, bit_no)) {
  169. set_mark_bit_from_hdr(hhdr, bit_no);
  170. ++hhdr -> hb_n_marks;
  171. }
  172. }
  173. void GC_clear_mark_bit(ptr_t p)
  174. {
  175. struct hblk *h = HBLKPTR(p);
  176. hdr * hhdr = HDR(h);
  177. word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);
  178. if (mark_bit_from_hdr(hhdr, bit_no)) {
  179. size_t n_marks;
  180. clear_mark_bit_from_hdr(hhdr, bit_no);
  181. n_marks = hhdr -> hb_n_marks - 1;
  182. # ifdef PARALLEL_MARK
  183. if (n_marks != 0)
  184. hhdr -> hb_n_marks = n_marks;
  185. /* Don't decrement to zero. The counts are approximate due to */
  186. /* concurrency issues, but we need to ensure that a count of */
  187. /* zero implies an empty block. */
  188. # else
  189. hhdr -> hb_n_marks = n_marks;
  190. # endif
  191. }
  192. }
  193. GC_bool GC_is_marked(ptr_t p)
  194. {
  195. struct hblk *h = HBLKPTR(p);
  196. hdr * hhdr = HDR(h);
  197. word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);
  198. return((GC_bool)mark_bit_from_hdr(hhdr, bit_no));
  199. }
  200. /*
  201. * Clear mark bits in all allocated heap blocks. This invalidates
  202. * the marker invariant, and sets GC_mark_state to reflect this.
  203. * (This implicitly starts marking to reestablish the invariant.)
  204. */
  205. void GC_clear_marks(void)
  206. {
  207. GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
  208. GC_objects_are_marked = FALSE;
  209. GC_mark_state = MS_INVALID;
  210. scan_ptr = 0;
  211. }
  212. /* Initiate a garbage collection. Initiates a full collection if the */
  213. /* mark state is invalid. */
  214. /*ARGSUSED*/
  215. void GC_initiate_gc(void)
  216. {
  217. if (GC_dirty_maintained) GC_read_dirty();
  218. # ifdef STUBBORN_ALLOC
  219. GC_read_changed();
  220. # endif
  221. # ifdef CHECKSUMS
  222. {
  223. extern void GC_check_dirty();
  224. if (GC_dirty_maintained) GC_check_dirty();
  225. }
  226. # endif
  227. GC_n_rescuing_pages = 0;
  228. if (GC_mark_state == MS_NONE) {
  229. GC_mark_state = MS_PUSH_RESCUERS;
  230. } else if (GC_mark_state != MS_INVALID) {
  231. ABORT("unexpected state");
  232. } /* else this is really a full collection, and mark */
  233. /* bits are invalid. */
  234. scan_ptr = 0;
  235. }
  236. static void alloc_mark_stack(size_t);
  237. # if defined(MSWIN32) || (defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS))
  238. /* Under rare conditions, we may end up marking from nonexistent memory. */
  239. /* Hence we need to be prepared to recover by running GC_mark_some */
  240. /* with a suitable handler in place. */
  241. # define WRAP_MARK_SOME
  242. # endif
  243. /* Perform a small amount of marking. */
  244. /* We try to touch roughly a page of memory. */
  245. /* Return TRUE if we just finished a mark phase. */
  246. /* Cold_gc_frame is an address inside a GC frame that */
  247. /* remains valid until all marking is complete. */
  248. /* A zero value indicates that it's OK to miss some */
  249. /* register values. */
  250. /* We hold the allocation lock. In the case of */
  251. /* incremental collection, the world may not be stopped.*/
  252. #ifdef WRAP_MARK_SOME
  253. /* For win32, this is called after we establish a structured */
  254. /* exception handler, in case Windows unmaps one of our root */
  255. /* segments. See below. In either case, we acquire the */
  256. /* allocator lock long before we get here. */
  257. GC_bool GC_mark_some_inner(ptr_t cold_gc_frame)
  258. #else
  259. GC_bool GC_mark_some(ptr_t cold_gc_frame)
  260. #endif
  261. {
  262. switch(GC_mark_state) {
  263. case MS_NONE:
  264. return(FALSE);
  265. case MS_PUSH_RESCUERS:
  266. if (GC_mark_stack_top
  267. >= GC_mark_stack_limit - INITIAL_MARK_STACK_SIZE/2) {
  268. /* Go ahead and mark, even though that might cause us to */
  269. /* see more marked dirty objects later on. Avoid this */
  270. /* in the future. */
  271. GC_mark_stack_too_small = TRUE;
  272. MARK_FROM_MARK_STACK();
  273. return(FALSE);
  274. } else {
  275. scan_ptr = GC_push_next_marked_dirty(scan_ptr);
  276. if (scan_ptr == 0) {
  277. if (GC_print_stats) {
  278. GC_log_printf("Marked from %lu dirty pages\n",
  279. (unsigned long)GC_n_rescuing_pages);
  280. }
  281. GC_push_roots(FALSE, cold_gc_frame);
  282. GC_objects_are_marked = TRUE;
  283. if (GC_mark_state != MS_INVALID) {
  284. GC_mark_state = MS_ROOTS_PUSHED;
  285. }
  286. }
  287. }
  288. return(FALSE);
  289. case MS_PUSH_UNCOLLECTABLE:
  290. if (GC_mark_stack_top
  291. >= GC_mark_stack + GC_mark_stack_size/4) {
  292. # ifdef PARALLEL_MARK
  293. /* Avoid this, since we don't parallelize the marker */
  294. /* here. */
  295. if (GC_parallel) GC_mark_stack_too_small = TRUE;
  296. # endif
  297. MARK_FROM_MARK_STACK();
  298. return(FALSE);
  299. } else {
  300. scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
  301. if (scan_ptr == 0) {
  302. GC_push_roots(TRUE, cold_gc_frame);
  303. GC_objects_are_marked = TRUE;
  304. if (GC_mark_state != MS_INVALID) {
  305. GC_mark_state = MS_ROOTS_PUSHED;
  306. }
  307. }
  308. }
  309. return(FALSE);
  310. case MS_ROOTS_PUSHED:
  311. # ifdef PARALLEL_MARK
  312. /* In the incremental GC case, this currently doesn't */
  313. /* quite do the right thing, since it runs to */
  314. /* completion. On the other hand, starting a */
  315. /* parallel marker is expensive, so perhaps it is */
  316. /* the right thing? */
  317. /* Eventually, incremental marking should run */
  318. /* asynchronously in multiple threads, without grabbing */
  319. /* the allocation lock. */
  320. if (GC_parallel) {
  321. GC_do_parallel_mark();
  322. GC_ASSERT(GC_mark_stack_top < (mse *)GC_first_nonempty);
  323. GC_mark_stack_top = GC_mark_stack - 1;
  324. if (GC_mark_stack_too_small) {
  325. alloc_mark_stack(2*GC_mark_stack_size);
  326. }
  327. if (GC_mark_state == MS_ROOTS_PUSHED) {
  328. GC_mark_state = MS_NONE;
  329. return(TRUE);
  330. } else {
  331. return(FALSE);
  332. }
  333. }
  334. # endif
  335. if (GC_mark_stack_top >= GC_mark_stack) {
  336. MARK_FROM_MARK_STACK();
  337. return(FALSE);
  338. } else {
  339. GC_mark_state = MS_NONE;
  340. if (GC_mark_stack_too_small) {
  341. alloc_mark_stack(2*GC_mark_stack_size);
  342. }
  343. return(TRUE);
  344. }
  345. case MS_INVALID:
  346. case MS_PARTIALLY_INVALID:
  347. if (!GC_objects_are_marked) {
  348. GC_mark_state = MS_PUSH_UNCOLLECTABLE;
  349. return(FALSE);
  350. }
  351. if (GC_mark_stack_top >= GC_mark_stack) {
  352. MARK_FROM_MARK_STACK();
  353. return(FALSE);
  354. }
  355. if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
  356. /* About to start a heap scan for marked objects. */
  357. /* Mark stack is empty. OK to reallocate. */
  358. if (GC_mark_stack_too_small) {
  359. alloc_mark_stack(2*GC_mark_stack_size);
  360. }
  361. GC_mark_state = MS_PARTIALLY_INVALID;
  362. }
  363. scan_ptr = GC_push_next_marked(scan_ptr);
  364. if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
  365. GC_push_roots(TRUE, cold_gc_frame);
  366. GC_objects_are_marked = TRUE;
  367. if (GC_mark_state != MS_INVALID) {
  368. GC_mark_state = MS_ROOTS_PUSHED;
  369. }
  370. }
  371. return(FALSE);
  372. default:
  373. ABORT("GC_mark_some: bad state");
  374. return(FALSE);
  375. }
  376. }
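/* Illustrative sketch (not compiled): roughly how alloc.c drives the    */
/* state machine above for a non-incremental collection, assuming the    */
/* allocation lock is held and the world is stopped.  The function name  */
/* is hypothetical; the real driver also checks a stop function and      */
/* time limits between calls.                                            */
#if 0
static void example_complete_mark_phase(ptr_t cold_gc_frame)
{
    GC_initiate_gc();                    /* MS_NONE -> MS_PUSH_RESCUERS, or  */
                                         /* stay in MS_INVALID for a full    */
                                         /* collection.                      */
    while (!GC_mark_some(cold_gc_frame)) {
        /* Each call does roughly a page worth of marking; an incremental   */
        /* collector returns to the mutator between calls instead of        */
        /* looping here.                                                     */
    }
}
#endif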
  377. #if defined(MSWIN32) && defined(__GNUC__)
  378. typedef struct {
  379. EXCEPTION_REGISTRATION ex_reg;
  380. void *alt_path;
  381. } ext_ex_regn;
  382. static EXCEPTION_DISPOSITION mark_ex_handler(
  383. struct _EXCEPTION_RECORD *ex_rec,
  384. void *est_frame,
  385. struct _CONTEXT *context,
  386. void *disp_ctxt)
  387. {
  388. if (ex_rec->ExceptionCode == STATUS_ACCESS_VIOLATION) {
  389. ext_ex_regn *xer = (ext_ex_regn *)est_frame;
  390. /* Unwind from the inner function assuming the standard */
  391. /* function prologue. */
  392. /* Assumes code has not been compiled with */
  393. /* -fomit-frame-pointer. */
  394. context->Esp = context->Ebp;
  395. context->Ebp = *((DWORD *)context->Esp);
  396. context->Esp = context->Esp - 8;
  397. /* Resume execution at the "real" handler within the */
  398. /* wrapper function. */
  399. context->Eip = (DWORD )(xer->alt_path);
  400. return ExceptionContinueExecution;
  401. } else {
  402. return ExceptionContinueSearch;
  403. }
  404. }
  405. # endif /* __GNUC__ && MSWIN32 */
  406. #ifdef GC_WIN32_THREADS
  407. extern GC_bool GC_started_thread_while_stopped(void);
  408. /* In win32_threads.c. Did we invalidate mark phase with an */
  409. /* unexpected thread start? */
  410. #endif
  411. # ifdef WRAP_MARK_SOME
  412. GC_bool GC_mark_some(ptr_t cold_gc_frame)
  413. {
  414. GC_bool ret_val;
  415. # ifdef MSWIN32
  416. # ifndef __GNUC__
  417. /* Windows 98 appears to asynchronously create and remove */
  418. /* writable memory mappings, for reasons we haven't yet */
  419. /* understood. Since we look for writable regions to */
  420. /* determine the root set, we may try to mark from an */
  421. /* address range that disappeared since we started the */
  422. /* collection. Thus we have to recover from faults here. */
  423. /* This code does not appear to be necessary for Windows */
  424. /* 95/NT/2000. Note that this code should never generate */
  425. /* an incremental GC write fault. */
  426. /* It's conceivable that this is the same issue with */
  427. /* terminating threads that we see with Linux and */
  428. /* USE_PROC_FOR_LIBRARIES. */
  429. __try {
  430. ret_val = GC_mark_some_inner(cold_gc_frame);
  431. } __except (GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION ?
  432. EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
  433. goto handle_ex;
  434. }
  435. # ifdef GC_WIN32_THREADS
  436. /* With DllMain-based thread tracking, a thread may have */
  437. /* started while we were marking. This is logically equivalent */
  438. /* to the exception case; our results are invalid and we have */
  439. /* to start over. This cannot be prevented since we can't */
  440. /* block in DllMain. */
  441. if (GC_started_thread_while_stopped()) goto handle_ex;
  442. # endif
  443. rm_handler:
  444. return ret_val;
  445. # else /* __GNUC__ */
  446. /* Manually install an exception handler since GCC does */
  447. /* not yet support Structured Exception Handling (SEH) on */
  448. /* Win32. */
  449. ext_ex_regn er;
  450. er.alt_path = &&handle_ex;
  451. er.ex_reg.handler = mark_ex_handler;
  452. asm volatile ("movl %%fs:0, %0" : "=r" (er.ex_reg.prev));
  453. asm volatile ("movl %0, %%fs:0" : : "r" (&er));
  454. ret_val = GC_mark_some_inner(cold_gc_frame);
  455. /* Prevent GCC from considering the following code unreachable */
  456. /* and thus eliminating it. */
  457. if (er.alt_path == 0)
  458. goto handle_ex;
  459. rm_handler:
  460. /* Uninstall the exception handler */
  461. asm volatile ("mov %0, %%fs:0" : : "r" (er.ex_reg.prev));
  462. return ret_val;
  463. # endif /* __GNUC__ */
  464. # else /* !MSWIN32 */
  465. /* Here we are handling the case in which /proc is used for root */
  466. /* finding, and we have threads. We may find a stack for a */
  467. /* thread that is in the process of exiting, and disappears */
  468. /* while we are marking it. This seems extremely difficult to */
  469. /* avoid otherwise. */
  470. if (GC_incremental)
  471. WARN("Incremental GC incompatible with /proc roots\n", 0);
  472. /* I'm not sure if this could still work ... */
  473. GC_setup_temporary_fault_handler();
  474. if(SETJMP(GC_jmp_buf) != 0) goto handle_ex;
  475. ret_val = GC_mark_some_inner(cold_gc_frame);
  476. rm_handler:
  477. GC_reset_fault_handler();
  478. return ret_val;
  479. # endif /* !MSWIN32 */
  480. handle_ex:
  481. /* Exception handler starts here for all cases. */
  482. if (GC_print_stats) {
  483. GC_log_printf("Caught ACCESS_VIOLATION in marker. "
  484. "Memory mapping disappeared.\n");
  485. }
  486. /* We have bad roots on the stack. Discard mark stack. */
  487. /* Rescan from marked objects. Redetermine roots. */
  488. GC_invalidate_mark_state();
  489. scan_ptr = 0;
  490. ret_val = FALSE;
  491. goto rm_handler; /* Back to platform-specific code. */
  492. }
  493. #endif /* WRAP_MARK_SOME */
  494. GC_bool GC_mark_stack_empty(void)
  495. {
  496. return(GC_mark_stack_top < GC_mark_stack);
  497. }
  498. void GC_invalidate_mark_state(void)
  499. {
  500. GC_mark_state = MS_INVALID;
  501. GC_mark_stack_top = GC_mark_stack-1;
  502. }
  503. mse * GC_signal_mark_stack_overflow(mse *msp)
  504. {
  505. GC_mark_state = MS_INVALID;
  506. GC_mark_stack_too_small = TRUE;
  507. if (GC_print_stats) {
  508. GC_log_printf("Mark stack overflow; current size = %lu entries\n",
  509. (unsigned long)GC_mark_stack_size);
  510. }
  511. return(msp - GC_MARK_STACK_DISCARDS);
  512. }
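/* Illustrative sketch (not compiled): the overflow protocol assumed by  */
/* the routine above.  A push helper bumps the top pointer and, on       */
/* hitting the limit, lets GC_signal_mark_stack_overflow discard a few   */
/* entries and flag the stack for growth; this mirrors what the PUSH_*   */
/* macros in gc_pmark.h do.  The helper name is hypothetical.            */
#if 0
static mse *example_push_range(ptr_t base, word descr,
                               mse *mark_stack_top, mse *mark_stack_limit)
{
    mark_stack_top++;
    if (mark_stack_top >= mark_stack_limit) {
        /* Marking continues; the dropped entries are recovered later by  */
        /* the MS_INVALID rescan in GC_mark_some.                         */
        mark_stack_top = GC_signal_mark_stack_overflow(mark_stack_top);
    }
    mark_stack_top -> mse_start = base;
    mark_stack_top -> mse_descr = descr;
    return mark_stack_top;
}
#endif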
  513. /*
  514. * Mark objects pointed to by the regions described by
  515. * mark stack entries between mark_stack and mark_stack_top,
  516. * inclusive. Assumes the upper limit of a mark stack entry
  517. * is never 0. A mark stack entry never has size 0.
  518. * We try to traverse on the order of a hblk of memory before we return.
  519. * Caller is responsible for calling this until the mark stack is empty.
  520. * Note that this is the most performance critical routine in the
  521. * collector. Hence it contains all sorts of ugly hacks to speed
  522. * things up. In particular, we avoid procedure calls on the common
  523. * path, we take advantage of peculiarities of the mark descriptor
  524. * encoding, we optionally maintain a cache for the block address to
  525. * header mapping, we prefetch when an object is "grayed", etc.
  526. */
  527. mse * GC_mark_from(mse *mark_stack_top, mse *mark_stack, mse *mark_stack_limit)
  528. {
  529. signed_word credit = HBLKSIZE; /* Remaining credit for marking work */
  530. ptr_t current_p; /* Pointer to current candidate ptr. */
  531. word current; /* Candidate pointer. */
  532. ptr_t limit; /* (Incl) limit of current candidate */
  533. /* range */
  534. word descr;
  535. ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
  536. ptr_t least_ha = GC_least_plausible_heap_addr;
  537. DECLARE_HDR_CACHE;
  538. # define SPLIT_RANGE_WORDS 128 /* Must be power of 2. */
  539. GC_objects_are_marked = TRUE;
  540. INIT_HDR_CACHE;
  541. # ifdef OS2 /* Use untweaked version to circumvent compiler problem */
  542. while (mark_stack_top >= mark_stack && credit >= 0) {
  543. # else
  544. while ((((ptr_t)mark_stack_top - (ptr_t)mark_stack) | credit)
  545. >= 0) {
  546. # endif
  547. current_p = mark_stack_top -> mse_start;
  548. descr = mark_stack_top -> mse_descr;
  549. retry:
  550. /* current_p and descr describe the current object. */
  551. /* *mark_stack_top is vacant. */
  552. /* The following is 0 only for small objects described by a simple */
  553. /* length descriptor. For many applications this is the common */
  554. /* case, so we try to detect it quickly. */
  555. if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | GC_DS_TAGS)) {
  556. word tag = descr & GC_DS_TAGS;
  557. switch(tag) {
  558. case GC_DS_LENGTH:
  559. /* Large length. */
  560. /* Process part of the range to avoid pushing too much on the */
  561. /* stack. */
  562. GC_ASSERT(descr < (word)GC_greatest_plausible_heap_addr
  563. - (word)GC_least_plausible_heap_addr);
  564. # ifdef ENABLE_TRACE
  565. if (GC_trace_addr >= current_p
  566. && GC_trace_addr < current_p + descr) {
  567. GC_log_printf("GC:%d Large section; start %p len %lu\n",
  568. GC_gc_no, current_p, (unsigned long) descr);
  569. }
  570. # endif /* ENABLE_TRACE */
  571. # ifdef PARALLEL_MARK
  572. # define SHARE_BYTES 2048
  573. if (descr > SHARE_BYTES && GC_parallel
  574. && mark_stack_top < mark_stack_limit - 1) {
  575. int new_size = (descr/2) & ~(sizeof(word)-1);
  576. mark_stack_top -> mse_start = current_p;
  577. mark_stack_top -> mse_descr = new_size + sizeof(word);
  578. /* makes sure we handle */
  579. /* misaligned pointers. */
  580. mark_stack_top++;
  581. # ifdef ENABLE_TRACE
  582. if (GC_trace_addr >= current_p
  583. && GC_trace_addr < current_p + descr) {
  584. GC_log_printf("GC:%d splitting (parallel) %p at %p\n",
  585. GC_gc_no, current_p, current_p + new_size);
  586. }
  587. # endif /* ENABLE_TRACE */
  588. current_p += new_size;
  589. descr -= new_size;
  590. goto retry;
  591. }
  592. # endif /* PARALLEL_MARK */
  593. mark_stack_top -> mse_start =
  594. limit = current_p + WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
  595. mark_stack_top -> mse_descr =
  596. descr - WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
  597. # ifdef ENABLE_TRACE
  598. if (GC_trace_addr >= current_p
  599. && GC_trace_addr < current_p + descr) {
  600. GC_log_printf("GC:%d splitting %p at %p\n",
  601. GC_gc_no, current_p, limit);
  602. }
  603. # endif /* ENABLE_TRACE */
  604. /* Make sure that pointers overlapping the two ranges are */
  605. /* considered. */
  606. limit += sizeof(word) - ALIGNMENT;
  607. break;
  608. case GC_DS_BITMAP:
  609. mark_stack_top--;
  610. # ifdef ENABLE_TRACE
  611. if (GC_trace_addr >= current_p
  612. && GC_trace_addr < current_p + WORDS_TO_BYTES(WORDSZ-2)) {
  613. GC_log_printf("GC:%d Tracing from %p bitmap descr %lu\n",
  614. GC_gc_no, current_p, (unsigned long) descr);
  615. }
  616. # endif /* ENABLE_TRACE */
  617. descr &= ~GC_DS_TAGS;
  618. credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
  619. while (descr != 0) {
  620. if ((signed_word)descr < 0) {
  621. current = *(word *)current_p;
  622. FIXUP_POINTER(current);
  623. if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
  624. PREFETCH((ptr_t)current);
  625. # ifdef ENABLE_TRACE
  626. if (GC_trace_addr == current_p) {
  627. GC_log_printf("GC:%d Considering(3) %p -> %p\n",
  628. GC_gc_no, current_p, (ptr_t) current);
  629. }
  630. # endif /* ENABLE_TRACE */
  631. PUSH_CONTENTS((ptr_t)current, mark_stack_top,
  632. mark_stack_limit, current_p, exit1);
  633. }
  634. }
  635. descr <<= 1;
  636. current_p += sizeof(word);
  637. }
  638. continue;
  639. case GC_DS_PROC:
  640. mark_stack_top--;
  641. # ifdef ENABLE_TRACE
  642. if (GC_trace_addr >= current_p
  643. && GC_base(current_p) != 0
  644. && GC_base(current_p) == GC_base(GC_trace_addr)) {
  645. GC_log_printf("GC:%d Tracing from %p proc descr %lu\n",
  646. GC_gc_no, current_p, (unsigned long) descr);
  647. }
  648. # endif /* ENABLE_TRACE */
  649. credit -= GC_PROC_BYTES;
  650. mark_stack_top =
  651. (*PROC(descr))
  652. ((word *)current_p, mark_stack_top,
  653. mark_stack_limit, ENV(descr));
  654. continue;
  655. case GC_DS_PER_OBJECT:
  656. if ((signed_word)descr >= 0) {
  657. /* Descriptor is in the object. */
  658. descr = *(word *)(current_p + descr - GC_DS_PER_OBJECT);
  659. } else {
  660. /* Descriptor is in type descriptor pointed to by first */
  661. /* word in object. */
  662. ptr_t type_descr = *(ptr_t *)current_p;
  663. /* type_descr is either a valid pointer to the descriptor */
  664. /* structure, or this object was on a free list. If it */
  665. /* was anything but the last object on the free list, */
  666. /* we will misinterpret the next object on the free list as */
  667. /* the type descriptor, and get a 0 GC descriptor, which */
  668. /* is ideal. Unfortunately, we need to check for the last */
  669. /* object case explicitly. */
  670. if (0 == type_descr) {
  671. /* Rarely executed. */
  672. mark_stack_top--;
  673. continue;
  674. }
  675. descr = *(word *)(type_descr
  676. - (descr + (GC_INDIR_PER_OBJ_BIAS
  677. - GC_DS_PER_OBJECT)));
  678. }
  679. if (0 == descr) {
  680. /* Can happen either because we generated a 0 descriptor */
  681. /* or we saw a pointer to a free object. */
  682. mark_stack_top--;
  683. continue;
  684. }
  685. goto retry;
  686. }
  687. } else /* Small object with length descriptor */ {
  688. mark_stack_top--;
  689. limit = current_p + (word)descr;
  690. }
  691. # ifdef ENABLE_TRACE
  692. if (GC_trace_addr >= current_p
  693. && GC_trace_addr < limit) {
  694. GC_log_printf("GC:%d Tracing from %p len %lu\n",
  695. GC_gc_no, current_p, (unsigned long) descr);
  696. }
  697. # endif /* ENABLE_TRACE */
  698. /* The simple case in which we're scanning a range. */
  699. GC_ASSERT(!((word)current_p & (ALIGNMENT-1)));
  700. credit -= limit - current_p;
  701. limit -= sizeof(word);
  702. {
  703. # define PREF_DIST 4
  704. # ifndef SMALL_CONFIG
  705. word deferred;
  706. /* Try to prefetch the next pointer to be examined asap. */
  707. /* Empirically, this also seems to help slightly without */
  708. /* prefetches, at least on linux/X86. Presumably this loop */
  709. /* ends up with less register pressure, and gcc thus ends up */
  710. /* generating slightly better code. Overall gcc code quality */
  711. /* for this loop is still not great. */
  712. for(;;) {
  713. PREFETCH(limit - PREF_DIST*CACHE_LINE_SIZE);
  714. GC_ASSERT(limit >= current_p);
  715. deferred = *(word *)limit;
  716. FIXUP_POINTER(deferred);
  717. limit -= ALIGNMENT;
  718. if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
  719. PREFETCH((ptr_t)deferred);
  720. break;
  721. }
  722. if (current_p > limit) goto next_object;
  723. /* Unroll once, so we don't do too many of the prefetches */
  724. /* based on limit. */
  725. deferred = *(word *)limit;
  726. FIXUP_POINTER(deferred);
  727. limit -= ALIGNMENT;
  728. if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
  729. PREFETCH((ptr_t)deferred);
  730. break;
  731. }
  732. if (current_p > limit) goto next_object;
  733. }
  734. # endif
  735. while (current_p <= limit) {
  736. /* Empirically, unrolling this loop doesn't help a lot. */
  737. /* Since PUSH_CONTENTS expands to a lot of code, */
  738. /* we don't. */
  739. current = *(word *)current_p;
  740. FIXUP_POINTER(current);
  741. PREFETCH(current_p + PREF_DIST*CACHE_LINE_SIZE);
  742. if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
  743. /* Prefetch the contents of the object we just pushed. It's */
  744. /* likely we will need them soon. */
  745. PREFETCH((ptr_t)current);
  746. # ifdef ENABLE_TRACE
  747. if (GC_trace_addr == current_p) {
  748. GC_log_printf("GC:%d Considering(1) %p -> %p\n",
  749. GC_gc_no, current_p, (ptr_t) current);
  750. }
  751. # endif /* ENABLE_TRACE */
  752. PUSH_CONTENTS((ptr_t)current, mark_stack_top,
  753. mark_stack_limit, current_p, exit2);
  754. }
  755. current_p += ALIGNMENT;
  756. }
  757. # ifndef SMALL_CONFIG
  758. /* We still need to mark the entry we previously prefetched. */
  759. /* We already know that it passes the preliminary pointer */
  760. /* validity test. */
  761. # ifdef ENABLE_TRACE
  762. if (GC_trace_addr == current_p) {
  763. GC_log_printf("GC:%d Considering(2) %p -> %p\n",
  764. GC_gc_no, current_p, (ptr_t) deferred);
  765. }
  766. # endif /* ENABLE_TRACE */
  767. PUSH_CONTENTS((ptr_t)deferred, mark_stack_top,
  768. mark_stack_limit, current_p, exit4);
  769. next_object:;
  770. # endif
  771. }
  772. }
  773. return mark_stack_top;
  774. }
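/* Illustrative sketch (not compiled): the two simplest descriptor forms */
/* decoded by GC_mark_from above, built by hand.  Tag values are those   */
/* defined in gc_mark.h (GC_DS_LENGTH == 0, GC_DS_BITMAP == 1); the      */
/* struct name is hypothetical.                                          */
#if 0
struct example_node {
    struct example_node *next;   /* word 0: a pointer                    */
    word payload[3];             /* words 1..3: never pointers           */
};

/* Length descriptor: size in bytes with low tag bits GC_DS_LENGTH (0).  */
/* Every word of the object is scanned as a possible pointer.            */
static const word example_length_descr =
        sizeof(struct example_node) | GC_DS_LENGTH;

/* Bitmap descriptor: the most significant bit stands for word 0, the    */
/* next bit for word 1, and so on (GC_mark_from tests the sign bit and   */
/* shifts left).  Here only the "next" field is scanned.                 */
static const word example_bitmap_descr =
        ((word)1 << (WORDSZ - 1)) | GC_DS_BITMAP;
#endif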
  775. #ifdef PARALLEL_MARK
  776. /* We assume we have an ANSI C Compiler. */
  777. GC_bool GC_help_wanted = FALSE;
  778. unsigned GC_helper_count = 0;
  779. unsigned GC_active_count = 0;
  780. word GC_mark_no = 0;
  781. #define LOCAL_MARK_STACK_SIZE HBLKSIZE
  782. /* Under normal circumstances, this is big enough to guarantee */
  783. /* we don't overflow half of it in a single call to */
  784. /* GC_mark_from. */
  785. /* Steal mark stack entries starting at mse low into mark stack local */
  786. /* until we either steal mse high, or we have max entries. */
  787. /* Return a pointer to the top of the local mark stack. */
  788. /* *next is replaced by a pointer to the next unscanned mark stack */
  789. /* entry. */
  790. mse * GC_steal_mark_stack(mse * low, mse * high, mse * local,
  791. unsigned max, mse **next)
  792. {
  793. mse *p;
  794. mse *top = local - 1;
  795. unsigned i = 0;
  796. GC_ASSERT(high >= low-1 && high - low + 1 <= GC_mark_stack_size);
  797. for (p = low; p <= high && i <= max; ++p) {
  798. word descr = AO_load((volatile AO_t *) &(p -> mse_descr));
  799. if (descr != 0) {
  800. /* Must be ordered after read of descr: */
  801. AO_store_release_write((volatile AO_t *) &(p -> mse_descr), 0);
  802. /* More than one thread may get this entry, but that's only */
  803. /* a minor performance problem. */
  804. ++top;
  805. top -> mse_descr = descr;
  806. top -> mse_start = p -> mse_start;
  807. GC_ASSERT((top -> mse_descr & GC_DS_TAGS) != GC_DS_LENGTH ||
  808. top -> mse_descr < (ptr_t)GC_greatest_plausible_heap_addr
  809. - (ptr_t)GC_least_plausible_heap_addr);
  810. /* If this is a big object, count it as */
  811. /* size/256 + 1 objects. */
  812. ++i;
  813. if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) i += (descr >> 8);
  814. }
  815. }
  816. *next = p;
  817. return top;
  818. }
  819. /* Copy back a local mark stack. */
  820. /* low and high are inclusive bounds. */
  821. void GC_return_mark_stack(mse * low, mse * high)
  822. {
  823. mse * my_top;
  824. mse * my_start;
  825. size_t stack_size;
  826. if (high < low) return;
  827. stack_size = high - low + 1;
  828. GC_acquire_mark_lock();
  829. my_top = GC_mark_stack_top; /* Concurrent modification impossible. */
  830. my_start = my_top + 1;
  831. if (my_start - GC_mark_stack + stack_size > GC_mark_stack_size) {
  832. if (GC_print_stats) {
  833. GC_log_printf("No room to copy back mark stack.\n");
  834. }
  835. GC_mark_state = MS_INVALID;
  836. GC_mark_stack_too_small = TRUE;
  837. /* We drop the local mark stack. We'll fix things later. */
  838. } else {
  839. BCOPY(low, my_start, stack_size * sizeof(mse));
  840. GC_ASSERT((mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top))
  841. == my_top);
  842. AO_store_release_write((volatile AO_t *)(&GC_mark_stack_top),
  843. (AO_t)(my_top + stack_size));
  844. /* Ensures visibility of previously written stack contents. */
  845. }
  846. GC_release_mark_lock();
  847. GC_notify_all_marker();
  848. }
  849. /* Mark from the local mark stack. */
  850. /* On return, the local mark stack is empty. */
  851. /* But this may be achieved by copying the */
  852. /* local mark stack back into the global one. */
  853. void GC_do_local_mark(mse *local_mark_stack, mse *local_top)
  854. {
  855. unsigned n;
  856. # define N_LOCAL_ITERS 1
  857. # ifdef GC_ASSERTIONS
  858. /* Make sure we don't hold mark lock. */
  859. GC_acquire_mark_lock();
  860. GC_release_mark_lock();
  861. # endif
  862. for (;;) {
  863. for (n = 0; n < N_LOCAL_ITERS; ++n) {
  864. local_top = GC_mark_from(local_top, local_mark_stack,
  865. local_mark_stack + LOCAL_MARK_STACK_SIZE);
  866. if (local_top < local_mark_stack) return;
  867. if (local_top - local_mark_stack >= LOCAL_MARK_STACK_SIZE/2) {
  868. GC_return_mark_stack(local_mark_stack, local_top);
  869. return;
  870. }
  871. }
  872. if ((mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top))
  873. < (mse *)AO_load(&GC_first_nonempty)
  874. && GC_active_count < GC_helper_count
  875. && local_top > local_mark_stack + 1) {
  876. /* Try to share the load, since the main stack is empty, */
  877. /* and helper threads are waiting for a refill. */
  878. /* The entries near the bottom of the stack are likely */
  879. /* to require more work. Thus we return those, even though */
  880. /* it's harder. */
  881. mse * new_bottom = local_mark_stack
  882. + (local_top - local_mark_stack)/2;
  883. GC_ASSERT(new_bottom > local_mark_stack
  884. && new_bottom < local_top);
  885. GC_return_mark_stack(local_mark_stack, new_bottom - 1);
  886. memmove(local_mark_stack, new_bottom,
  887. (local_top - new_bottom + 1) * sizeof(mse));
  888. local_top -= (new_bottom - local_mark_stack);
  889. }
  890. }
  891. }
  892. #define ENTRIES_TO_GET 5
  893. long GC_markers = 2; /* Normally changed by thread-library- */
  894. /* -specific code. */
  895. /* Mark using the local mark stack until the global mark stack is empty */
  896. /* and there are no active workers. Update GC_first_nonempty to reflect */
  897. /* progress. */
  898. /* Caller does not hold mark lock. */
  899. /* Caller has already incremented GC_helper_count. We decrement it, */
  900. /* and maintain GC_active_count. */
  901. void GC_mark_local(mse *local_mark_stack, int id)
  902. {
  903. mse * my_first_nonempty;
  904. GC_acquire_mark_lock();
  905. GC_active_count++;
  906. my_first_nonempty = (mse *)AO_load(&GC_first_nonempty);
  907. GC_ASSERT((mse *)AO_load(&GC_first_nonempty) >= GC_mark_stack &&
  908. (mse *)AO_load(&GC_first_nonempty) <=
  909. (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
  910. if (GC_print_stats == VERBOSE)
  911. GC_log_printf("Starting mark helper %lu\n", (unsigned long)id);
  912. GC_release_mark_lock();
  913. for (;;) {
  914. size_t n_on_stack;
  915. size_t n_to_get;
  916. mse * my_top;
  917. mse * local_top;
  918. mse * global_first_nonempty = (mse *)AO_load(&GC_first_nonempty);
  919. GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
  920. my_first_nonempty <=
  921. (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
  922. GC_ASSERT(global_first_nonempty >= GC_mark_stack &&
  923. global_first_nonempty <=
  924. (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
  925. if (my_first_nonempty < global_first_nonempty) {
  926. my_first_nonempty = global_first_nonempty;
  927. } else if (global_first_nonempty < my_first_nonempty) {
  928. AO_compare_and_swap(&GC_first_nonempty,
  929. (AO_t) global_first_nonempty,
  930. (AO_t) my_first_nonempty);
  931. /* If this fails, we just go ahead, without updating */
  932. /* GC_first_nonempty. */
  933. }
  934. /* Perhaps we should also update GC_first_nonempty, if it */
  935. /* is less. But that would require using atomic updates. */
  936. my_top = (mse *)AO_load_acquire((volatile AO_t *)(&GC_mark_stack_top));
  937. n_on_stack = my_top - my_first_nonempty + 1;
  938. if (0 == n_on_stack) {
  939. GC_acquire_mark_lock();
  940. my_top = GC_mark_stack_top;
  941. /* Asynchronous modification impossible here, */
  942. /* since we hold mark lock. */
  943. n_on_stack = my_top - my_first_nonempty + 1;
  944. if (0 == n_on_stack) {
  945. GC_active_count--;
  946. GC_ASSERT(GC_active_count <= GC_helper_count);
  947. /* Other markers may redeposit objects */
  948. /* on the stack. */
  949. if (0 == GC_active_count) GC_notify_all_marker();
  950. while (GC_active_count > 0
  951. && (mse *)AO_load(&GC_first_nonempty)
  952. > GC_mark_stack_top) {
  953. /* We will be notified if either GC_active_count */
  954. /* reaches zero, or if more objects are pushed on */
  955. /* the global mark stack. */
  956. GC_wait_marker();
  957. }
  958. if (GC_active_count == 0 &&
  959. (mse *)AO_load(&GC_first_nonempty) > GC_mark_stack_top) {
  960. GC_bool need_to_notify = FALSE;
  961. /* The above conditions can't be falsified while we */
  962. /* hold the mark lock, since neither */
  963. /* GC_active_count nor GC_mark_stack_top can */
  964. /* change. GC_first_nonempty can only be */
  965. /* incremented asynchronously. Thus we know that */
  966. /* both conditions actually held simultaneously. */
  967. GC_helper_count--;
  968. if (0 == GC_helper_count) need_to_notify = TRUE;
  969. if (GC_print_stats == VERBOSE)
  970. GC_log_printf(
  971. "Finished mark helper %lu\n", (unsigned long)id);
  972. GC_release_mark_lock();
  973. if (need_to_notify) GC_notify_all_marker();
  974. return;
  975. }
  976. /* else there's something on the stack again, or */
  977. /* another helper may push something. */
  978. GC_active_count++;
  979. GC_ASSERT(GC_active_count > 0);
  980. GC_release_mark_lock();
  981. continue;
  982. } else {
  983. GC_release_mark_lock();
  984. }
  985. }
  986. n_to_get = ENTRIES_TO_GET;
  987. if (n_on_stack < 2 * ENTRIES_TO_GET) n_to_get = 1;
  988. local_top = GC_steal_mark_stack(my_first_nonempty, my_top,
  989. local_mark_stack, n_to_get,
  990. &my_first_nonempty);
  991. GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
  992. my_first_nonempty <=
  993. (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
  994. GC_do_local_mark(local_mark_stack, local_top);
  995. }
  996. }
  997. /* Perform Parallel mark. */
  998. /* We hold the GC lock, not the mark lock. */
  999. /* Currently runs until the mark stack is */
  1000. /* empty. */
  1001. void GC_do_parallel_mark()
  1002. {
  1003. mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
  1004. GC_acquire_mark_lock();
  1005. GC_ASSERT(I_HOLD_LOCK());
  1006. /* This could be a GC_ASSERT, but it seems safer to keep it on */
  1007. /* all the time, especially since it's cheap. */
  1008. if (GC_help_wanted || GC_active_count != 0 || GC_helper_count != 0)
  1009. ABORT("Tried to start parallel mark in bad state");
  1010. if (GC_print_stats == VERBOSE)
  1011. GC_log_printf("Starting marking for mark phase number %lu\n",
  1012. (unsigned long)GC_mark_no);
  1013. GC_first_nonempty = (AO_t)GC_mark_stack;
  1014. GC_active_count = 0;
  1015. GC_helper_count = 1;
  1016. GC_help_wanted = TRUE;
  1017. GC_release_mark_lock();
  1018. GC_notify_all_marker();
  1019. /* Wake up potential helpers. */
  1020. GC_mark_local(local_mark_stack, 0);
  1021. GC_acquire_mark_lock();
  1022. GC_help_wanted = FALSE;
  1023. /* Done; clean up. */
  1024. while (GC_helper_count > 0) GC_wait_marker();
  1025. /* GC_helper_count cannot be incremented while GC_help_wanted == FALSE */
  1026. if (GC_print_stats == VERBOSE)
  1027. GC_log_printf(
  1028. "Finished marking for mark phase number %lu\n",
  1029. (unsigned long)GC_mark_no);
  1030. GC_mark_no++;
  1031. GC_release_mark_lock();
  1032. GC_notify_all_marker();
  1033. }
  1034. /* Try to help out the marker, if it's running. */
  1035. /* We do not hold the GC lock, but the requestor does. */
  1036. void GC_help_marker(word my_mark_no)
  1037. {
  1038. mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
  1039. unsigned my_id;
  1040. if (!GC_parallel) return;
  1041. GC_acquire_mark_lock();
  1042. while (GC_mark_no < my_mark_no
  1043. || (!GC_help_wanted && GC_mark_no == my_mark_no)) {
  1044. GC_wait_marker();
  1045. }
  1046. my_id = GC_helper_count;
  1047. if (GC_mark_no != my_mark_no || my_id >= GC_markers) {
  1048. /* Second test is useful only if original threads can also */
  1049. /* act as helpers. Under Linux they can't. */
  1050. GC_release_mark_lock();
  1051. return;
  1052. }
  1053. GC_helper_count = my_id + 1;
  1054. GC_release_mark_lock();
  1055. GC_mark_local(local_mark_stack, my_id);
  1056. /* GC_mark_local decrements GC_helper_count. */
  1057. }
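/* Illustrative sketch (not compiled): roughly how the thread-support    */
/* code runs GC_help_marker from each dedicated marker thread in a       */
/* GC_parallel build.  The function name is hypothetical, and the real   */
/* marker threads also resynchronize my_mark_no if they fall behind.     */
#if 0
static void *example_marker_thread(void *arg)
{
    word my_mark_no = 0;

    for (;; ++my_mark_no) {
        if (my_mark_no < GC_mark_no) my_mark_no = GC_mark_no;
        GC_help_marker(my_mark_no);  /* blocks until phase my_mark_no     */
                                     /* starts, helps mark, then returns  */
    }
    return arg;                      /* not reached */
}
#endif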
  1058. #endif /* PARALLEL_MARK */
  1059. /* Allocate or reallocate space for mark stack of size n entries. */
  1060. /* May silently fail. */
  1061. static void alloc_mark_stack(size_t n)
  1062. {
  1063. mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct GC_ms_entry));
  1064. # ifdef GWW_VDB
  1065. /* Don't recycle a stack segment obtained with the wrong flags. */
  1066. /* Win32 GetWriteWatch requires the right kind of memory. */
  1067. static GC_bool GC_incremental_at_stack_alloc = 0;
  1068. GC_bool recycle_old = (!GC_incremental || GC_incremental_at_stack_alloc);
  1069. GC_incremental_at_stack_alloc = GC_incremental;
  1070. # else
  1071. # define recycle_old 1
  1072. # endif
  1073. GC_mark_stack_too_small = FALSE;
  1074. if (GC_mark_stack_size != 0) {
  1075. if (new_stack != 0) {
  1076. if (recycle_old) {
  1077. /* Recycle old space */
  1078. size_t page_offset = (word)GC_mark_stack & (GC_page_size - 1);
  1079. size_t size = GC_mark_stack_size * sizeof(struct GC_ms_entry);
  1080. size_t displ = 0;
  1081. if (0 != page_offset) displ = GC_page_size - page_offset;
  1082. size = (size - displ) & ~(GC_page_size - 1);
  1083. if (size > 0) {
  1084. GC_add_to_heap((struct hblk *)
  1085. ((word)GC_mark_stack + displ), (word)size);
  1086. }
  1087. }
  1088. GC_mark_stack = new_stack;
  1089. GC_mark_stack_size = n;
  1090. GC_mark_stack_limit = new_stack + n;
  1091. if (GC_print_stats) {
  1092. GC_log_printf("Grew mark stack to %lu frames\n",
  1093. (unsigned long) GC_mark_stack_size);
  1094. }
  1095. } else {
  1096. if (GC_print_stats) {
  1097. GC_log_printf("Failed to grow mark stack to %lu frames\n",
  1098. (unsigned long) n);
  1099. }
  1100. }
  1101. } else {
  1102. if (new_stack == 0) {
  1103. GC_err_printf("No space for mark stack\n");
  1104. EXIT();
  1105. }
  1106. GC_mark_stack = new_stack;
  1107. GC_mark_stack_size = n;
  1108. GC_mark_stack_limit = new_stack + n;
  1109. }
  1110. GC_mark_stack_top = GC_mark_stack-1;
  1111. }
  1112. void GC_mark_init()
  1113. {
  1114. alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
  1115. }
  1116. /*
  1117. * Push all locations between b and t onto the mark stack.
  1118. * b is the first location to be checked. t is one past the last
  1119. * location to be checked.
  1120. * Should only be used if there is no possibility of mark stack
  1121. * overflow.
  1122. */
  1123. void GC_push_all(ptr_t bottom, ptr_t top)
  1124. {
  1125. register word length;
  1126. bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
  1127. top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
  1128. if (top == 0 || bottom == top) return;
  1129. GC_mark_stack_top++;
  1130. if (GC_mark_stack_top >= GC_mark_stack_limit) {
  1131. ABORT("unexpected mark stack overflow");
  1132. }
  1133. length = top - bottom;
  1134. # if GC_DS_TAGS > ALIGNMENT - 1
  1135. length += GC_DS_TAGS;
  1136. length &= ~GC_DS_TAGS;
  1137. # endif
  1138. GC_mark_stack_top -> mse_start = bottom;
  1139. GC_mark_stack_top -> mse_descr = length;
  1140. }
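/* Worked example (not compiled) of the rounding done above, assuming    */
/* ALIGNMENT == 8: the range [0x1003, 0x1025) is rounded inward to       */
/* [0x1008, 0x1020), and the resulting length 0x18 already has zero low  */
/* tag bits, so it doubles as a GC_DS_LENGTH descriptor.                 */
#if 0
static void example_push_all_rounding(void)
{
    ptr_t bottom = (ptr_t)0x1003, top = (ptr_t)0x1025;
    word length;

    bottom = (ptr_t)(((word)bottom + 7) & ~(word)7);   /* rounds up to 0x1008  */
    top    = (ptr_t)((word)top & ~(word)7);            /* rounds down to 0x1020 */
    length = (word)(top - bottom);                     /* 0x18; tag bits clear, */
                                                       /* i.e. GC_DS_LENGTH     */
    (void)length;
}
#endif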
  1141. /*
  1142. * Analogous to the above, but push only those pages h with dirty_fn(h) != 0.
  1143. * We use push_fn to actually push the block.
  1144. * Used either to selectively push dirty pages, or to push a block
  1145. * in piecemeal fashion, to allow for more marking concurrency.
  1146. * Will not overflow mark stack if push_fn pushes a small fixed number
  1147. * of entries. (This is invoked only if push_fn pushes a single entry,
  1148. * or if it marks each object before pushing it, thus ensuring progress
  1149. * in the event of a stack overflow.)
  1150. */
  1151. void GC_push_selected(ptr_t bottom, ptr_t top,
  1152. int (*dirty_fn) (struct hblk *),
  1153. void (*push_fn) (ptr_t, ptr_t))
  1154. {
  1155. struct hblk * h;
  1156. bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
  1157. top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
  1158. if (top == 0 || bottom == top) return;
  1159. h = HBLKPTR(bottom + HBLKSIZE);
  1160. if (top <= (ptr_t) h) {
  1161. if ((*dirty_fn)(h-1)) {
  1162. (*push_fn)(bottom, top);
  1163. }
  1164. return;
  1165. }
  1166. if ((*dirty_fn)(h-1)) {
  1167. (*push_fn)(bottom, (ptr_t)h);
  1168. }
  1169. while ((ptr_t)(h+1) <= top) {
  1170. if ((*dirty_fn)(h)) {
  1171. if ((word)(GC_mark_stack_top - GC_mark_stack)
  1172. > 3 * GC_mark_stack_size / 4) {
  1173. /* Danger of mark stack overflow */
  1174. (*push_fn)((ptr_t)h, top);
  1175. return;
  1176. } else {
  1177. (*push_fn)((ptr_t)h, (ptr_t)(h+1));
  1178. }
  1179. }
  1180. h++;
  1181. }
  1182. if ((ptr_t)h != top) {
  1183. if ((*dirty_fn)(h)) {
  1184. (*push_fn)((ptr_t)h, top);
  1185. }
  1186. }
  1187. if (GC_mark_stack_top >= GC_mark_stack_limit) {
  1188. ABORT("unexpected mark stack overflow");
  1189. }
  1190. }
  1191. # ifndef SMALL_CONFIG
  1192. #ifdef PARALLEL_MARK
  1193. /* Break up root sections into page size chunks to better spread */
  1194. /* out work. */
  1195. GC_bool GC_true_func(struct hblk *h) { return TRUE; }
  1196. # define GC_PUSH_ALL(b,t) GC_push_selected(b,t,GC_true_func,GC_push_all);
  1197. #else
  1198. # define GC_PUSH_ALL(b,t) GC_push_all(b,t);
  1199. #endif
  1200. void GC_push_conditional(ptr_t bottom, ptr_t top, GC_bool all)
  1201. {
  1202. if (all) {
  1203. if (GC_dirty_maintained) {
  1204. # ifdef PROC_VDB
  1205. /* Pages that were never dirtied cannot contain pointers */
  1206. GC_push_selected(bottom, top, GC_page_was_ever_dirty, GC_push_all);
  1207. # else
  1208. GC_push_all(bottom, top);
  1209. # endif
  1210. } else {
  1211. GC_push_all(bottom, top);
  1212. }
  1213. } else {
  1214. GC_push_selected(bottom, top, GC_page_was_dirty, GC_push_all);
  1215. }
  1216. }
  1217. #endif
  1218. # if defined(MSWIN32) || defined(MSWINCE)
  1219. void __cdecl GC_push_one(word p)
  1220. # else
  1221. void GC_push_one(word p)
  1222. # endif
  1223. {
  1224. GC_PUSH_ONE_STACK((ptr_t)p, MARKED_FROM_REGISTER);
  1225. }
  1226. struct GC_ms_entry *GC_mark_and_push(void *obj,
  1227. mse *mark_stack_ptr,
  1228. mse *mark_stack_limit,
  1229. void **src)
  1230. {
  1231. hdr * hhdr;
  1232. PREFETCH(obj);
  1233. GET_HDR(obj, hhdr);
  1234. if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr),FALSE)) {
  1235. if (GC_all_interior_pointers) {
  1236. hhdr = GC_find_header(GC_base(obj));
  1237. if (hhdr == 0) {
  1238. GC_ADD_TO_BLACK_LIST_NORMAL(obj, src);
  1239. return mark_stack_ptr;
  1240. }
  1241. } else {
  1242. GC_ADD_TO_BLACK_LIST_NORMAL(obj, src);
  1243. return mark_stack_ptr;
  1244. }
  1245. }
  1246. if (EXPECT(HBLK_IS_FREE(hhdr),0)) {
  1247. GC_ADD_TO_BLACK_LIST_NORMAL(obj, src);
  1248. return mark_stack_ptr;
  1249. }
  1250. PUSH_CONTENTS_HDR(obj, mark_stack_ptr /* modified */, mark_stack_limit,
  1251. src, was_marked, hhdr, TRUE);
  1252. was_marked:
  1253. return mark_stack_ptr;
  1254. }
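/* Illustrative sketch (not compiled): a client-defined mark procedure   */
/* built on GC_mark_and_push, in the style of the gc_mark.h interface.   */
/* It assumes GC_new_free_list/GC_new_proc/GC_new_kind/GC_MAKE_PROC and  */
/* a way to allocate objects of the new kind (GC_generic_malloc here)    */
/* are available in this GC version; all example_* names are made up.    */
#if 0
#include "gc_mark.h"

struct example_pair { void *car; void *cdr; GC_word tag; };

static struct GC_ms_entry *example_mark_proc(GC_word *addr,
                                struct GC_ms_entry *mark_stack_ptr,
                                struct GC_ms_entry *mark_stack_limit,
                                GC_word env)
{
    struct example_pair *p = (struct example_pair *)addr;

    /* Push only the two pointer fields; "tag" is never scanned. */
    mark_stack_ptr = GC_MARK_AND_PUSH(p -> car, mark_stack_ptr,
                                      mark_stack_limit, (void **)&(p -> car));
    mark_stack_ptr = GC_MARK_AND_PUSH(p -> cdr, mark_stack_ptr,
                                      mark_stack_limit, (void **)&(p -> cdr));
    return mark_stack_ptr;
}

static int example_kind;

static void example_register_kind(void)
{
    unsigned proc = GC_new_proc(example_mark_proc);

    example_kind = (int)GC_new_kind(GC_new_free_list(),
                                    GC_MAKE_PROC(proc, 0 /* env */),
                                    0 /* don't add length to descr */,
                                    1 /* clear new objects */);
}

static struct example_pair *example_alloc_pair(void)
{
    return (struct example_pair *)
        GC_generic_malloc(sizeof(struct example_pair), example_kind);
}
#endif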
  1255. /* Mark and push (i.e. gray) a single object p onto the main */
  1256. /* mark stack. Consider p to be valid if it is an interior */
  1257. /* pointer. */
  1258. /* The object p has passed a preliminary pointer validity */
  1259. /* test, but we do not definitely know whether it is valid. */
  1260. /* Mark bits are NOT atomically updated. Thus this must be the */
  1261. /* only thread setting them. */
  1262. # if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
  1263. void GC_mark_and_push_stack(ptr_t p, ptr_t source)
  1264. # else
  1265. void GC_mark_and_push_stack(ptr_t p)
  1266. # define source 0
  1267. # endif
  1268. {
  1269. hdr * hhdr;
  1270. ptr_t r = p;
  1271. PREFETCH(p);
  1272. GET_HDR(p, hhdr);
  1273. if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr),FALSE)) {
  1274. if (hhdr != 0) {
  1275. r = GC_base(p);
  1276. hhdr = HDR(r);
  1277. }
  1278. if (hhdr == 0) {
  1279. GC_ADD_TO_BLACK_LIST_STACK(p, source);
  1280. return;
  1281. }
  1282. }
  1283. if (EXPECT(HBLK_IS_FREE(hhdr),0)) {
  1284. GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
  1285. return;
  1286. }
  1287. # if defined(MANUAL_VDB) && defined(THREADS)
  1288. /* Pointer is on the stack. We may have dirtied the object */
  1289. /* it points to, but not yet have called GC_dirty(); */
  1290. GC_dirty(p); /* Implicitly affects entire object. */
  1291. # endif
  1292. PUSH_CONTENTS_HDR(r, GC_mark_stack_top, GC_mark_stack_limit,
  1293. source, mark_and_push_exit, hhdr, FALSE);
  1294. mark_and_push_exit: ;
  1295. /* We silently ignore pointers to near the end of a block, */
  1296. /* which is very mildly suboptimal. */
  1297. /* FIXME: We should probably add a header word to address */
  1298. /* this. */
  1299. }
  1300. # ifdef TRACE_BUF
  1301. # define TRACE_ENTRIES 1000
  1302. struct trace_entry {
  1303. char * kind;
  1304. word gc_no;
  1305. word bytes_allocd;
  1306. word arg1;
  1307. word arg2;
  1308. } GC_trace_buf[TRACE_ENTRIES];
  1309. int GC_trace_buf_ptr = 0;
  1310. void GC_add_trace_entry(char *kind, word arg1, word arg2)
  1311. {
  1312. GC_trace_buf[GC_trace_buf_ptr].kind = kind;
  1313. GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
  1314. GC_trace_buf[GC_trace_buf_ptr].bytes_allocd = GC_bytes_allocd;
  1315. GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
  1316. GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
  1317. GC_trace_buf_ptr++;
  1318. if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
  1319. }
  1320. void GC_print_trace(word gc_no, GC_bool lock)
  1321. {
  1322. int i;
  1323. struct trace_entry *p;
  1324. if (lock) LOCK();
  1325. for (i = GC_trace_buf_ptr-1; i != GC_trace_buf_ptr; i--) {
  1326. if (i < 0) i = TRACE_ENTRIES-1;
  1327. p = GC_trace_buf + i;
  1328. if (p -> gc_no < gc_no || p -> kind == 0) return;
  1329. printf("Trace:%s (gc:%d,bytes:%d) 0x%X, 0x%X\n",
  1330. p -> kind, p -> gc_no, p -> bytes_allocd,
  1331. (p -> arg1) ^ 0x80000000, (p -> arg2) ^ 0x80000000);
  1332. }
  1333. printf("Trace incomplete\n");
  1334. if (lock) UNLOCK();
  1335. }
  1336. # endif /* TRACE_BUF */
  1337. /*
  1338. * A version of GC_push_all that treats all interior pointers as valid
  1339. * and scans the entire region immediately, in case the contents
  1340. * change.
  1341. */
  1342. void GC_push_all_eager(ptr_t bottom, ptr_t top)
  1343. {
  1344. word * b = (word *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
  1345. word * t = (word *)(((word) top) & ~(ALIGNMENT-1));
  1346. register word *p;
  1347. register ptr_t q;
  1348. register word *lim;
  1349. register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
  1350. register ptr_t least_ha = GC_least_plausible_heap_addr;
  1351. # define GC_greatest_plausible_heap_addr greatest_ha
  1352. # define GC_least_plausible_heap_addr least_ha
  1353. if (top == 0) return;
  1354. /* check all pointers in range and push if they appear */
  1355. /* to be valid. */
  1356. lim = t - 1 /* longword */;
  1357. for (p = b; p <= lim; p = (word *)(((ptr_t)p) + ALIGNMENT)) {
  1358. q = (ptr_t)(*p);
  1359. GC_PUSH_ONE_STACK((ptr_t)q, p);
  1360. }
  1361. # undef GC_greatest_plausible_heap_addr
  1362. # undef GC_least_plausible_heap_addr
  1363. }
  1364. #ifndef THREADS
  1365. /*
  1366. * A version of GC_push_all that treats all interior pointers as valid
  1367. * and scans part of the area immediately, to make sure that saved
  1368. * register values are not lost.
  1369. * Cold_gc_frame delimits the stack section that must be scanned
  1370. * eagerly. A zero value indicates that no eager scanning is needed.
  1371. * We don't need to worry about the MANUAL_VDB case here, since this
  1372. * is only called in the single-threaded case. We assume that we
  1373. * cannot collect between an assignment and the corresponding
  1374. * GC_dirty() call.
  1375. */
  1376. void GC_push_all_stack_partially_eager(ptr_t bottom, ptr_t top,
  1377. ptr_t cold_gc_frame)
  1378. {
  1379. if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
  1380. /* Push the hot end of the stack eagerly, so that register values */
  1381. /* saved inside GC frames are marked before they disappear. */
  1382. /* The rest of the marking can be deferred until later. */
  1383. if (0 == cold_gc_frame) {
  1384. GC_push_all_stack(bottom, top);
  1385. return;
  1386. }
  1387. GC_ASSERT(bottom <= cold_gc_frame && cold_gc_frame <= top);
  1388. # ifdef STACK_GROWS_DOWN
  1389. GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
  1390. GC_push_all_eager(bottom, cold_gc_frame);
  1391. # else /* STACK_GROWS_UP */
  1392. GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
  1393. GC_push_all_eager(cold_gc_frame, top);
  1394. # endif /* STACK_GROWS_UP */
  1395. } else {
  1396. GC_push_all_eager(bottom, top);
  1397. }
  1398. # ifdef TRACE_BUF
  1399. GC_add_trace_entry("GC_push_all_stack", bottom, top);
  1400. # endif
  1401. }
  1402. #endif /* !THREADS */
  1403. void GC_push_all_stack(ptr_t bottom, ptr_t top)
  1404. {
  1405. # if defined(THREADS) && defined(MPROTECT_VDB)
  1406. GC_push_all_eager(bottom, top);
  1407. # else
  1408. if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
  1409. GC_push_all(bottom, top);
  1410. } else {
  1411. GC_push_all_eager(bottom, top);
  1412. }
  1413. # endif
  1414. }
  1415. #if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES) && \
  1416. defined(MARK_BIT_PER_GRANULE)
  1417. # if GC_GRANULE_WORDS == 1
  1418. # define USE_PUSH_MARKED_ACCELERATORS
  1419. # define PUSH_GRANULE(q) \
  1420. { ptr_t qcontents = (ptr_t)((q)[0]); \
  1421. GC_PUSH_ONE_HEAP(qcontents, (q)); }
  1422. # elif GC_GRANULE_WORDS == 2
  1423. # define USE_PUSH_MARKED_ACCELERATORS
  1424. # define PUSH_GRANULE(q) \
  1425. { ptr_t qcontents = (ptr_t)((q)[0]); \
  1426. GC_PUSH_ONE_HEAP(qcontents, (q)); \
  1427. qcontents = (ptr_t)((q)[1]); \
  1428. GC_PUSH_ONE_HEAP(qcontents, (q)+1); }
  1429. # elif GC_GRANULE_WORDS == 4
  1430. # define USE_PUSH_MARKED_ACCELERATORS
  1431. # define PUSH_GRANULE(q) \
  1432. { ptr_t qcontents = (ptr_t)((q)[0]); \
  1433. GC_PUSH_ONE_HEAP(qcontents, (q)); \
  1434. qcontents = (ptr_t)((q)[1]); \
  1435. GC_PUSH_ONE_HEAP(qcontents, (q)+1); \
  1436. qcontents = (ptr_t)((q)[2]); \
  1437. GC_PUSH_ONE_HEAP(qcontents, (q)+2); \
  1438. qcontents = (ptr_t)((q)[3]); \
  1439. GC_PUSH_ONE_HEAP(qcontents, (q)+3); }
  1440. # endif
  1441. #endif
  1442. #ifdef USE_PUSH_MARKED_ACCELERATORS
  1443. /* Push all objects reachable from marked objects in the given block */
  1444. /* containing objects of size 1 granule. */
  1445. void GC_push_marked1(struct hblk *h, hdr *hhdr)
  1446. {
  1447. word * mark_word_addr = &(hhdr->hb_marks[0]);
  1448. word *p;
  1449. word *plim;
  1450. word *q;
  1451. word mark_word;
  1452. /* Allow registers to be used for some frequently accessed */
  1453. /* global variables. Otherwise aliasing issues are likely */
  1454. /* to prevent that. */
  1455. ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
  1456. ptr_t least_ha = GC_least_plausible_heap_addr;
  1457. mse * mark_stack_top = GC_mark_stack_top;
  1458. mse * mark_stack_limit = GC_mark_stack_limit;
  1459. # define GC_mark_stack_top mark_stack_top
  1460. # define GC_mark_stack_limit mark_stack_limit
  1461. # define GC_greatest_plausible_heap_addr greatest_ha
  1462. # define GC_least_plausible_heap_addr least_ha
  1463. p = (word *)(h->hb_body);
  1464. plim = (word *)(((word)h) + HBLKSIZE);
  1465. /* go through all words in block */
  1466. while( p < plim ) {
  1467. mark_word = *mark_word_addr++;
  1468. q = p;
  1469. while(mark_word != 0) {
  1470. if (mark_word & 1) {
  1471. PUSH_GRANULE(q);
  1472. }
  1473. q += GC_GRANULE_WORDS;
  1474. mark_word >>= 1;
  1475. }
  1476. p += WORDSZ*GC_GRANULE_WORDS;
  1477. }
  1478. # undef GC_greatest_plausible_heap_addr
  1479. # undef GC_least_plausible_heap_addr
  1480. # undef GC_mark_stack_top
  1481. # undef GC_mark_stack_limit
  1482. GC_mark_stack_top = mark_stack_top;
  1483. }
#ifndef UNALIGNED
/* Push all objects reachable from marked objects in the given block */
/* containing objects of size 2 granules.                             */
void GC_push_marked2(struct hblk *h, hdr *hhdr)
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    word *p;
    word *plim;
    word *q;
    word mark_word;

    ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    ptr_t least_ha = GC_least_plausible_heap_addr;
    mse * mark_stack_top = GC_mark_stack_top;
    mse * mark_stack_limit = GC_mark_stack_limit;
#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* Go through all words in the block. */
    while (p < plim) {
        mark_word = *mark_word_addr++;
        q = p;
        while (mark_word != 0) {
            if (mark_word & 1) {
                PUSH_GRANULE(q);
                PUSH_GRANULE(q + GC_GRANULE_WORDS);
            }
            q += 2 * GC_GRANULE_WORDS;
            mark_word >>= 2;
        }
        p += WORDSZ*GC_GRANULE_WORDS;
    }

#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit

    GC_mark_stack_top = mark_stack_top;
}
# if GC_GRANULE_WORDS < 4
/* Push all objects reachable from marked objects in the given block  */
/* containing objects of size 4 granules.                             */
/* There is a risk of mark stack overflow here, but we handle that.   */
/* And since only previously unmarked objects get pushed, overflow is */
/* not very likely.                                                   */
void GC_push_marked4(struct hblk *h, hdr *hhdr)
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    word *p;
    word *plim;
    word *q;
    word mark_word;

    ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    ptr_t least_ha = GC_least_plausible_heap_addr;
    mse * mark_stack_top = GC_mark_stack_top;
    mse * mark_stack_limit = GC_mark_stack_limit;
#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* Go through all words in the block. */
    while (p < plim) {
        mark_word = *mark_word_addr++;
        q = p;
        while (mark_word != 0) {
            if (mark_word & 1) {
                PUSH_GRANULE(q);
                PUSH_GRANULE(q + GC_GRANULE_WORDS);
                PUSH_GRANULE(q + 2*GC_GRANULE_WORDS);
                PUSH_GRANULE(q + 3*GC_GRANULE_WORDS);
            }
            q += 4 * GC_GRANULE_WORDS;
            mark_word >>= 4;
        }
        p += WORDSZ*GC_GRANULE_WORDS;
    }

#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit

    GC_mark_stack_top = mark_stack_top;
}
#endif /* GC_GRANULE_WORDS < 4 */
#endif /* UNALIGNED */
#endif /* USE_PUSH_MARKED_ACCELERATORS */
/* Push all objects reachable from marked objects in the given block. */
void GC_push_marked(struct hblk *h, hdr *hhdr)
{
    size_t sz = hhdr -> hb_sz;
    word descr = hhdr -> hb_descr;
    ptr_t p;
    word bit_no;
    ptr_t lim;
    mse * GC_mark_stack_top_reg;
    mse * mark_stack_limit = GC_mark_stack_limit;

    /* Some quick shortcuts: */
        /* A zero-length descriptor means the objects are pointer-free, */
        /* so there is nothing to push.                                 */
    if ((0 | GC_DS_LENGTH) == descr) return;
    if (GC_block_empty(hhdr)/* nothing marked */) return;

    GC_n_rescuing_pages++;
    GC_objects_are_marked = TRUE;
    if (sz > MAXOBJBYTES) {
        lim = h -> hb_body;
    } else {
        lim = (h + 1)->hb_body - sz;
    }

    switch(BYTES_TO_GRANULES(sz)) {
#   if defined(USE_PUSH_MARKED_ACCELERATORS)
      case 1:
        GC_push_marked1(h, hhdr);
        break;
#     if !defined(UNALIGNED)
        case 2:
          GC_push_marked2(h, hhdr);
          break;
#       if GC_GRANULE_WORDS < 4
          case 4:
            GC_push_marked4(h, hhdr);
            break;
#       endif
#     endif
#   endif
      default:
        GC_mark_stack_top_reg = GC_mark_stack_top;
        for (p = h -> hb_body, bit_no = 0; p <= lim;
             p += sz, bit_no += MARK_BIT_OFFSET(sz)) {
            if (mark_bit_from_hdr(hhdr, bit_no)) {
                /* Mark from fields inside the object. */
                PUSH_OBJ(p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
            }
        }
        GC_mark_stack_top = GC_mark_stack_top_reg;
    }
}
#ifndef SMALL_CONFIG
/* Test whether any page in the given block is dirty. */
GC_bool GC_block_was_dirty(struct hblk *h, hdr *hhdr)
{
    size_t sz = hhdr -> hb_sz;

    if (sz <= MAXOBJBYTES) {
        return(GC_page_was_dirty(h));
    } else {
        ptr_t p = (ptr_t)h;

        while (p < (ptr_t)h + sz) {
            if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
            p += HBLKSIZE;
        }
        return(FALSE);
    }
}
#endif /* SMALL_CONFIG */
/* Similar to GC_push_marked, but skip over unallocated blocks */
/* and return address of next plausible block.                 */
struct hblk * GC_push_next_marked(struct hblk *h)
{
    hdr * hhdr = HDR(h);

    if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr), FALSE)) {
        h = GC_next_used_block(h);
        if (h == 0) return(0);
        hhdr = GC_find_header((ptr_t)h);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
#ifndef SMALL_CONFIG
/* Identical to above, but mark only from dirty pages. */
struct hblk * GC_push_next_marked_dirty(struct hblk *h)
{
    hdr * hhdr = HDR(h);

    if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
    for (;;) {
        if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr), FALSE)) {
            h = GC_next_used_block(h);
            if (h == 0) return(0);
            hhdr = GC_find_header((ptr_t)h);
        }
#       ifdef STUBBORN_ALLOC
          if (hhdr -> hb_obj_kind == STUBBORN) {
            if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
                break;
            }
          } else {
            if (GC_block_was_dirty(h, hhdr)) break;
          }
#       else
          if (GC_block_was_dirty(h, hhdr)) break;
#       endif
        h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
        hhdr = HDR(h);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
#endif
/* Similar to above, but for uncollectable pages.  Needed since we */
/* do not clear marks for such pages, even for full collections.   */
struct hblk * GC_push_next_marked_uncollectable(struct hblk *h)
{
    hdr * hhdr = HDR(h);

    for (;;) {
        if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr), FALSE)) {
            h = GC_next_used_block(h);
            if (h == 0) return(0);
            hhdr = GC_find_header((ptr_t)h);
        }
        if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
        h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
        hhdr = HDR(h);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}