
/tools/build/src/engine/boehm_gc/mark_rts.c

https://bitbucket.org/bosp/external-boost
C | 617 lines | 399 code | 66 blank | 152 comment | 109 complexity | 6ec578532e7121fc292fe38edd6e9583 MD5
Possible License(s): MIT
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
# include <stdio.h>
# include "private/gc_priv.h"

/* Data structure for list of root sets.                                */
/* We keep a hash table, so that we can filter out duplicate additions. */
/* Under Win32, we need to do a better job of filtering overlaps, so    */
/* we resort to sequential search, and pay the price.                   */
/* This is really declared in gc_priv.h:
struct roots {
    ptr_t r_start;
    ptr_t r_end;
#   if !defined(MSWIN32) && !defined(MSWINCE)
      struct roots * r_next;
#   endif
    GC_bool r_tmp;
        -- Delete before registering new dynamic libraries
};

struct roots GC_static_roots[MAX_ROOT_SETS];
*/

int GC_no_dls = 0;      /* Register dynamic library data segments. */

static int n_root_sets = 0;
        /* GC_static_roots[0..n_root_sets) contains the valid root sets. */
# if !defined(NO_DEBUGGING)
/* For debugging: */
void GC_print_static_roots(void)
{
    register int i;
    size_t total = 0;

    for (i = 0; i < n_root_sets; i++) {
        GC_printf("From %p to %p ",
                  GC_static_roots[i].r_start,
                  GC_static_roots[i].r_end);
        if (GC_static_roots[i].r_tmp) {
            GC_printf(" (temporary)\n");
        } else {
            GC_printf("\n");
        }
        total += GC_static_roots[i].r_end - GC_static_roots[i].r_start;
    }
    GC_printf("Total size: %ld\n", (unsigned long) total);
    if (GC_root_size != total) {
        GC_printf("GC_root_size incorrect: %ld!!\n",
                  (unsigned long) GC_root_size);
    }
}
# endif /* NO_DEBUGGING */
/* Primarily for debugging support:             */
/* Is the address p in one of the registered    */
/* static root sections?                        */
GC_bool GC_is_static_root(ptr_t p)
{
    static int last_root_set = MAX_ROOT_SETS;
    register int i;

    if (last_root_set < n_root_sets
        && p >= GC_static_roots[last_root_set].r_start
        && p < GC_static_roots[last_root_set].r_end) return(TRUE);
    for (i = 0; i < n_root_sets; i++) {
        if (p >= GC_static_roots[i].r_start
            && p < GC_static_roots[i].r_end) {
            last_root_set = i;
            return(TRUE);
        }
    }
    return(FALSE);
}
#if !defined(MSWIN32) && !defined(MSWINCE)
/*
#   define LOG_RT_SIZE 6
#   define RT_SIZE (1 << LOG_RT_SIZE)  -- Power of 2, may be != MAX_ROOT_SETS

    struct roots * GC_root_index[RT_SIZE];
        -- Hash table header.  Used only to check whether a range is
        -- already present.
        -- really defined in gc_priv.h
*/

static INLINE int rt_hash(ptr_t addr)
{
    word result = (word) addr;

#   if CPP_WORDSZ > 8*LOG_RT_SIZE
        result ^= result >> 8*LOG_RT_SIZE;
#   endif
#   if CPP_WORDSZ > 4*LOG_RT_SIZE
        result ^= result >> 4*LOG_RT_SIZE;
#   endif
    result ^= result >> 2*LOG_RT_SIZE;
    result ^= result >> LOG_RT_SIZE;
    result &= (RT_SIZE-1);
    return(result);
}
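/* Editor's note: illustrative sketch, not part of the original Boehm   */
/* GC source.  rt_hash() folds every bit of the address into the low    */
/* LOG_RT_SIZE bits with xor-shifts, so root segments that differ only  */
/* in high-order bits still spread across GC_root_index.  Assuming      */
/* LOG_RT_SIZE == 6 and 64-bit words (values taken from gc_priv.h),     */
/* the folding expands to:                                               */
#ifdef GC_EDITOR_EXAMPLES  /* hypothetical guard; examples are not built */
static unsigned example_fold_addr64(unsigned long long a)
{
    a ^= a >> 48;               /* 8*LOG_RT_SIZE */
    a ^= a >> 24;               /* 4*LOG_RT_SIZE */
    a ^= a >> 12;               /* 2*LOG_RT_SIZE */
    a ^= a >> 6;                /*   LOG_RT_SIZE */
    return (unsigned)(a & 63);  /* RT_SIZE - 1   */
}
#endif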
/* Is a range starting at b already in the table? If so return a */
/* pointer to it, else NIL.                                       */
struct roots * GC_roots_present(ptr_t b)
{
    int h = rt_hash(b);
    struct roots *p = GC_root_index[h];

    while (p != 0) {
        if (p -> r_start == (ptr_t)b) return(p);
        p = p -> r_next;
    }
    return(0);
}
/* Add the given root structure to the index. */
static void add_roots_to_index(struct roots *p)
{
    int h = rt_hash(p -> r_start);

    p -> r_next = GC_root_index[h];
    GC_root_index[h] = p;
}

# else /* MSWIN32 || MSWINCE */

# define add_roots_to_index(p)

# endif

word GC_root_size = 0;
void GC_add_roots(void *b, void *e)
{
    DCL_LOCK_STATE;

    if (!GC_is_initialized) GC_init();
    LOCK();
    GC_add_roots_inner((ptr_t)b, (ptr_t)e, FALSE);
    UNLOCK();
}
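/* Editor's note: illustrative usage sketch, not part of the original   */
/* Boehm GC source.  GC_add_roots() is the public gc.h entry point for  */
/* registering a static data range [b,e) to be scanned for pointers.    */
/* Per the comment below, re-registering the same interval is a cheap   */
/* no-op.  The pool name is hypothetical:                                */
#ifdef GC_EDITOR_EXAMPLES
static char example_pool[4096];   /* hypothetical application data */
static void example_register_roots(void)
{
    GC_add_roots(example_pool, example_pool + sizeof(example_pool));
    /* Registering the identical interval again is benign: */
    GC_add_roots(example_pool, example_pool + sizeof(example_pool));
}
#endif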
/* Add [b,e) to the root set.  Adding the same interval a second time */
/* is a moderately fast noop, and hence benign.  We do not handle     */
/* different but overlapping intervals efficiently.  (We do handle    */
/* them correctly.)                                                   */
/* Tmp specifies that the interval may be deleted before              */
/* reregistering dynamic libraries.                                   */
void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp)
{
    struct roots * old;

#   if defined(MSWIN32) || defined(MSWINCE)
      /* Spend the time to ensure that there are no overlapping */
      /* or adjacent intervals.  This could be done faster with */
      /* e.g. a balanced tree.  But the execution time here is  */
      /* virtually guaranteed to be dominated by the time it    */
      /* takes to scan the roots.                               */
      {
        register int i;

        for (i = 0; i < n_root_sets; i++) {
            old = GC_static_roots + i;
            if (b <= old -> r_end && e >= old -> r_start) {
                if (b < old -> r_start) {
                    /* Account for the growth before overwriting */
                    /* r_start; afterwards the delta would be 0. */
                    GC_root_size += (old -> r_start - b);
                    old -> r_start = b;
                }
                if (e > old -> r_end) {
                    GC_root_size += (e - old -> r_end);
                    old -> r_end = e;
                }
                old -> r_tmp &= tmp;
                break;
            }
        }
        if (i < n_root_sets) {
            /* merge other overlapping intervals */
            struct roots *other;

            for (i++; i < n_root_sets; i++) {
                other = GC_static_roots + i;
                b = other -> r_start;
                e = other -> r_end;
                if (b <= old -> r_end && e >= old -> r_start) {
                    if (b < old -> r_start) {
                        GC_root_size += (old -> r_start - b);
                        old -> r_start = b;
                    }
                    if (e > old -> r_end) {
                        GC_root_size += (e - old -> r_end);
                        old -> r_end = e;
                    }
                    old -> r_tmp &= other -> r_tmp;
                    /* Delete this entry by moving the last one into */
                    /* its slot, then re-examine the moved entry.    */
                    GC_root_size -= (other -> r_end - other -> r_start);
                    other -> r_start = GC_static_roots[n_root_sets-1].r_start;
                    other -> r_end = GC_static_roots[n_root_sets-1].r_end;
                    n_root_sets--;
                    i--;
                }
            }
            return;
        }
      }
#   else
      old = GC_roots_present(b);
      if (old != 0) {
          if (e <= old -> r_end) /* already there */ return;
          /* else extend */
          GC_root_size += e - old -> r_end;
          old -> r_end = e;
          return;
      }
#   endif
    if (n_root_sets == MAX_ROOT_SETS) {
        ABORT("Too many root sets\n");
    }
    GC_static_roots[n_root_sets].r_start = (ptr_t)b;
    GC_static_roots[n_root_sets].r_end = (ptr_t)e;
    GC_static_roots[n_root_sets].r_tmp = tmp;
#   if !defined(MSWIN32) && !defined(MSWINCE)
      GC_static_roots[n_root_sets].r_next = 0;
#   endif
    add_roots_to_index(GC_static_roots + n_root_sets);
    GC_root_size += e - b;
    n_root_sets++;
}
static GC_bool roots_were_cleared = FALSE;

void GC_clear_roots (void)
{
    DCL_LOCK_STATE;

    if (!GC_is_initialized) GC_init();
    LOCK();
    roots_were_cleared = TRUE;
    n_root_sets = 0;
    GC_root_size = 0;
#   if !defined(MSWIN32) && !defined(MSWINCE)
    {
        register int i;

        for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0;
    }
#   endif
    UNLOCK();
}
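/* Editor's note: illustrative usage sketch, not part of the original   */
/* Boehm GC source.  The intended use of GC_clear_roots() is to discard */
/* the automatically registered segments and re-register only ranges    */
/* known to hold pointers; roots_were_cleared then makes GC_push_roots()*/
/* still push the collector's own structures.  The symbols below are    */
/* hypothetical:                                                         */
#ifdef GC_EDITOR_EXAMPLES
extern char example_ptr_area_start[], example_ptr_area_end[];
static void example_minimal_root_set(void)
{
    GC_clear_roots();
    GC_add_roots(example_ptr_area_start, example_ptr_area_end);
}
#endif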
/* Internal use only; lock held. */
static void GC_remove_root_at_pos(int i)
{
    GC_root_size -= (GC_static_roots[i].r_end - GC_static_roots[i].r_start);
    GC_static_roots[i].r_start = GC_static_roots[n_root_sets-1].r_start;
    GC_static_roots[i].r_end = GC_static_roots[n_root_sets-1].r_end;
    GC_static_roots[i].r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
    n_root_sets--;
}

#if !defined(MSWIN32) && !defined(MSWINCE)
static void GC_rebuild_root_index(void)
{
    int i;

    for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0;
    for (i = 0; i < n_root_sets; i++)
        add_roots_to_index(GC_static_roots + i);
}
#endif
/* Internal use only; lock held. */
void GC_remove_tmp_roots(void)
{
    int i;

    for (i = 0; i < n_root_sets; ) {
        if (GC_static_roots[i].r_tmp) {
            GC_remove_root_at_pos(i);
        } else {
            i++;
        }
    }
#if !defined(MSWIN32) && !defined(MSWINCE)
    GC_rebuild_root_index();
#endif
}
#if !defined(MSWIN32) && !defined(MSWINCE)
void GC_remove_roots(void *b, void *e)
{
    DCL_LOCK_STATE;

    LOCK();
    GC_remove_roots_inner((ptr_t)b, (ptr_t)e);
    UNLOCK();
}

/* Should only be called when the lock is held. */
void GC_remove_roots_inner(ptr_t b, ptr_t e)
{
    int i;

    for (i = 0; i < n_root_sets; ) {
        if (GC_static_roots[i].r_start >= b
            && GC_static_roots[i].r_end <= e) {
            GC_remove_root_at_pos(i);
        } else {
            i++;
        }
    }
    GC_rebuild_root_index();
}
#endif /* !defined(MSWIN32) && !defined(MSWINCE) */
#if defined(MSWIN32) || defined(_WIN32_WCE_EMULATION)
/* Workaround for the OS mapping and unmapping behind our back:   */
/* Is the address p in one of the temporary static root sections? */
GC_bool GC_is_tmp_root(ptr_t p)
{
    static int last_root_set = MAX_ROOT_SETS;
    register int i;

    if (last_root_set < n_root_sets
        && p >= GC_static_roots[last_root_set].r_start
        && p < GC_static_roots[last_root_set].r_end)
        return GC_static_roots[last_root_set].r_tmp;
    for (i = 0; i < n_root_sets; i++) {
        if (p >= GC_static_roots[i].r_start
            && p < GC_static_roots[i].r_end) {
            last_root_set = i;
            return GC_static_roots[i].r_tmp;
        }
    }
    return(FALSE);
}
#endif /* MSWIN32 || _WIN32_WCE_EMULATION */
ptr_t GC_approx_sp(void)
{
    volatile word dummy;

    dummy = 42;  /* Force stack to grow if necessary.  Otherwise the   */
                 /* later accesses might cause the kernel to think we're */
                 /* doing something wrong.                              */
#   ifdef _MSC_VER
#     pragma warning(disable:4172)
#   endif
    return((ptr_t)(&dummy));
#   ifdef _MSC_VER
#     pragma warning(default:4172)
#   endif
}
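/* Editor's note: illustrative sketch, not part of the original Boehm   */
/* GC source.  GC_approx_sp() works because the address of a local      */
/* variable lies roughly at the current stack pointer.  The same trick  */
/* in isolation (only meaningful while this frame, or a deeper one, is  */
/* still live):                                                          */
#ifdef GC_EDITOR_EXAMPLES
static char * example_approx_sp(void)
{
    volatile char marker = 0;   /* volatile discourages the compiler  */
                                /* from optimizing the address away   */
    return (char *)&marker;     /* an approximation, exactly as above */
}
#endif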
/*
 * Data structure for excluded static roots.
 * Real declaration is in gc_priv.h.

struct exclusion {
    ptr_t e_start;
    ptr_t e_end;
};

struct exclusion GC_excl_table[MAX_EXCLUSIONS];
                    -- Array of exclusions, ascending
                    -- address order.
*/

size_t GC_excl_table_entries = 0;   /* Number of entries in use. */

/* Return the first exclusion range that includes an address >= start_addr. */
/* Assumes the exclusion table contains at least one entry (namely the      */
/* GC data structures).                                                      */
struct exclusion * GC_next_exclusion(ptr_t start_addr)
{
    size_t low = 0;
    size_t high = GC_excl_table_entries - 1;
    size_t mid;

    while (high > low) {
        mid = (low + high) >> 1;
        /* low <= mid < high */
        if ((word) GC_excl_table[mid].e_end <= (word) start_addr) {
            low = mid + 1;
        } else {
            high = mid;
        }
    }
    if ((word) GC_excl_table[low].e_end <= (word) start_addr) return 0;
    return GC_excl_table + low;
}
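/* Editor's note: worked example added in editing, not part of the      */
/* original Boehm GC source.  The loop above is a lower-bound binary    */
/* search for the first entry with e_end > start_addr.  With exclusions */
/* [10,20) and [40,50):                                                  */
/*     GC_next_exclusion(5)  -> [10,20)                                  */
/*     GC_next_exclusion(15) -> [10,20)   (15 lies inside it)            */
/*     GC_next_exclusion(20) -> [40,50)                                  */
/*     GC_next_exclusion(50) -> 0         (no range ends above 50)       */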
void GC_exclude_static_roots(void *start, void *finish)
{
    struct exclusion * next;
    size_t next_index, i;

    if (0 == GC_excl_table_entries) {
        next = 0;
    } else {
        next = GC_next_exclusion((ptr_t)start);
    }
    if (0 != next) {
        if ((word)(next -> e_start) < (word) finish) {
            /* incomplete error check. */
            ABORT("exclusion ranges overlap");
        }
        if ((word)(next -> e_start) == (word) finish) {
            /* extend old range backwards */
            next -> e_start = (ptr_t)start;
            return;
        }
        next_index = next - GC_excl_table;
    } else {
        next_index = GC_excl_table_entries;
    }
    /* Check the bound before shifting, so the shift loop cannot */
    /* write one entry past the end of GC_excl_table.            */
    if (GC_excl_table_entries == MAX_EXCLUSIONS) ABORT("Too many exclusions");
    for (i = GC_excl_table_entries; i > next_index; --i) {
        GC_excl_table[i] = GC_excl_table[i-1];
    }
    GC_excl_table[next_index].e_start = (ptr_t)start;
    GC_excl_table[next_index].e_end = (ptr_t)finish;
    ++GC_excl_table_entries;
}
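/* Editor's note: illustrative usage sketch, not part of the original   */
/* Boehm GC source.  GC_exclude_static_roots() (public, in gc.h) marks  */
/* a sub-range of registered static data as pointer-free, so a large    */
/* static buffer of raw bytes cannot pin heap objects through false     */
/* pointers.  The buffer name is hypothetical:                           */
#ifdef GC_EDITOR_EXAMPLES
static char example_blob[1 << 20];   /* hypothetical pointer-free data */
static void example_exclude_blob(void)
{
    GC_exclude_static_roots(example_blob, example_blob + sizeof(example_blob));
}
#endif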
/* Invoke push_conditional on ranges that are not excluded. */
void GC_push_conditional_with_exclusions(ptr_t bottom, ptr_t top, GC_bool all)
{
    struct exclusion * next;
    ptr_t excl_start;

    while (bottom < top) {
        next = GC_next_exclusion(bottom);
        if (0 == next || (excl_start = next -> e_start) >= top) {
            GC_push_conditional(bottom, top, all);
            return;
        }
        if (excl_start > bottom) GC_push_conditional(bottom, excl_start, all);
        bottom = next -> e_end;
    }
}
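/* Editor's note: worked example added in editing, not part of the      */
/* original Boehm GC source.  With a single exclusion [x1,x2) strictly  */
/* inside [bottom,top), the loop above makes exactly two calls:         */
/*     GC_push_conditional(bottom, x1, all);                            */
/*     GC_push_conditional(x2, top, all);                               */
/* so the excluded bytes are never scanned for pointers.                */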
/*
 * In the absence of threads, push the stack contents.
 * In the presence of threads, push enough of the current stack
 * to ensure that callee-save registers saved in collector frames have been
 * seen.
 * FIXME: Merge with per-thread stuff.
 */
void GC_push_current_stack(ptr_t cold_gc_frame, void * context)
{
#   if defined(THREADS)
      if (0 == cold_gc_frame) return;
#     ifdef STACK_GROWS_DOWN
        GC_push_all_eager(GC_approx_sp(), cold_gc_frame);
        /* For IA64, the register stack backing store is handled */
        /* in the thread-specific code.                          */
#     else
        GC_push_all_eager(cold_gc_frame, GC_approx_sp());
#     endif
#   else
#     ifdef STACK_GROWS_DOWN
        GC_push_all_stack_partially_eager(GC_approx_sp(), GC_stackbottom,
                                          cold_gc_frame);
#       ifdef IA64
          /* We also need to push the register stack backing store. */
          /* This should really be done in the same way as the      */
          /* regular stack.  For now we fudge it a bit.             */
          /* Note that the backing store grows up, so we can't use  */
          /* GC_push_all_stack_partially_eager.                     */
          {
            extern word GC_save_regs_ret_val;
                /* Previously set to backing store pointer. */
            ptr_t bsp = (ptr_t) GC_save_regs_ret_val;
            ptr_t cold_gc_bs_pointer;

            if (GC_all_interior_pointers) {
                cold_gc_bs_pointer = bsp - 2048;
                if (cold_gc_bs_pointer < BACKING_STORE_BASE) {
                    cold_gc_bs_pointer = BACKING_STORE_BASE;
                } else {
                    GC_push_all_stack(BACKING_STORE_BASE, cold_gc_bs_pointer);
                }
            } else {
                cold_gc_bs_pointer = BACKING_STORE_BASE;
            }
            GC_push_all_eager(cold_gc_bs_pointer, bsp);
            /* All values should be sufficiently aligned that we */
            /* don't have to worry about the boundary.           */
          }
#       endif
#     else
        GC_push_all_stack_partially_eager(GC_stackbottom, GC_approx_sp(),
                                          cold_gc_frame);
#     endif
#   endif /* !THREADS */
}
/*
 * Push GC internal roots.  Only called if there is some reason to believe
 * these would not otherwise get registered.
 */
void GC_push_gc_structures(void)
{
    GC_push_finalizer_structures();
#   if defined(THREADS)
      GC_push_thread_structures();
#   endif
}

#ifdef THREAD_LOCAL_ALLOC
  void GC_mark_thread_local_free_lists(void);
#endif
void GC_cond_register_dynamic_libraries(void)
{
#   if defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \
       || defined(PCR)
      GC_remove_tmp_roots();
      if (!GC_no_dls) GC_register_dynamic_libraries();
#   else
      GC_no_dls = TRUE;
#   endif
}
/*
 * Call the mark routines (GC_tl_push for a single pointer, GC_push_conditional
 * on groups of pointers) on every top level accessible pointer.
 * If all is FALSE, arrange to push only possibly altered values.
 * Cold_gc_frame is an address inside a GC frame that
 * remains valid until all marking is complete.
 * A zero value indicates that it's OK to miss some
 * register values.
 */
void GC_push_roots(GC_bool all, ptr_t cold_gc_frame)
{
    int i;
    unsigned kind;

    /*
     * Next push static data.  This must happen early on, since it's
     * not robust against mark stack overflow.
     */
    /* Reregister dynamic libraries, in case one got added.         */
    /* There is some argument for doing this as late as possible,   */
    /* especially on win32, where it can change asynchronously.     */
    /* In those cases, we do it here.  But on other platforms, it's */
    /* not safe with the world stopped, so we do it earlier.        */
#   if !defined(REGISTER_LIBRARIES_EARLY)
      GC_cond_register_dynamic_libraries();
#   endif

    /* Mark everything in static data areas. */
    for (i = 0; i < n_root_sets; i++) {
        GC_push_conditional_with_exclusions(
                             GC_static_roots[i].r_start,
                             GC_static_roots[i].r_end, all);
    }

    /* Mark all free list header blocks, if those were allocated from */
    /* the garbage collected heap.  This makes sure they don't        */
    /* disappear if we are not marking from static data.  It also     */
    /* saves us the trouble of scanning them, and possibly that of    */
    /* marking the freelists.                                         */
    for (kind = 0; kind < GC_n_kinds; kind++) {
        void *base = GC_base(GC_obj_kinds[kind].ok_freelist);
        if (0 != base) {
            GC_set_mark_bit(base);
        }
    }

    /* Mark from GC internal roots if those might otherwise have */
    /* been excluded.                                            */
    if (GC_no_dls || roots_were_cleared) {
        GC_push_gc_structures();
    }

    /* Mark thread local free lists, even if their mark        */
    /* descriptor excludes the link field.                     */
    /* If the world is not stopped, this is unsafe.  It is     */
    /* also unnecessary, since we will do this again with the  */
    /* world stopped.                                          */
#   if defined(THREAD_LOCAL_ALLOC)
      if (GC_world_stopped) GC_mark_thread_local_free_lists();
#   endif

    /*
     * Now traverse stacks, and mark from register contents.
     * These must be done last, since they can legitimately overflow
     * the mark stack.
     * This is usually done by saving the current context on the
     * stack, and then just tracing from the stack.
     */
    GC_push_regs_and_stack(cold_gc_frame);

    if (GC_push_other_roots != 0) (*GC_push_other_roots)();
        /* In the threads case, this also pushes thread stacks. */
        /* Note that without interior pointer recognition lots  */
        /* of stuff may have been pushed already, and this      */
        /* should be careful about mark stack overflows.        */
}