/*
 * /vendor/gc/headers.c
 * Vendored from the Boehm-Demers-Weiser conservative GC, via
 * http://github.com/feyeleanor/RubyGoLightly
 * (398 lines: 301 code, 37 blank, 60 comment; MD5 fc6df30d777bee1a34293529d8c02666)
 */

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * This implements:
 * 1. allocation of heap block headers
 * 2. A map from addresses to heap block addresses to heap block headers
 *
 * Access speed is crucial.  We implement an index structure based on a
 * 2-level tree.
 */
  23. # include "private/gc_priv.h"
  24. bottom_index * GC_all_bottom_indices = 0;
  25. /* Pointer to first (lowest addr) */
  26. /* bottom_index. */
  27. bottom_index * GC_all_bottom_indices_end = 0;
  28. /* Pointer to last (highest addr) */
  29. /* bottom_index. */
  30. /* Non-macro version of header location routine */
  31. hdr * GC_find_header(ptr_t h)
  32. {
  33. # ifdef HASH_TL
  34. hdr * result;
  35. GET_HDR(h, result);
  36. return(result);
  37. # else
  38. return(HDR_INNER(h));
  39. # endif
  40. }
  41. /* Handle a header cache miss. Returns a pointer to the */
  42. /* header corresponding to p, if p can possibly be a valid */
  43. /* object pointer, and 0 otherwise. */
  44. /* GUARANTEED to return 0 for a pointer past the first page */
  45. /* of an object unless both GC_all_interior_pointers is set */
  46. /* and p is in fact a valid object pointer. */
  47. #ifdef PRINT_BLACK_LIST
  48. hdr * GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce, ptr_t source)
  49. #else
  50. hdr * GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce)
  51. #endif
  52. {
  53. hdr *hhdr;
  54. HC_MISS();
  55. GET_HDR(p, hhdr);
  56. if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
  57. if (GC_all_interior_pointers) {
  58. if (hhdr != 0) {
  59. ptr_t current = p;
  60. current = (ptr_t)HBLKPTR(current);
  61. do {
  62. current = current - HBLKSIZE*(word)hhdr;
  63. hhdr = HDR(current);
  64. } while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
  65. /* current points to near the start of the large object */
  66. if (hhdr -> hb_flags & IGNORE_OFF_PAGE
  67. || HBLK_IS_FREE(hhdr))
  68. return 0;
  69. if (p - current >= (ptrdiff_t)(hhdr->hb_sz)) {
  70. GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
  71. /* Pointer past the end of the block */
  72. return 0;
  73. }
  74. } else {
  75. GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
  76. }
  77. return hhdr;
  78. /* Pointers past the first page are probably too rare */
  79. /* to add them to the cache. We don't. */
  80. /* And correctness relies on the fact that we don't. */
  81. } else {
  82. if (hhdr == 0) {
  83. GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
  84. }
  85. return 0;
  86. }
  87. } else {
  88. if (HBLK_IS_FREE(hhdr)) {
  89. GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
  90. return 0;
  91. } else {
  92. hce -> block_addr = (word)(p) >> LOG_HBLKSIZE;
  93. hce -> hce_hdr = hhdr;
  94. return hhdr;
  95. }
  96. }
  97. }
  98. /* Routines to dynamically allocate collector data structures that will */
  99. /* never be freed. */
  100. static ptr_t scratch_free_ptr = 0;
  101. /* GC_scratch_last_end_ptr is end point of last obtained scratch area. */
  102. /* GC_scratch_end_ptr is end point of current scratch area. */
  103. ptr_t GC_scratch_alloc(size_t bytes)
  104. {
  105. register ptr_t result = scratch_free_ptr;
  106. bytes += GRANULE_BYTES-1;
  107. bytes &= ~(GRANULE_BYTES-1);
  108. scratch_free_ptr += bytes;
  109. if (scratch_free_ptr <= GC_scratch_end_ptr) {
  110. return(result);
  111. }
  112. {
  113. word bytes_to_get = MINHINCR * HBLKSIZE;
  114. if (bytes_to_get <= bytes) {
  115. /* Undo the damage, and get memory directly */
  116. bytes_to_get = bytes;
  117. # ifdef USE_MMAP
  118. bytes_to_get += GC_page_size - 1;
  119. bytes_to_get &= ~(GC_page_size - 1);
  120. # endif
  121. result = (ptr_t)GET_MEM(bytes_to_get);
  122. GC_add_to_our_memory(result, bytes_to_get);
  123. scratch_free_ptr -= bytes;
  124. GC_scratch_last_end_ptr = result + bytes;
  125. return(result);
  126. }
  127. result = (ptr_t)GET_MEM(bytes_to_get);
  128. GC_add_to_our_memory(result, bytes_to_get);
  129. if (result == 0) {
  130. if (GC_print_stats)
  131. GC_printf("Out of memory - trying to allocate less\n");
  132. scratch_free_ptr -= bytes;
  133. bytes_to_get = bytes;
  134. # ifdef USE_MMAP
  135. bytes_to_get += GC_page_size - 1;
  136. bytes_to_get &= ~(GC_page_size - 1);
  137. # endif
  138. result = (ptr_t)GET_MEM(bytes_to_get);
  139. GC_add_to_our_memory(result, bytes_to_get);
  140. return result;
  141. }
  142. scratch_free_ptr = result;
  143. GC_scratch_end_ptr = scratch_free_ptr + bytes_to_get;
  144. GC_scratch_last_end_ptr = GC_scratch_end_ptr;
  145. return(GC_scratch_alloc(bytes));
  146. }
  147. }
  148. static hdr * hdr_free_list = 0;
  149. /* Return an uninitialized header */
  150. static hdr * alloc_hdr(void)
  151. {
  152. register hdr * result;
  153. if (hdr_free_list == 0) {
  154. result = (hdr *) GC_scratch_alloc((word)(sizeof(hdr)));
  155. } else {
  156. result = hdr_free_list;
  157. hdr_free_list = (hdr *) (result -> hb_next);
  158. }
  159. return(result);
  160. }
  161. static void free_hdr(hdr * hhdr)
  162. {
  163. hhdr -> hb_next = (struct hblk *) hdr_free_list;
  164. hdr_free_list = hhdr;
  165. }
  166. #ifdef USE_HDR_CACHE
  167. word GC_hdr_cache_hits = 0;
  168. word GC_hdr_cache_misses = 0;
  169. #endif
  170. void GC_init_headers(void)
  171. {
  172. register unsigned i;
  173. GC_all_nils = (bottom_index *)GC_scratch_alloc((word)sizeof(bottom_index));
  174. BZERO(GC_all_nils, sizeof(bottom_index));
  175. for (i = 0; i < TOP_SZ; i++) {
  176. GC_top_index[i] = GC_all_nils;
  177. }
  178. }
  179. /* Make sure that there is a bottom level index block for address addr */
  180. /* Return FALSE on failure. */
  181. static GC_bool get_index(word addr)
  182. {
  183. word hi = (word)(addr) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
  184. bottom_index * r;
  185. bottom_index * p;
  186. bottom_index ** prev;
  187. bottom_index *pi;
  188. # ifdef HASH_TL
  189. word i = TL_HASH(hi);
  190. bottom_index * old;
  191. old = p = GC_top_index[i];
  192. while(p != GC_all_nils) {
  193. if (p -> key == hi) return(TRUE);
  194. p = p -> hash_link;
  195. }
  196. r = (bottom_index*)GC_scratch_alloc((word)(sizeof (bottom_index)));
  197. if (r == 0) return(FALSE);
  198. BZERO(r, sizeof (bottom_index));
  199. r -> hash_link = old;
  200. GC_top_index[i] = r;
  201. # else
  202. if (GC_top_index[hi] != GC_all_nils) return(TRUE);
  203. r = (bottom_index*)GC_scratch_alloc((word)(sizeof (bottom_index)));
  204. if (r == 0) return(FALSE);
  205. GC_top_index[hi] = r;
  206. BZERO(r, sizeof (bottom_index));
  207. # endif
  208. r -> key = hi;
  209. /* Add it to the list of bottom indices */
  210. prev = &GC_all_bottom_indices; /* pointer to p */
  211. pi = 0; /* bottom_index preceding p */
  212. while ((p = *prev) != 0 && p -> key < hi) {
  213. pi = p;
  214. prev = &(p -> asc_link);
  215. }
  216. r -> desc_link = pi;
  217. if (0 == p) {
  218. GC_all_bottom_indices_end = r;
  219. } else {
  220. p -> desc_link = r;
  221. }
  222. r -> asc_link = p;
  223. *prev = r;
  224. return(TRUE);
  225. }
  226. /* Install a header for block h. */
  227. /* The header is uninitialized. */
  228. /* Returns the header or 0 on failure. */
  229. struct hblkhdr * GC_install_header(struct hblk *h)
  230. {
  231. hdr * result;
  232. if (!get_index((word) h)) return(0);
  233. result = alloc_hdr();
  234. SET_HDR(h, result);
  235. # ifdef USE_MUNMAP
  236. result -> hb_last_reclaimed = (unsigned short)GC_gc_no;
  237. # endif
  238. return(result);
  239. }
  240. /* Set up forwarding counts for block h of size sz */
  241. GC_bool GC_install_counts(struct hblk *h, size_t sz/* bytes */)
  242. {
  243. struct hblk * hbp;
  244. word i;
  245. for (hbp = h; (char *)hbp < (char *)h + sz; hbp += BOTTOM_SZ) {
  246. if (!get_index((word) hbp)) return(FALSE);
  247. }
  248. if (!get_index((word)h + sz - 1)) return(FALSE);
  249. for (hbp = h + 1; (char *)hbp < (char *)h + sz; hbp += 1) {
  250. i = HBLK_PTR_DIFF(hbp, h);
  251. SET_HDR(hbp, (hdr *)(i > MAX_JUMP? MAX_JUMP : i));
  252. }
  253. return(TRUE);
  254. }
  255. /* Remove the header for block h */
  256. void GC_remove_header(struct hblk *h)
  257. {
  258. hdr ** ha;
  259. GET_HDR_ADDR(h, ha);
  260. free_hdr(*ha);
  261. *ha = 0;
  262. }
  263. /* Remove forwarding counts for h */
  264. void GC_remove_counts(struct hblk *h, size_t sz/* bytes */)
  265. {
  266. register struct hblk * hbp;
  267. for (hbp = h+1; (char *)hbp < (char *)h + sz; hbp += 1) {
  268. SET_HDR(hbp, 0);
  269. }
  270. }
  271. /* Apply fn to all allocated blocks */
  272. /*VARARGS1*/
  273. void GC_apply_to_all_blocks(void (*fn)(struct hblk *h, word client_data),
  274. word client_data)
  275. {
  276. signed_word j;
  277. bottom_index * index_p;
  278. for (index_p = GC_all_bottom_indices; index_p != 0;
  279. index_p = index_p -> asc_link) {
  280. for (j = BOTTOM_SZ-1; j >= 0;) {
  281. if (!IS_FORWARDING_ADDR_OR_NIL(index_p->index[j])) {
  282. if (!HBLK_IS_FREE(index_p->index[j])) {
  283. (*fn)(((struct hblk *)
  284. (((index_p->key << LOG_BOTTOM_SZ) + (word)j)
  285. << LOG_HBLKSIZE)),
  286. client_data);
  287. }
  288. j--;
  289. } else if (index_p->index[j] == 0) {
  290. j--;
  291. } else {
  292. j -= (signed_word)(index_p->index[j]);
  293. }
  294. }
  295. }
  296. }
  297. /* Get the next valid block whose address is at least h */
  298. /* Return 0 if there is none. */
  299. struct hblk * GC_next_used_block(struct hblk *h)
  300. {
  301. register bottom_index * bi;
  302. register word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
  303. GET_BI(h, bi);
  304. if (bi == GC_all_nils) {
  305. register word hi = (word)h >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
  306. bi = GC_all_bottom_indices;
  307. while (bi != 0 && bi -> key < hi) bi = bi -> asc_link;
  308. j = 0;
  309. }
  310. while(bi != 0) {
  311. while (j < BOTTOM_SZ) {
  312. hdr * hhdr = bi -> index[j];
  313. if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
  314. j++;
  315. } else {
  316. if (!HBLK_IS_FREE(hhdr)) {
  317. return((struct hblk *)
  318. (((bi -> key << LOG_BOTTOM_SZ) + j)
  319. << LOG_HBLKSIZE));
  320. } else {
  321. j += divHBLKSZ(hhdr -> hb_sz);
  322. }
  323. }
  324. }
  325. j = 0;
  326. bi = bi -> asc_link;
  327. }
  328. return(0);
  329. }
  330. /* Get the last (highest address) block whose address is */
  331. /* at most h. Return 0 if there is none. */
  332. /* Unlike the above, this may return a free block. */
  333. struct hblk * GC_prev_block(struct hblk *h)
  334. {
  335. register bottom_index * bi;
  336. register signed_word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
  337. GET_BI(h, bi);
  338. if (bi == GC_all_nils) {
  339. register word hi = (word)h >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
  340. bi = GC_all_bottom_indices_end;
  341. while (bi != 0 && bi -> key > hi) bi = bi -> desc_link;
  342. j = BOTTOM_SZ - 1;
  343. }
  344. while(bi != 0) {
  345. while (j >= 0) {
  346. hdr * hhdr = bi -> index[j];
  347. if (0 == hhdr) {
  348. --j;
  349. } else if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
  350. j -= (signed_word)hhdr;
  351. } else {
  352. return((struct hblk *)
  353. (((bi -> key << LOG_BOTTOM_SZ) + j)
  354. << LOG_HBLKSIZE));
  355. }
  356. }
  357. j = BOTTOM_SZ - 1;
  358. bi = bi -> desc_link;
  359. }
  360. return(0);
  361. }