/* vendor/gc/misc.c — Boehm–Demers–Weiser conservative collector miscellany,
 * vendored from RubyGoLightly (github.com/feyeleanor/RubyGoLightly).
 */

  1. /*
  2. * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
  3. * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
  4. * Copyright (c) 1999-2001 by Hewlett-Packard Company. All rights reserved.
  5. *
  6. * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
  7. * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
  8. *
  9. * Permission is hereby granted to use or copy this program
  10. * for any purpose, provided the above notices are retained on all copies.
  11. * Permission to modify the code and to distribute modified code is granted,
  12. * provided the above notices are retained, and a notice that the code was
  13. * modified is included with the above copyright notice.
  14. */
  15. /* Boehm, July 31, 1995 5:02 pm PDT */
  16. #include <stdio.h>
  17. #include <limits.h>
  18. #include <stdarg.h>
  19. #ifndef _WIN32_WCE
  20. #include <signal.h>
  21. #endif
  22. #define I_HIDE_POINTERS /* To make GC_call_with_alloc_lock visible */
  23. #include "private/gc_pmark.h"
  24. #ifdef GC_SOLARIS_THREADS
  25. # include <sys/syscall.h>
  26. #endif
  27. #if defined(MSWIN32) || defined(MSWINCE)
  28. # define WIN32_LEAN_AND_MEAN
  29. # define NOSERVICE
  30. # include <windows.h>
  31. # include <tchar.h>
  32. #endif
  33. #ifdef UNIX_LIKE
  34. # include <fcntl.h>
  35. # include <sys/types.h>
  36. # include <sys/stat.h>
  37. int GC_log; /* Forward decl, so we can set it. */
  38. #endif
  39. #ifdef NONSTOP
  40. # include <floss.h>
  41. #endif
  42. #if defined(THREADS) && defined(PCR)
  43. # include "il/PCR_IL.h"
  44. PCR_Th_ML GC_allocate_ml;
  45. #endif
  46. /* For other platforms with threads, the lock and possibly */
  47. /* GC_lock_holder variables are defined in the thread support code. */
  48. #if defined(NOSYS) || defined(ECOS)
  49. #undef STACKBASE
  50. #endif
  51. /* Dont unnecessarily call GC_register_main_static_data() in case */
  52. /* dyn_load.c isn't linked in. */
  53. #ifdef DYNAMIC_LOADING
  54. # define GC_REGISTER_MAIN_STATIC_DATA() GC_register_main_static_data()
  55. #else
  56. # define GC_REGISTER_MAIN_STATIC_DATA() TRUE
  57. #endif
  58. GC_FAR struct _GC_arrays GC_arrays /* = { 0 } */;
  59. GC_bool GC_debugging_started = FALSE;
  60. /* defined here so we don't have to load debug_malloc.o */
  61. void (*GC_check_heap) (void) = (void (*) (void))0;
  62. void (*GC_print_all_smashed) (void) = (void (*) (void))0;
  63. void (*GC_start_call_back) (void) = (void (*) (void))0;
  64. ptr_t GC_stackbottom = 0;
  65. #ifdef IA64
  66. ptr_t GC_register_stackbottom = 0;
  67. #endif
  68. GC_bool GC_dont_gc = 0;
  69. GC_bool GC_dont_precollect = 0;
  70. GC_bool GC_quiet = 0;
  71. #ifndef SMALL_CONFIG
  72. GC_bool GC_print_stats = 0;
  73. #endif
  74. GC_bool GC_print_back_height = 0;
  75. #ifndef NO_DEBUGGING
  76. GC_bool GC_dump_regularly = 0; /* Generate regular debugging dumps. */
  77. #endif
  78. #ifdef KEEP_BACK_PTRS
  79. long GC_backtraces = 0; /* Number of random backtraces to */
  80. /* generate for each GC. */
  81. #endif
  82. #ifdef FIND_LEAK
  83. int GC_find_leak = 1;
  84. #else
  85. int GC_find_leak = 0;
  86. #endif
  87. #ifdef ALL_INTERIOR_POINTERS
  88. int GC_all_interior_pointers = 1;
  89. #else
  90. int GC_all_interior_pointers = 0;
  91. #endif
  92. long GC_large_alloc_warn_interval = 5;
  93. /* Interval between unsuppressed warnings. */
  94. long GC_large_alloc_warn_suppressed = 0;
  95. /* Number of warnings suppressed so far. */
  96. /*ARGSUSED*/
  97. void * GC_default_oom_fn(size_t bytes_requested)
  98. {
  99. return(0);
  100. }
  101. void * (*GC_oom_fn) (size_t bytes_requested) = GC_default_oom_fn;
  102. void * GC_project2(void *arg1, void *arg2)
  103. {
  104. return arg2;
  105. }
/* Set things up so that GC_size_map[i] >= granules(i), */
/* but not too much bigger */
/* and so that size_map contains relatively few distinct entries */
/* This was originally stolen from Russ Atkinson's Cedar */
/* quantization algorithm (but we precompute it). */
void GC_init_size_map(void)
{
    int i;

    /* Map size 0 to something bigger. */
    /* This avoids problems at lower levels. */
    GC_size_map[0] = 1;
    /* Precompute exact granule counts for every request size small     */
    /* enough for the tiny free lists; larger entries are filled        */
    /* lazily by GC_extend_size_map.                                    */
    for (i = 1; i <= GRANULES_TO_BYTES(TINY_FREELISTS-1) - EXTRA_BYTES; i++) {
        GC_size_map[i] = ROUNDED_UP_GRANULES(i);
        GC_ASSERT(GC_size_map[i] < TINY_FREELISTS);
    }
    /* We leave the rest of the array to be filled in on demand. */
}
/* Fill in additional entries in GC_size_map, including the ith one */
/* We assume the ith entry is currently 0. */
/* Note that a filled in section of the array ending at n always */
/* has length at least n/4. */
void GC_extend_size_map(size_t i)
{
    size_t orig_granule_sz = ROUNDED_UP_GRANULES(i);
    size_t granule_sz = orig_granule_sz;
    size_t byte_sz = GRANULES_TO_BYTES(granule_sz);
        /* The size we try to preserve. */
        /* Close to i, unless this would */
        /* introduce too many distinct sizes. */
    size_t smaller_than_i = byte_sz - (byte_sz >> 3);
    size_t much_smaller_than_i = byte_sz - (byte_sz >> 2);
    size_t low_limit; /* The lowest indexed entry we */
                      /* initialize. */
    size_t j;

    /* Pick the lowest uninitialized index to start filling from: if    */
    /* the entry ~12% below i is already set, start just above it;      */
    /* otherwise scan upward from ~25% below i.                         */
    if (GC_size_map[smaller_than_i] == 0) {
        low_limit = much_smaller_than_i;
        while (GC_size_map[low_limit] != 0) low_limit++;
    } else {
        low_limit = smaller_than_i + 1;
        while (GC_size_map[low_limit] != 0) low_limit++;
        granule_sz = ROUNDED_UP_GRANULES(low_limit);
        granule_sz += granule_sz >> 3;
        if (granule_sz < orig_granule_sz) granule_sz = orig_granule_sz;
    }
    /* For these larger sizes, we use an even number of granules. */
    /* This makes it easier to, for example, construct a 16byte-aligned */
    /* allocator even if GRANULE_BYTES is 8. */
    granule_sz += 1;
    granule_sz &= ~1;
    if (granule_sz > MAXOBJGRANULES) {
        granule_sz = MAXOBJGRANULES;
    }
    /* If we can fit the same number of larger objects in a block, */
    /* do so. */
    {
        size_t number_of_objs = HBLK_GRANULES/granule_sz;
        granule_sz = HBLK_GRANULES/number_of_objs;
        granule_sz &= ~1;
    }
    byte_sz = GRANULES_TO_BYTES(granule_sz);
    /* We may need one extra byte; */
    /* don't always fill in GC_size_map[byte_sz] */
    byte_sz -= EXTRA_BYTES;
    for (j = low_limit; j <= byte_sz; j++) GC_size_map[j] = granule_sz;
}
  171. /*
  172. * The following is a gross hack to deal with a problem that can occur
  173. * on machines that are sloppy about stack frame sizes, notably SPARC.
  174. * Bogus pointers may be written to the stack and not cleared for
  175. * a LONG time, because they always fall into holes in stack frames
  176. * that are not written. We partially address this by clearing
  177. * sections of the stack whenever we get control.
  178. */
  179. word GC_stack_last_cleared = 0; /* GC_no when we last did this */
  180. # ifdef THREADS
  181. # define BIG_CLEAR_SIZE 2048 /* Clear this much now and then. */
  182. # define SMALL_CLEAR_SIZE 256 /* Clear this much every time. */
  183. # endif
  184. # define CLEAR_SIZE 213 /* Granularity for GC_clear_stack_inner */
  185. # define DEGRADE_RATE 50
  186. ptr_t GC_min_sp; /* Coolest stack pointer value from which we've */
  187. /* already cleared the stack. */
  188. ptr_t GC_high_water;
  189. /* "hottest" stack pointer value we have seen */
  190. /* recently. Degrades over time. */
  191. word GC_bytes_allocd_at_reset;
  192. #if defined(ASM_CLEAR_CODE)
  193. extern void *GC_clear_stack_inner(void *, ptr_t);
  194. #else
/* Clear the stack up to about limit. Return arg. */
/*ARGSUSED*/
void * GC_clear_stack_inner(void *arg, ptr_t limit)
{
    word dummy[CLEAR_SIZE];

    BZERO(dummy, CLEAR_SIZE*sizeof(word));
    /* Recurse (each frame zeroing CLEAR_SIZE words of its own stack)  */
    /* until the local array — a proxy for the current stack pointer — */
    /* has moved past limit.                                           */
    if ((ptr_t)(dummy) COOLER_THAN limit) {
        (void) GC_clear_stack_inner(arg, limit);
    }
    /* Make sure the recursive call is not a tail call, and the bzero */
    /* call is not recognized as dead code. */
    GC_noop1((word)dummy);
    return(arg);
}
  209. #endif
/* Clear some of the inaccessible part of the stack. Returns its */
/* argument, so it can be used in a tail call position, hence clearing */
/* another frame. */
void * GC_clear_stack(void *arg)
{
    ptr_t sp = GC_approx_sp(); /* Hotter than actual sp */
#   ifdef THREADS
        word dummy[SMALL_CLEAR_SIZE];
        static unsigned random_no = 0;
                        /* Should be more random than it is ... */
                        /* Used to occasionally clear a bigger */
                        /* chunk. */
#   endif
    ptr_t limit;

#   define SLOP 400
        /* Extra bytes we clear every time. This clears our own */
        /* activation record, and should cause more frequent */
        /* clearing near the cold end of the stack, a good thing. */
#   define GC_SLOP 4000
        /* We make GC_high_water this much hotter than we really saw */
        /* saw it, to cover for GC noise etc. above our current frame. */
#   define CLEAR_THRESHOLD 100000
        /* We restart the clearing process after this many bytes of */
        /* allocation. Otherwise very heavily recursive programs */
        /* with sparse stacks may result in heaps that grow almost */
        /* without bounds. As the heap gets larger, collection */
        /* frequency decreases, thus clearing frequency would decrease, */
        /* thus more junk remains accessible, thus the heap gets */
        /* larger ... */
#   ifdef THREADS
      /* Threaded case: no per-GC bookkeeping.  Clear a small chunk on */
      /* every call, and a big chunk roughly every 13th call.          */
      if (++random_no % 13 == 0) {
        limit = sp;
        MAKE_HOTTER(limit, BIG_CLEAR_SIZE*sizeof(word));
        limit = (ptr_t)((word)limit & ~0xf);
                /* Make it sufficiently aligned for assembly */
                /* implementations of GC_clear_stack_inner. */
        return GC_clear_stack_inner(arg, limit);
      } else {
        BZERO(dummy, SMALL_CLEAR_SIZE*sizeof(word));
        return arg;
      }
#   else
      if (GC_gc_no > GC_stack_last_cleared) {
        /* Start things over, so we clear the entire stack again */
        if (GC_stack_last_cleared == 0) GC_high_water = (ptr_t)GC_stackbottom;
        GC_min_sp = GC_high_water;
        GC_stack_last_cleared = GC_gc_no;
        GC_bytes_allocd_at_reset = GC_bytes_allocd;
      }
      /* Adjust GC_high_water */
      MAKE_COOLER(GC_high_water, WORDS_TO_BYTES(DEGRADE_RATE) + GC_SLOP);
      if (sp HOTTER_THAN GC_high_water) {
          GC_high_water = sp;
      }
      MAKE_HOTTER(GC_high_water, GC_SLOP);
      limit = GC_min_sp;
      MAKE_HOTTER(limit, SLOP);
      if (sp COOLER_THAN limit) {
        limit = (ptr_t)((word)limit & ~0xf);
                /* Make it sufficiently aligned for assembly */
                /* implementations of GC_clear_stack_inner. */
        GC_min_sp = sp;
        return(GC_clear_stack_inner(arg, limit));
      } else if (GC_bytes_allocd - GC_bytes_allocd_at_reset > CLEAR_THRESHOLD) {
        /* Restart clearing process, but limit how much clearing we do. */
        GC_min_sp = sp;
        MAKE_HOTTER(GC_min_sp, CLEAR_THRESHOLD/4);
        if (GC_min_sp HOTTER_THAN GC_high_water) GC_min_sp = GC_high_water;
        GC_bytes_allocd_at_reset = GC_bytes_allocd;
      }
      return(arg);
#   endif
}
/* Return a pointer to the base address of p, given a pointer to a */
/* an address within an object. Return 0 o.w. */
void * GC_base(void * p)
{
    ptr_t r;
    struct hblk *h;
    bottom_index *bi;
    hdr *candidate_hdr;
    ptr_t limit;

    r = p;
    if (!GC_is_initialized) return 0;
    h = HBLKPTR(r);
    GET_BI(r, bi);
    candidate_hdr = HDR_FROM_BI(bi, r);
    if (candidate_hdr == 0) return(0);
    /* If it's a pointer to the middle of a large object, move it */
    /* to the beginning. */
    while (IS_FORWARDING_ADDR_OR_NIL(candidate_hdr)) {
        h = FORWARDED_ADDR(h,candidate_hdr);
        r = (ptr_t)h;
        candidate_hdr = HDR(h);
    }
    if (HBLK_IS_FREE(candidate_hdr)) return(0);
    /* Make sure r points to the beginning of the object */
    r = (ptr_t)((word)r & ~(WORDS_TO_BYTES(1) - 1));
    {
        size_t offset = HBLKDISPL(r);
        signed_word sz = candidate_hdr -> hb_sz;
        size_t obj_displ = offset % sz;

        /* Back r up to the start of the object containing it, then   */
        /* validate: reject small objects that would straddle a block */
        /* boundary, and pointers at or past the object's end.        */
        r -= obj_displ;
        limit = r + sz;
        if (limit > (ptr_t)(h + 1) && sz <= HBLKSIZE) {
            return(0);
        }
        if ((ptr_t)p >= limit) return(0);
    }
    return((void *)r);
}
  321. /* Return the size of an object, given a pointer to its base. */
  322. /* (For small obects this also happens to work from interior pointers, */
  323. /* but that shouldn't be relied upon.) */
  324. size_t GC_size(void * p)
  325. {
  326. hdr * hhdr = HDR(p);
  327. return hhdr -> hb_sz;
  328. }
  329. size_t GC_get_heap_size(void)
  330. {
  331. return GC_heapsize;
  332. }
  333. size_t GC_get_free_bytes(void)
  334. {
  335. return GC_large_free_bytes;
  336. }
  337. size_t GC_get_bytes_since_gc(void)
  338. {
  339. return GC_bytes_allocd;
  340. }
  341. size_t GC_get_total_bytes(void)
  342. {
  343. return GC_bytes_allocd+GC_bytes_allocd_before_gc;
  344. }
  345. GC_bool GC_is_initialized = FALSE;
  346. # if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
  347. extern void GC_init_parallel(void);
  348. # endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
/* FIXME: The GC_init/GC_init_inner distinction should go away. */
/* Public one-time initialization entry point.  Safe to call more than */
/* once: GC_init_inner returns immediately once initialized.           */
void GC_init(void)
{
    /* LOCK(); -- no longer does anything this early. */
    GC_init_inner();
    /* UNLOCK(); */
}
  356. #if defined(MSWIN32) || defined(MSWINCE)
  357. CRITICAL_SECTION GC_write_cs;
  358. #endif
  359. #ifdef MSWIN32
  360. extern void GC_init_win32(void);
  361. #endif
  362. extern void GC_setpagesize();
  363. #ifdef MSWIN32
  364. extern GC_bool GC_no_win32_dlls;
  365. #else
  366. # define GC_no_win32_dlls FALSE
  367. #endif
/* atexit handler registered when GC_FIND_LEAK is set: run one final */
/* collection so leaks present at program exit are reported.         */
void GC_exit_check(void)
{
    GC_gcollect();
}
  372. #ifdef SEARCH_FOR_DATA_START
  373. extern void GC_init_linux_data_start(void);
  374. #endif
  375. #ifdef UNIX_LIKE
  376. extern void GC_set_and_save_fault_handler(void (*handler)(int));
  377. static void looping_handler(sig)
  378. int sig;
  379. {
  380. GC_err_printf("Caught signal %d: looping in handler\n", sig);
  381. for(;;);
  382. }
  383. static GC_bool installed_looping_handler = FALSE;
  384. static void maybe_install_looping_handler()
  385. {
  386. /* Install looping handler before the write fault handler, so we */
  387. /* handle write faults correctly. */
  388. if (!installed_looping_handler && 0 != GETENV("GC_LOOP_ON_ABORT")) {
  389. GC_set_and_save_fault_handler(looping_handler);
  390. installed_looping_handler = TRUE;
  391. }
  392. }
  393. #else /* !UNIX_LIKE */
  394. # define maybe_install_looping_handler()
  395. #endif
  396. #if defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
  397. void GC_thr_init(void);
  398. #endif
/* The real one-time initialization: set up platform locks, process   */
/* the GC_* environment variables, register static-data roots, check  */
/* basic type-size invariants, size the initial heap, and optionally  */
/* start thread support and incremental collection.  Returns          */
/* immediately once GC_is_initialized is set.                         */
void GC_init_inner()
{
#   if !defined(THREADS) && defined(GC_ASSERTIONS)
        word dummy;
#   endif
    word initial_heap_sz = (word)MINHINCR;

    if (GC_is_initialized) return;
    /* Note that although we are nominally called with the */
    /* allocation lock held, the allocation lock is now */
    /* only really acquired once a second thread is forked.*/
    /* And the initialization code needs to run before */
    /* then. Thus we really don't hold any locks, and can */
    /* in fact safely initialize them here. */
#   ifdef THREADS
      GC_ASSERT(!GC_need_to_lock);
#   endif
#   if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
      if (!GC_is_initialized) {
        /* Prefer InitializeCriticalSectionAndSpinCount when kernel32 */
        /* exports it; otherwise fall back to the plain initializer.  */
        BOOL (WINAPI *pfn) (LPCRITICAL_SECTION, DWORD) = NULL;
        HMODULE hK32 = GetModuleHandleA("kernel32.dll");
        if (hK32)
          pfn = (BOOL (WINAPI *) (LPCRITICAL_SECTION, DWORD))
                GetProcAddress (hK32,
                                "InitializeCriticalSectionAndSpinCount");
        if (pfn)
            pfn(&GC_allocate_ml, 4000);
        else
          InitializeCriticalSection (&GC_allocate_ml);
      }
#endif /* MSWIN32 */
#   if defined(MSWIN32) || defined(MSWINCE)
      InitializeCriticalSection(&GC_write_cs);
#   endif
    /* --- Environment-variable driven configuration. --- */
#   if (!defined(SMALL_CONFIG))
      if (0 != GETENV("GC_PRINT_STATS")) {
        GC_print_stats = 1;
      }
      if (0 != GETENV("GC_PRINT_VERBOSE_STATS")) {
        GC_print_stats = VERBOSE;
      }
#     if defined(UNIX_LIKE)
        {
          char * file_name = GETENV("GC_LOG_FILE");
          if (0 != file_name) {
            int log_d = open(file_name, O_CREAT|O_WRONLY|O_APPEND, 0666);
            if (log_d < 0) {
              GC_log_printf("Failed to open %s as log file\n", file_name);
            } else {
              GC_log = log_d;
            }
          }
        }
#     endif
#   endif
#   ifndef NO_DEBUGGING
      if (0 != GETENV("GC_DUMP_REGULARLY")) {
        GC_dump_regularly = 1;
      }
#   endif
#   ifdef KEEP_BACK_PTRS
      {
        char * backtraces_string = GETENV("GC_BACKTRACES");
        if (0 != backtraces_string) {
          GC_backtraces = atol(backtraces_string);
          /* An empty value means "one backtrace per GC". */
          if (backtraces_string[0] == '\0') GC_backtraces = 1;
        }
      }
#   endif
    if (0 != GETENV("GC_FIND_LEAK")) {
      GC_find_leak = 1;
      atexit(GC_exit_check);
    }
    if (0 != GETENV("GC_ALL_INTERIOR_POINTERS")) {
      GC_all_interior_pointers = 1;
    }
    if (0 != GETENV("GC_DONT_GC")) {
      GC_dont_gc = 1;
    }
    if (0 != GETENV("GC_PRINT_BACK_HEIGHT")) {
      GC_print_back_height = 1;
    }
    if (0 != GETENV("GC_NO_BLACKLIST_WARNING")) {
      GC_large_alloc_warn_interval = LONG_MAX;
    }
    {
      char * addr_string = GETENV("GC_TRACE");
      if (0 != addr_string) {
#       ifndef ENABLE_TRACE
          WARN("Tracing not enabled: Ignoring GC_TRACE value\n", 0);
#       else
#         ifdef STRTOULL
            long long addr = strtoull(addr_string, NULL, 16);
#         else
            long addr = strtoul(addr_string, NULL, 16);
#         endif
          if (addr < 0x1000)
              WARN("Unlikely trace address: 0x%lx\n", (GC_word)addr);
          GC_trace_addr = (ptr_t)addr;
#       endif
      }
    }
    {
      char * time_limit_string = GETENV("GC_PAUSE_TIME_TARGET");
      if (0 != time_limit_string) {
        long time_limit = atol(time_limit_string);
        if (time_limit < 5) {
          WARN("GC_PAUSE_TIME_TARGET environment variable value too small "
               "or bad syntax: Ignoring\n", 0);
        } else {
          GC_time_limit = time_limit;
        }
      }
    }
    {
      char * interval_string = GETENV("GC_LARGE_ALLOC_WARN_INTERVAL");
      if (0 != interval_string) {
        long interval = atol(interval_string);
        if (interval <= 0) {
          WARN("GC_LARGE_ALLOC_WARN_INTERVAL environment variable has "
               "bad value: Ignoring\n", 0);
        } else {
          GC_large_alloc_warn_interval = interval;
        }
      }
    }
    maybe_install_looping_handler();
    /* Adjust normal object descriptor for extra allocation. */
    if (ALIGNMENT > GC_DS_TAGS && EXTRA_BYTES != 0) {
      GC_obj_kinds[NORMAL].ok_descriptor = ((word)(-ALIGNMENT) | GC_DS_LENGTH);
    }
    GC_setpagesize();
    /* The collector's own data structures must not be scanned as roots. */
    GC_exclude_static_roots(beginGC_arrays, endGC_arrays);
    GC_exclude_static_roots(beginGC_obj_kinds, endGC_obj_kinds);
#   ifdef SEPARATE_GLOBALS
      GC_exclude_static_roots(beginGC_objfreelist, endGC_objfreelist);
      GC_exclude_static_roots(beginGC_aobjfreelist, endGC_aobjfreelist);
#   endif
#   ifdef MSWIN32
      GC_init_win32();
#   endif
#   if defined(USE_PROC_FOR_LIBRARIES) && defined(GC_LINUX_THREADS)
      WARN("USE_PROC_FOR_LIBRARIES + GC_LINUX_THREADS performs poorly.\n", 0);
      /* If thread stacks are cached, they tend to be scanned in */
      /* entirety as part of the root set. This wil grow them to */
      /* maximum size, and is generally not desirable. */
#   endif
#   if defined(SEARCH_FOR_DATA_START)
      GC_init_linux_data_start();
#   endif
#   if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__)
      GC_init_netbsd_elf();
#   endif
    /* Establish the stack base (cold end) unless the client set it. */
#   if !defined(THREADS) || defined(GC_PTHREADS) || defined(GC_WIN32_THREADS) \
       || defined(GC_SOLARIS_THREADS)
      if (GC_stackbottom == 0) {
        GC_stackbottom = GC_get_main_stack_base();
#       if (defined(LINUX) || defined(HPUX)) && defined(IA64)
          GC_register_stackbottom = GC_get_register_stack_base();
#       endif
      } else {
#       if (defined(LINUX) || defined(HPUX)) && defined(IA64)
          if (GC_register_stackbottom == 0) {
            WARN("GC_register_stackbottom should be set with GC_stackbottom\n", 0);
            /* The following may fail, since we may rely on */
            /* alignment properties that may not hold with a user set */
            /* GC_stackbottom. */
            GC_register_stackbottom = GC_get_register_stack_base();
          }
#       endif
      }
#   endif
    /* Ignore gcc -Wall warnings on the following. */
    GC_STATIC_ASSERT(sizeof (ptr_t) == sizeof(word));
    GC_STATIC_ASSERT(sizeof (signed_word) == sizeof(word));
    GC_STATIC_ASSERT(sizeof (struct hblk) == HBLKSIZE);
#   ifndef THREADS
#     ifdef STACK_GROWS_DOWN
        GC_ASSERT((word)(&dummy) <= (word)GC_stackbottom);
#     else
        GC_ASSERT((word)(&dummy) >= (word)GC_stackbottom);
#     endif
#   endif
#   if !defined(_AUX_SOURCE) || defined(__GNUC__)
      GC_ASSERT((word)(-1) > (word)0);
      /* word should be unsigned */
#   endif
    GC_ASSERT((ptr_t)(word)(-1) > (ptr_t)0);
    /* Ptr_t comparisons should behave as unsigned comparisons. */
    GC_ASSERT((signed_word)(-1) < (signed_word)0);
#   if !defined(SMALL_CONFIG)
      if (GC_incremental || 0 != GETENV("GC_ENABLE_INCREMENTAL")) {
        /* This used to test for !GC_no_win32_dlls. Why? */
        GC_setpagesize();
        /* For GWW_MPROTECT on Win32, this needs to happen before any */
        /* heap memory is allocated. */
        GC_dirty_init();
        GC_ASSERT(GC_bytes_allocd == 0)
        GC_incremental = TRUE;
      }
#   endif /* !SMALL_CONFIG */
    /* Add initial guess of root sets. Do this first, since sbrk(0) */
    /* might be used. */
    if (GC_REGISTER_MAIN_STATIC_DATA()) GC_register_data_segments();
    GC_init_headers();
    GC_bl_init();
    GC_mark_init();
    {
      char * sz_str = GETENV("GC_INITIAL_HEAP_SIZE");
      if (sz_str != NULL) {
        initial_heap_sz = atoi(sz_str);
        if (initial_heap_sz <= MINHINCR * HBLKSIZE) {
          WARN("Bad initial heap size %s - ignoring it.\n",
               sz_str);
        }
        initial_heap_sz = divHBLKSZ(initial_heap_sz);
      }
    }
    {
      char * sz_str = GETENV("GC_MAXIMUM_HEAP_SIZE");
      if (sz_str != NULL) {
        word max_heap_sz = (word)atol(sz_str);
        if (max_heap_sz < initial_heap_sz * HBLKSIZE) {
          WARN("Bad maximum heap size %s - ignoring it.\n",
               sz_str);
        }
        if (0 == GC_max_retries) GC_max_retries = 2;
        GC_set_max_heap_size(max_heap_sz);
      }
    }
    if (!GC_expand_hp_inner(initial_heap_sz)) {
        GC_err_printf("Can't start up: not enough memory\n");
        EXIT();
    }
    GC_initialize_offsets();
    GC_register_displacement_inner(0L);
#   if defined(GC_LINUX_THREADS) && defined(REDIRECT_MALLOC)
      if (!GC_all_interior_pointers) {
        /* TLS ABI uses pointer-sized offsets for dtv. */
        GC_register_displacement_inner(sizeof(void *));
      }
#   endif
    GC_init_size_map();
#   ifdef PCR
      if (PCR_IL_Lock(PCR_Bool_false, PCR_allSigsBlocked, PCR_waitForever)
          != PCR_ERes_okay) {
          ABORT("Can't lock load state\n");
      } else if (PCR_IL_Unlock() != PCR_ERes_okay) {
          ABORT("Can't unlock load state\n");
      }
      PCR_IL_Unlock();
      GC_pcr_install();
#   endif
    GC_is_initialized = TRUE;
#   if defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
        GC_thr_init();
#   endif
    COND_DUMP;
    /* Get black list set up and/or incremental GC started */
    if (!GC_dont_precollect || GC_incremental) GC_gcollect_inner();
#   ifdef STUBBORN_ALLOC
        GC_stubborn_init();
#   endif
    /* Convince lint that some things are used */
#   ifdef LINT
      {
          extern char * GC_copyright[];
          extern int GC_read();
          extern void GC_register_finalizer_no_order();

          GC_noop(GC_copyright, GC_find_header,
                  GC_push_one, GC_call_with_alloc_lock, GC_read,
                  GC_dont_expand,
#                 ifndef NO_DEBUGGING
                    GC_dump,
#                 endif
                  GC_register_finalizer_no_order);
      }
#   endif
    /* The rest of this again assumes we don't really hold */
    /* the allocation lock. */
#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
        /* Make sure marker threads and started and thread local */
        /* allocation is initialized, in case we didn't get */
        /* called from GC_init_parallel(); */
        {
          GC_init_parallel();
        }
#   endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
#   if defined(DYNAMIC_LOADING) && defined(DARWIN)
    {
        /* This must be called WITHOUT the allocation lock held
        and before any threads are created */
        extern void GC_init_dyld();
        GC_init_dyld();
    }
#   endif
}
/* Turn on incremental (generational) collection.  When leak          */
/* detection is enabled, or when back pointers are kept, this merely  */
/* ensures the collector is initialized.                              */
void GC_enable_incremental(void)
{
# if !defined(SMALL_CONFIG) && !defined(KEEP_BACK_PTRS)
    /* If we are keeping back pointers, the GC itself dirties all */
    /* pages on which objects have been marked, making */
    /* incremental GC pointless. */
    if (!GC_find_leak) {
      DCL_LOCK_STATE;

      LOCK();
      if (GC_incremental) goto out;
      GC_setpagesize();
      /* if (GC_no_win32_dlls) goto out; Should be win32S test? */
      maybe_install_looping_handler(); /* Before write fault handler! */
      GC_incremental = TRUE;
      if (!GC_is_initialized) {
          /* GC_init_inner sees GC_incremental and sets up dirty-bit */
          /* tracking itself.                                        */
          GC_init_inner();
      } else {
          GC_dirty_init();
      }
      if (!GC_dirty_maintained) goto out;
      if (GC_dont_gc) {
          /* Can't easily do it. */
          UNLOCK();
          return;
      }
      if (GC_bytes_allocd > 0) {
          /* There may be unmarked reachable objects */
          GC_gcollect_inner();
      } /* else we're OK in assuming everything's */
        /* clean since nothing can point to an */
        /* unmarked object. */
      GC_read_dirty();
out:
      UNLOCK();
    } else {
      GC_init();
    }
# else
    GC_init();
# endif
}
  736. #if defined(MSWIN32) || defined(MSWINCE)
  737. # if defined(_MSC_VER) && defined(_DEBUG)
  738. # include <crtdbg.h>
  739. # endif
  740. # ifdef OLD_WIN32_LOG_FILE
  741. # define LOG_FILE _T("gc.log")
  742. # endif
  743. HANDLE GC_stdout = 0;
  744. void GC_deinit()
  745. {
  746. if (GC_is_initialized) {
  747. DeleteCriticalSection(&GC_write_cs);
  748. }
  749. }
  750. # ifndef THREADS
  751. # define GC_need_to_lock 0 /* Not defined without threads */
  752. # endif
  753. int GC_write(const char *buf, size_t len)
  754. {
  755. BOOL tmp;
  756. DWORD written;
  757. if (len == 0)
  758. return 0;
  759. if (GC_need_to_lock) EnterCriticalSection(&GC_write_cs);
  760. if (GC_stdout == INVALID_HANDLE_VALUE) {
  761. if (GC_need_to_lock) LeaveCriticalSection(&GC_write_cs);
  762. return -1;
  763. } else if (GC_stdout == 0) {
  764. char * file_name = GETENV("GC_LOG_FILE");
  765. char logPath[_MAX_PATH + 5];
  766. if (0 == file_name) {
  767. # ifdef OLD_WIN32_LOG_FILE
  768. strcpy(logPath, LOG_FILE);
  769. # else
  770. GetModuleFileName(NULL, logPath, _MAX_PATH);
  771. strcat(logPath, ".log");
  772. # endif
  773. file_name = logPath;
  774. }
  775. GC_stdout = CreateFile(logPath, GENERIC_WRITE,
  776. FILE_SHARE_READ,
  777. NULL, CREATE_ALWAYS, FILE_FLAG_WRITE_THROUGH,
  778. NULL);
  779. if (GC_stdout == INVALID_HANDLE_VALUE)
  780. ABORT("Open of log file failed");
  781. }
  782. tmp = WriteFile(GC_stdout, buf, (DWORD)len, &written, NULL);
  783. if (!tmp)
  784. DebugBreak();
  785. # if defined(_MSC_VER) && defined(_DEBUG)
  786. _CrtDbgReport(_CRT_WARN, NULL, 0, NULL, "%.*s", len, buf);
  787. # endif
  788. if (GC_need_to_lock) LeaveCriticalSection(&GC_write_cs);
  789. return tmp ? (int)written : -1;
  790. }
  791. # undef GC_need_to_lock
  792. #endif
  793. #if defined(OS2) || defined(MACOS)
  794. FILE * GC_stdout = NULL;
  795. FILE * GC_stderr = NULL;
  796. FILE * GC_log = NULL;
  797. int GC_tmp; /* Should really be local ... */
  798. void GC_set_files()
  799. {
  800. if (GC_stdout == NULL) {
  801. GC_stdout = stdout;
  802. }
  803. if (GC_stderr == NULL) {
  804. GC_stderr = stderr;
  805. }
  806. if (GC_log == NULL) {
  807. GC_log = stderr;
  808. }
  809. }
  810. #endif
  811. #if !defined(OS2) && !defined(MACOS) && !defined(MSWIN32) && !defined(MSWINCE)
  812. int GC_stdout = 1;
  813. int GC_stderr = 2;
  814. int GC_log = 2;
  815. # if !defined(AMIGA)
  816. # include <unistd.h>
  817. # endif
  818. #endif
  819. #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(OS2) \
  820. && !defined(MACOS) && !defined(ECOS) && !defined(NOSYS)
  821. int GC_write(fd, buf, len)
  822. int fd;
  823. const char *buf;
  824. size_t len;
  825. {
  826. register int bytes_written = 0;
  827. register int result;
  828. while (bytes_written < len) {
  829. # ifdef GC_SOLARIS_THREADS
  830. result = syscall(SYS_write, fd, buf + bytes_written,
  831. len - bytes_written);
  832. # else
  833. result = write(fd, buf + bytes_written, len - bytes_written);
  834. # endif
  835. if (-1 == result) return(result);
  836. bytes_written += result;
  837. }
  838. return(bytes_written);
  839. }
  840. #endif /* UN*X */
#ifdef ECOS
/* eCos target: forward log output to the Java diagnostic channel     */
/* and report full success.                                           */
/* NOTE(review): K&R definition with no parameter declarations, so    */
/* fd, buf and len all default to int here; relies on the WRITE macro */
/* callers passing compatible values -- confirm on this target.       */
int GC_write(fd, buf, len)
{
    _Jv_diag_write (buf, len);
    return len;
}
#endif
#ifdef NOSYS
/* No-OS target: there is nowhere to write, so silently discard the   */
/* data and report full success to callers of the WRITE macro.        */
/* NOTE(review): K&R definition with no parameter declarations, so    */
/* all three parameters default to int -- confirm on this target.     */
int GC_write(fd, buf, len)
{
    /* No writing. */
    return len;
}
#endif
  855. #if defined(MSWIN32) || defined(MSWINCE)
  856. /* FIXME: This is pretty ugly ... */
  857. # define WRITE(f, buf, len) GC_write(buf, len)
  858. #else
  859. # if defined(OS2) || defined(MACOS)
  860. # define WRITE(f, buf, len) (GC_set_files(), \
  861. GC_tmp = fwrite((buf), 1, (len), (f)), \
  862. fflush(f), GC_tmp)
  863. # else
  864. # define WRITE(f, buf, len) GC_write((f), (buf), (len))
  865. # endif
  866. #endif
  867. #define BUFSZ 1024
  868. #ifdef _MSC_VER
  869. # define vsnprintf _vsnprintf
  870. #endif
  871. /* A version of printf that is unlikely to call malloc, and is thus safer */
  872. /* to call from the collector in case malloc has been bound to GC_malloc. */
  873. /* Floating point arguments ans formats should be avoided, since fp */
  874. /* conversion is more likely to allocate. */
  875. /* Assumes that no more than BUFSZ-1 characters are written at once. */
  876. void GC_printf(const char *format, ...)
  877. {
  878. va_list args;
  879. char buf[BUFSZ+1];
  880. va_start(args, format);
  881. if (GC_quiet) return;
  882. buf[BUFSZ] = 0x15;
  883. (void) vsnprintf(buf, BUFSZ, format, args);
  884. va_end(args);
  885. if (buf[BUFSZ] != 0x15) ABORT("GC_printf clobbered stack");
  886. if (WRITE(GC_stdout, buf, strlen(buf)) < 0) ABORT("write to stdout failed");
  887. }
  888. void GC_err_printf(const char *format, ...)
  889. {
  890. va_list args;
  891. char buf[BUFSZ+1];
  892. va_start(args, format);
  893. buf[BUFSZ] = 0x15;
  894. (void) vsnprintf(buf, BUFSZ, format, args);
  895. va_end(args);
  896. if (buf[BUFSZ] != 0x15) ABORT("GC_printf clobbered stack");
  897. if (WRITE(GC_stderr, buf, strlen(buf)) < 0) ABORT("write to stderr failed");
  898. }
  899. void GC_log_printf(const char *format, ...)
  900. {
  901. va_list args;
  902. char buf[BUFSZ+1];
  903. va_start(args, format);
  904. buf[BUFSZ] = 0x15;
  905. (void) vsnprintf(buf, BUFSZ, format, args);
  906. va_end(args);
  907. if (buf[BUFSZ] != 0x15) ABORT("GC_printf clobbered stack");
  908. if (WRITE(GC_log, buf, strlen(buf)) < 0) ABORT("write to log failed");
  909. }
  910. void GC_err_puts(const char *s)
  911. {
  912. if (WRITE(GC_stderr, s, strlen(s)) < 0) ABORT("write to stderr failed");
  913. }
#if defined(LINUX) && !defined(SMALL_CONFIG)
/* Write exactly len bytes (buf need not be NUL-terminated) to the      */
/* collector's error stream; abort on failure.  Old-style definition    */
/* with separately declared parameter types.                            */
void GC_err_write(buf, len)
const char *buf;
size_t len;
{
    if (WRITE(GC_stderr, buf, len) < 0) ABORT("write to stderr failed");
}
#endif
/* Default warning handler: prints msg on the error stream, with arg    */
/* substituted for the single printf-style directive msg is expected    */
/* to contain.  NOTE(review): msg is used directly as the format        */
/* string, so callers must never pass untrusted text as msg.            */
void GC_default_warn_proc(char *msg, GC_word arg)
{
    GC_err_printf(msg, arg);
}

/* Currently installed warning hook; replaced under the allocation      */
/* lock by GC_set_warn_proc below.                                      */
GC_warn_proc GC_current_warn_proc = GC_default_warn_proc;
  927. GC_warn_proc GC_set_warn_proc(GC_warn_proc p)
  928. {
  929. GC_warn_proc result;
  930. # ifdef GC_WIN32_THREADS
  931. GC_ASSERT(GC_is_initialized);
  932. # endif
  933. LOCK();
  934. result = GC_current_warn_proc;
  935. GC_current_warn_proc = p;
  936. UNLOCK();
  937. return(result);
  938. }
  939. GC_word GC_set_free_space_divisor (GC_word value)
  940. {
  941. GC_word old = GC_free_space_divisor;
  942. GC_free_space_divisor = value;
  943. return old;
  944. }
#ifndef PCR
/* Report a fatal collector error and terminate.  On MSWIN32 the        */
/* message goes to a message box, elsewhere to the GC error stream.     */
/* If the GC_LOOP_ON_ABORT environment variable is set, spin forever    */
/* instead so a debugger can be attached to the still-running process.  */
void GC_abort(const char *msg)
{
#   if defined(MSWIN32)
      (void) MessageBoxA(NULL, msg, "Fatal error in gc", MB_ICONERROR|MB_OK);
#   else
      GC_err_printf("%s\n", msg);
#   endif
    if (GETENV("GC_LOOP_ON_ABORT") != NULL) {
            /* In many cases it's easier to debug a running process.    */
            /* It's arguably nicer to sleep, but that makes it harder   */
            /* to look at the thread if the debugger doesn't know much  */
            /* about threads.                                           */
            for(;;) {}
    }
#   if defined(MSWIN32) || defined(MSWINCE)
        DebugBreak();       /* Trap into an attached debugger, if any.  */
#   else
        (void) abort();
#   endif
}
#endif
  967. void GC_enable()
  968. {
  969. LOCK();
  970. GC_dont_gc--;
  971. UNLOCK();
  972. }
  973. void GC_disable()
  974. {
  975. LOCK();
  976. GC_dont_gc++;
  977. UNLOCK();
  978. }
  979. /* Helper procedures for new kind creation. */
  980. void ** GC_new_free_list_inner()
  981. {
  982. void *result = GC_INTERNAL_MALLOC((MAXOBJGRANULES+1)*sizeof(ptr_t),
  983. PTRFREE);
  984. if (result == 0) ABORT("Failed to allocate freelist for new kind");
  985. BZERO(result, (MAXOBJGRANULES+1)*sizeof(ptr_t));
  986. return result;
  987. }
  988. void ** GC_new_free_list()
  989. {
  990. void *result;
  991. LOCK();
  992. result = GC_new_free_list_inner();
  993. UNLOCK();
  994. return result;
  995. }
  996. unsigned GC_new_kind_inner(void **fl, GC_word descr, int adjust, int clear)
  997. {
  998. unsigned result = GC_n_kinds++;
  999. if (GC_n_kinds > MAXOBJKINDS) ABORT("Too many kinds");
  1000. GC_obj_kinds[result].ok_freelist = fl;
  1001. GC_obj_kinds[result].ok_reclaim_list = 0;
  1002. GC_obj_kinds[result].ok_descriptor = descr;
  1003. GC_obj_kinds[result].ok_relocate_descr = adjust;
  1004. GC_obj_kinds[result].ok_init = clear;
  1005. return result;
  1006. }
  1007. unsigned GC_new_kind(void **fl, GC_word descr, int adjust, int clear)
  1008. {
  1009. unsigned result;
  1010. LOCK();
  1011. result = GC_new_kind_inner(fl, descr, adjust, clear);
  1012. UNLOCK();
  1013. return result;
  1014. }
  1015. unsigned GC_new_proc_inner(GC_mark_proc proc)
  1016. {
  1017. unsigned result = GC_n_mark_procs++;
  1018. if (GC_n_mark_procs > MAX_MARK_PROCS) ABORT("Too many mark procedures");
  1019. GC_mark_procs[result] = proc;
  1020. return result;
  1021. }
  1022. unsigned GC_new_proc(GC_mark_proc proc)
  1023. {
  1024. unsigned result;
  1025. LOCK();
  1026. result = GC_new_proc_inner(proc);
  1027. UNLOCK();
  1028. return result;
  1029. }
/* Invoke fn(&base, arg) where base.mem_base approximates the current   */
/* stack top, using the address of a local variable.  On IA64 the       */
/* register backing-store base is captured as well.  Returns fn's       */
/* result.                                                              */
GC_API void * GC_call_with_stack_base(GC_stack_base_func fn, void *arg)
{
    int dummy;  /* Never read; only its address (the stack top) is used. */
    struct GC_stack_base base;
    base.mem_base = (void *)&dummy;
#   ifdef IA64
      base.reg_base = (void *)GC_save_regs_in_stack();
      /* Unnecessarily flushes register stack,        */
      /* but that probably doesn't hurt.              */
#   endif
    return fn(&base, arg);
}
#if !defined(NO_DEBUGGING)
/* Dump human-readable collector state -- static roots, heap sections,  */
/* free-block lists, in-use blocks, and finalization statistics -- via  */
/* GC_printf (so output is suppressed when GC_quiet is set).            */
void GC_dump()
{
    GC_printf("***Static roots:\n");
    GC_print_static_roots();
    GC_printf("\n***Heap sections:\n");
    GC_print_heap_sects();
    GC_printf("\n***Free blocks:\n");
    GC_print_hblkfreelist();
    GC_printf("\n***Blocks in use:\n");
    GC_print_block_list();
    GC_printf("\n***Finalization statistics:\n");
    GC_print_finalization_stats();
}
#endif /* NO_DEBUGGING */