
/gc.c

https://github.com/thepelkus/ruby
C | 4421 lines | 3402 code | 557 blank | 462 comment | 498 complexity
Possible License(s): 0BSD, Unlicense, GPL-2.0, BSD-3-Clause, AGPL-3.0
  1. /**********************************************************************
  2. gc.c -
  3. $Author$
  4. created at: Tue Oct 5 09:44:46 JST 1993
  5. Copyright (C) 1993-2007 Yukihiro Matsumoto
  6. Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
  7. Copyright (C) 2000 Information-technology Promotion Agency, Japan
  8. **********************************************************************/
  9. #include "ruby/ruby.h"
  10. #include "ruby/st.h"
  11. #include "ruby/re.h"
  12. #include "ruby/io.h"
  13. #include "ruby/thread.h"
  14. #include "ruby/util.h"
  15. #include "eval_intern.h"
  16. #include "vm_core.h"
  17. #include "internal.h"
  18. #include "gc.h"
  19. #include "constant.h"
  20. #include "atomic.h"
  21. #include <stdio.h>
  22. #include <setjmp.h>
  23. #include <sys/types.h>
  24. #include <assert.h>
  25. #ifdef HAVE_SYS_TIME_H
  26. #include <sys/time.h>
  27. #endif
  28. #ifdef HAVE_SYS_RESOURCE_H
  29. #include <sys/resource.h>
  30. #endif
  31. #if defined(__native_client__) && defined(NACL_NEWLIB)
  32. # include "nacl/resource.h"
  33. # undef HAVE_POSIX_MEMALIGN
  34. # undef HAVE_MEMALIGN
  35. #endif
  36. #if defined _WIN32 || defined __CYGWIN__
  37. #include <windows.h>
  38. #elif defined(HAVE_POSIX_MEMALIGN)
  39. #elif defined(HAVE_MEMALIGN)
  40. #include <malloc.h>
  41. #endif
  42. #ifdef HAVE_VALGRIND_MEMCHECK_H
  43. # include <valgrind/memcheck.h>
  44. # ifndef VALGRIND_MAKE_MEM_DEFINED
  45. # define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE((p), (n))
  46. # endif
  47. # ifndef VALGRIND_MAKE_MEM_UNDEFINED
  48. # define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE((p), (n))
  49. # endif
  50. #else
  51. # define VALGRIND_MAKE_MEM_DEFINED(p, n) /* empty */
  52. # define VALGRIND_MAKE_MEM_UNDEFINED(p, n) /* empty */
  53. #endif
  54. #define rb_setjmp(env) RUBY_SETJMP(env)
  55. #define rb_jmp_buf rb_jmpbuf_t
  56. /* Make alloca work the best possible way. */
  57. #ifdef __GNUC__
  58. # ifndef atarist
  59. # ifndef alloca
  60. # define alloca __builtin_alloca
  61. # endif
  62. # endif /* atarist */
  63. #else
  64. # ifdef HAVE_ALLOCA_H
  65. # include <alloca.h>
  66. # else
  67. # ifdef _AIX
  68. #pragma alloca
  69. # else
  70. # ifndef alloca /* predefined by HP cc +Olibcalls */
  71. void *alloca ();
  72. # endif
  73. # endif /* AIX */
  74. # endif /* HAVE_ALLOCA_H */
  75. #endif /* __GNUC__ */
  76. #ifndef GC_MALLOC_LIMIT
  77. #define GC_MALLOC_LIMIT 8000000
  78. #endif
  79. #define HEAP_MIN_SLOTS 10000
  80. #define FREE_MIN 4096
  81. typedef struct {
  82. unsigned int initial_malloc_limit;
  83. unsigned int initial_heap_min_slots;
  84. unsigned int initial_free_min;
  85. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  86. int gc_stress;
  87. #endif
  88. } ruby_gc_params_t;
  89. static ruby_gc_params_t initial_params = {
  90. GC_MALLOC_LIMIT,
  91. HEAP_MIN_SLOTS,
  92. FREE_MIN,
  93. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  94. FALSE,
  95. #endif
  96. };
  97. #define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
  98. #ifndef GC_PROFILE_MORE_DETAIL
  99. #define GC_PROFILE_MORE_DETAIL 0
  100. #endif
  101. typedef struct gc_profile_record {
  102. double gc_time;
  103. double gc_mark_time;
  104. double gc_sweep_time;
  105. double gc_invoke_time;
  106. size_t heap_use_slots;
  107. size_t heap_live_objects;
  108. size_t heap_free_objects;
  109. size_t heap_total_objects;
  110. size_t heap_use_size;
  111. size_t heap_total_size;
  112. int have_finalize;
  113. int is_marked;
  114. size_t allocate_increase;
  115. size_t allocate_limit;
  116. } gc_profile_record;
  117. #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
  118. #pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
  119. #endif
  120. typedef struct RVALUE {
  121. union {
  122. struct {
  123. VALUE flags; /* always 0 for freed obj */
  124. struct RVALUE *next;
  125. } free;
  126. struct RBasic basic;
  127. struct RObject object;
  128. struct RClass klass;
  129. struct RFloat flonum;
  130. struct RString string;
  131. struct RArray array;
  132. struct RRegexp regexp;
  133. struct RHash hash;
  134. struct RData data;
  135. struct RTypedData typeddata;
  136. struct RStruct rstruct;
  137. struct RBignum bignum;
  138. struct RFile file;
  139. struct RNode node;
  140. struct RMatch match;
  141. struct RRational rational;
  142. struct RComplex complex;
  143. } as;
  144. #ifdef GC_DEBUG
  145. const char *file;
  146. int line;
  147. #endif
  148. } RVALUE;
  149. #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
  150. #pragma pack(pop)
  151. #endif
  152. struct heaps_slot {
  153. void *membase;
  154. RVALUE *slot;
  155. size_t limit;
  156. uintptr_t *bits;
  157. RVALUE *freelist;
  158. struct heaps_slot *next;
  159. struct heaps_slot *prev;
  160. struct heaps_slot *free_next;
  161. };
  162. struct heaps_header {
  163. struct heaps_slot *base;
  164. uintptr_t *bits;
  165. };
  166. struct sorted_heaps_slot {
  167. RVALUE *start;
  168. RVALUE *end;
  169. struct heaps_slot *slot;
  170. };
  171. struct heaps_free_bitmap {
  172. struct heaps_free_bitmap *next;
  173. };
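/*
 * Layout notes (derived from the code below): each heap slot is one
 * HEAP_ALIGN-aligned chunk of HEAP_SIZE bytes.  The chunk starts with a
 * struct heaps_header (a back-pointer to its heaps_slot plus the mark
 * bitmap pointer), followed by the RVALUEs themselves.  In struct
 * heaps_slot, 'membase' is the aligned chunk, 'slot' is the first
 * RVALUE-aligned object, 'limit' is the number of object slots, 'bits'
 * is the mark bitmap, 'freelist' chains free RVALUEs in this chunk,
 * 'next'/'prev' link all slots, and 'free_next' links only slots that
 * still have free objects.  The sorted_heaps_slot array is kept sorted
 * by address so is_pointer_to_heap() can binary-search it, and
 * heaps_free_bitmap is a free list of recycled bitmap blocks.
 */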
  174. struct gc_list {
  175. VALUE *varptr;
  176. struct gc_list *next;
  177. };
  178. #define STACK_CHUNK_SIZE 500
  179. typedef struct stack_chunk {
  180. VALUE data[STACK_CHUNK_SIZE];
  181. struct stack_chunk *next;
  182. } stack_chunk_t;
  183. typedef struct mark_stack {
  184. stack_chunk_t *chunk;
  185. stack_chunk_t *cache;
  186. size_t index;
  187. size_t limit;
  188. size_t cache_size;
  189. size_t unused_cache_size;
  190. } mark_stack_t;
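/*
 * The mark stack is a linked list of fixed-size chunks holding
 * STACK_CHUNK_SIZE VALUEs each.  'chunk' is the chunk currently being
 * filled and 'index'/'limit' track the position within it, while
 * 'cache' keeps a small pool of empty chunks (counted by 'cache_size')
 * so pushes and pops during marking do not call malloc/free every
 * time; 'unused_cache_size' lets that pool be trimmed when it sits idle.
 */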
  191. #ifndef CALC_EXACT_MALLOC_SIZE
  192. #define CALC_EXACT_MALLOC_SIZE 0
  193. #endif
  194. typedef struct rb_objspace {
  195. struct {
  196. size_t limit;
  197. size_t increase;
  198. #if CALC_EXACT_MALLOC_SIZE
  199. size_t allocated_size;
  200. size_t allocations;
  201. #endif
  202. } malloc_params;
  203. struct {
  204. size_t increment;
  205. struct heaps_slot *ptr;
  206. struct heaps_slot *sweep_slots;
  207. struct heaps_slot *free_slots;
  208. struct sorted_heaps_slot *sorted;
  209. size_t length;
  210. size_t used;
  211. struct heaps_free_bitmap *free_bitmap;
  212. RVALUE *range[2];
  213. RVALUE *freed;
  214. size_t live_num;
  215. size_t free_num;
  216. size_t free_min;
  217. size_t final_num;
  218. size_t do_heap_free;
  219. } heap;
  220. struct {
  221. int dont_gc;
  222. int dont_lazy_sweep;
  223. int during_gc;
  224. rb_atomic_t finalizing;
  225. } flags;
  226. struct {
  227. st_table *table;
  228. RVALUE *deferred;
  229. } final;
  230. mark_stack_t mark_stack;
  231. struct {
  232. int run;
  233. gc_profile_record *record;
  234. size_t count;
  235. size_t size;
  236. double invoke_time;
  237. } profile;
  238. struct gc_list *global_list;
  239. size_t count;
  240. int gc_stress;
  241. struct mark_func_data_struct {
  242. VALUE data;
  243. void (*mark_func)(struct rb_objspace *objspace, VALUE v);
  244. } *mark_func_data;
  245. } rb_objspace_t;
  246. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  247. #define rb_objspace (*GET_VM()->objspace)
  248. #define ruby_initial_gc_stress initial_params.gc_stress
  249. int *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
  250. #else
  251. static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT}};
  252. int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
  253. #endif
  254. #define malloc_limit objspace->malloc_params.limit
  255. #define malloc_increase objspace->malloc_params.increase
  256. #define heaps objspace->heap.ptr
  257. #define heaps_length objspace->heap.length
  258. #define heaps_used objspace->heap.used
  259. #define lomem objspace->heap.range[0]
  260. #define himem objspace->heap.range[1]
  261. #define heaps_inc objspace->heap.increment
  262. #define heaps_freed objspace->heap.freed
  263. #define dont_gc objspace->flags.dont_gc
  264. #define during_gc objspace->flags.during_gc
  265. #define finalizing objspace->flags.finalizing
  266. #define finalizer_table objspace->final.table
  267. #define deferred_final_list objspace->final.deferred
  268. #define global_List objspace->global_list
  269. #define ruby_gc_stress objspace->gc_stress
  270. #define initial_malloc_limit initial_params.initial_malloc_limit
  271. #define initial_heap_min_slots initial_params.initial_heap_min_slots
  272. #define initial_free_min initial_params.initial_free_min
  273. #define is_lazy_sweeping(objspace) ((objspace)->heap.sweep_slots != 0)
  274. #define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
  275. #define RANY(o) ((RVALUE*)(o))
  276. #define has_free_object (objspace->heap.free_slots && objspace->heap.free_slots->freelist)
  277. #define HEAP_HEADER(p) ((struct heaps_header *)(p))
  278. #define GET_HEAP_HEADER(x) (HEAP_HEADER((uintptr_t)(x) & ~(HEAP_ALIGN_MASK)))
  279. #define GET_HEAP_SLOT(x) (GET_HEAP_HEADER(x)->base)
  280. #define GET_HEAP_BITMAP(x) (GET_HEAP_HEADER(x)->bits)
  281. #define NUM_IN_SLOT(p) (((uintptr_t)(p) & HEAP_ALIGN_MASK)/sizeof(RVALUE))
  282. #define BITMAP_INDEX(p) (NUM_IN_SLOT(p) / (sizeof(uintptr_t) * CHAR_BIT))
  283. #define BITMAP_OFFSET(p) (NUM_IN_SLOT(p) & ((sizeof(uintptr_t) * CHAR_BIT)-1))
  284. #define MARKED_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] & ((uintptr_t)1 << BITMAP_OFFSET(p)))
  285. #ifndef HEAP_ALIGN_LOG
  286. /* default tiny heap size: 16KB */
  287. #define HEAP_ALIGN_LOG 14
  288. #endif
  289. #define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
  290. enum {
  291. HEAP_ALIGN = (1UL << HEAP_ALIGN_LOG),
  292. HEAP_ALIGN_MASK = (~(~0UL << HEAP_ALIGN_LOG)),
  293. REQUIRED_SIZE_BY_MALLOC = (sizeof(size_t) * 5),
  294. HEAP_SIZE = (HEAP_ALIGN - REQUIRED_SIZE_BY_MALLOC),
  295. HEAP_OBJ_LIMIT = (unsigned int)((HEAP_SIZE - sizeof(struct heaps_header))/sizeof(struct RVALUE)),
  296. HEAP_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_SIZE, sizeof(struct RVALUE)), sizeof(uintptr_t) * CHAR_BIT)
  297. };
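/*
 * A worked example of the geometry above (an illustration only, assuming
 * a typical LP64 build where sizeof(RVALUE) == 40 (see the comment in
 * rb_obj_id() below) and sizeof(struct heaps_header) == 16):
 *
 *   HEAP_ALIGN              = 1 << 14           = 16384 bytes
 *   REQUIRED_SIZE_BY_MALLOC = 8 * 5             = 40 bytes
 *   HEAP_SIZE               = 16384 - 40        = 16344 bytes
 *   HEAP_OBJ_LIMIT          = (16344 - 16) / 40 = 408 objects per slot
 *   HEAP_BITMAP_LIMIT       = ceil(ceil(16344/40) / 64) = ceil(409/64) = 7 words
 *
 * so each slot carries a 7-word (448-bit) mark bitmap for at most 408
 * objects.  For a pointer p inside such a slot, NUM_IN_SLOT(p) is
 * (p & 16383) / 40, BITMAP_INDEX(p) is NUM_IN_SLOT(p) / 64 and
 * BITMAP_OFFSET(p) is NUM_IN_SLOT(p) % 64, which is how
 * MARKED_IN_BITMAP() locates the object's mark bit.
 */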
  298. int ruby_gc_debug_indent = 0;
  299. VALUE rb_mGC;
  300. extern st_table *rb_class_tbl;
  301. int ruby_disable_gc_stress = 0;
  302. static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
  303. static VALUE define_final0(VALUE obj, VALUE block);
  304. VALUE rb_define_final(VALUE obj, VALUE block);
  305. VALUE rb_undefine_final(VALUE obj);
  306. static void run_final(rb_objspace_t *objspace, VALUE obj);
  307. static void initial_expand_heap(rb_objspace_t *objspace);
  308. static void negative_size_allocation_error(const char *);
  309. static void *aligned_malloc(size_t, size_t);
  310. static void aligned_free(void *);
  311. static void init_mark_stack(mark_stack_t *stack);
  312. static VALUE lazy_sweep_enable(void);
  313. static int garbage_collect(rb_objspace_t *);
  314. static int gc_lazy_sweep(rb_objspace_t *);
  315. static void mark_tbl(rb_objspace_t *, st_table *);
  316. static void rest_sweep(rb_objspace_t *);
  317. static void gc_mark_stacked_objects(rb_objspace_t *);
  318. static double getrusage_time(void);
  319. static inline void gc_prof_timer_start(rb_objspace_t *);
  320. static inline void gc_prof_timer_stop(rb_objspace_t *, int);
  321. static inline void gc_prof_mark_timer_start(rb_objspace_t *);
  322. static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
  323. static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
  324. static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
  325. static inline void gc_prof_set_malloc_info(rb_objspace_t *);
  326. static inline void gc_prof_inc_live_num(rb_objspace_t *);
  327. static inline void gc_prof_dec_live_num(rb_objspace_t *);
  328. /*
  329. --------------------------- ObjectSpace -----------------------------
  330. */
  331. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  332. rb_objspace_t *
  333. rb_objspace_alloc(void)
  334. {
  335. rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t));
  336. memset(objspace, 0, sizeof(*objspace));
  337. malloc_limit = initial_malloc_limit;
  338. ruby_gc_stress = ruby_initial_gc_stress;
  339. return objspace;
  340. }
  341. #endif
  342. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  343. static void free_stack_chunks(mark_stack_t *);
  344. void
  345. rb_objspace_free(rb_objspace_t *objspace)
  346. {
  347. rest_sweep(objspace);
  348. if (objspace->profile.record) {
  349. free(objspace->profile.record);
  350. objspace->profile.record = 0;
  351. }
  352. if (global_List) {
  353. struct gc_list *list, *next;
  354. for (list = global_List; list; list = next) {
  355. next = list->next;
  356. xfree(list);
  357. }
  358. }
  359. if (objspace->heap.free_bitmap) {
  360. struct heaps_free_bitmap *list, *next;
  361. for (list = objspace->heap.free_bitmap; list; list = next) {
  362. next = list->next;
  363. free(list);
  364. }
  365. }
  366. if (objspace->heap.sorted) {
  367. size_t i;
  368. for (i = 0; i < heaps_used; ++i) {
  369. free(objspace->heap.sorted[i].slot->bits);
  370. aligned_free(objspace->heap.sorted[i].slot->membase);
  371. free(objspace->heap.sorted[i].slot);
  372. }
  373. free(objspace->heap.sorted);
  374. heaps_used = 0;
  375. heaps = 0;
  376. }
  377. free_stack_chunks(&objspace->mark_stack);
  378. free(objspace);
  379. }
  380. #endif
  381. void
  382. rb_global_variable(VALUE *var)
  383. {
  384. rb_gc_register_address(var);
  385. }
  386. static void
  387. allocate_sorted_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
  388. {
  389. struct sorted_heaps_slot *p;
  390. struct heaps_free_bitmap *bits;
  391. size_t size, add, i;
  392. size = next_heaps_length*sizeof(struct sorted_heaps_slot);
  393. add = next_heaps_length - heaps_used;
  394. if (heaps_used > 0) {
  395. p = (struct sorted_heaps_slot *)realloc(objspace->heap.sorted, size);
  396. if (p) objspace->heap.sorted = p;
  397. }
  398. else {
  399. p = objspace->heap.sorted = (struct sorted_heaps_slot *)malloc(size);
  400. }
  401. if (p == 0) {
  402. during_gc = 0;
  403. rb_memerror();
  404. }
  405. for (i = 0; i < add; i++) {
  406. bits = (struct heaps_free_bitmap *)malloc(HEAP_BITMAP_LIMIT * sizeof(uintptr_t));
  407. if (bits == 0) {
  408. during_gc = 0;
  409. rb_memerror();
  410. return;
  411. }
  412. bits->next = objspace->heap.free_bitmap;
  413. objspace->heap.free_bitmap = bits;
  414. }
  415. }
  416. static void
  417. link_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
  418. {
  419. slot->free_next = objspace->heap.free_slots;
  420. objspace->heap.free_slots = slot;
  421. }
  422. static void
  423. unlink_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
  424. {
  425. objspace->heap.free_slots = slot->free_next;
  426. slot->free_next = NULL;
  427. }
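/*
 * assign_heap_slot() below acquires one HEAP_ALIGN-aligned chunk,
 * carves it into a heaps_header followed by RVALUE-aligned object
 * slots, threads every slot onto the chunk's freelist, and inserts the
 * new chunk into the address-sorted 'heap.sorted' array (its position
 * found by binary search) so that is_pointer_to_heap() keeps working.
 */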
  428. static void
  429. assign_heap_slot(rb_objspace_t *objspace)
  430. {
  431. RVALUE *p, *pend, *membase;
  432. struct heaps_slot *slot;
  433. size_t hi, lo, mid;
  434. size_t objs;
  435. objs = HEAP_OBJ_LIMIT;
  436. p = (RVALUE*)aligned_malloc(HEAP_ALIGN, HEAP_SIZE);
  437. if (p == 0) {
  438. during_gc = 0;
  439. rb_memerror();
  440. }
  441. slot = (struct heaps_slot *)malloc(sizeof(struct heaps_slot));
  442. if (slot == 0) {
  443. aligned_free(p);
  444. during_gc = 0;
  445. rb_memerror();
  446. }
  447. MEMZERO((void*)slot, struct heaps_slot, 1);
  448. slot->next = heaps;
  449. if (heaps) heaps->prev = slot;
  450. heaps = slot;
  451. membase = p;
  452. p = (RVALUE*)((VALUE)p + sizeof(struct heaps_header));
  453. if ((VALUE)p % sizeof(RVALUE) != 0) {
  454. p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
  455. objs = (HEAP_SIZE - (size_t)((VALUE)p - (VALUE)membase))/sizeof(RVALUE);
  456. }
  457. lo = 0;
  458. hi = heaps_used;
  459. while (lo < hi) {
  460. register RVALUE *mid_membase;
  461. mid = (lo + hi) / 2;
  462. mid_membase = objspace->heap.sorted[mid].slot->membase;
  463. if (mid_membase < membase) {
  464. lo = mid + 1;
  465. }
  466. else if (mid_membase > membase) {
  467. hi = mid;
  468. }
  469. else {
  470. rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid);
  471. }
  472. }
  473. if (hi < heaps_used) {
  474. MEMMOVE(&objspace->heap.sorted[hi+1], &objspace->heap.sorted[hi], struct sorted_heaps_slot, heaps_used - hi);
  475. }
  476. objspace->heap.sorted[hi].slot = slot;
  477. objspace->heap.sorted[hi].start = p;
  478. objspace->heap.sorted[hi].end = (p + objs);
  479. heaps->membase = membase;
  480. heaps->slot = p;
  481. heaps->limit = objs;
  482. assert(objspace->heap.free_bitmap != NULL);
  483. heaps->bits = (uintptr_t *)objspace->heap.free_bitmap;
  484. objspace->heap.free_bitmap = objspace->heap.free_bitmap->next;
  485. HEAP_HEADER(membase)->base = heaps;
  486. HEAP_HEADER(membase)->bits = heaps->bits;
  487. memset(heaps->bits, 0, HEAP_BITMAP_LIMIT * sizeof(uintptr_t));
  488. objspace->heap.free_num += objs;
  489. pend = p + objs;
  490. if (lomem == 0 || lomem > p) lomem = p;
  491. if (himem < pend) himem = pend;
  492. heaps_used++;
  493. while (p < pend) {
  494. p->as.free.flags = 0;
  495. p->as.free.next = heaps->freelist;
  496. heaps->freelist = p;
  497. p++;
  498. }
  499. link_free_heap_slot(objspace, heaps);
  500. }
  501. static void
  502. add_heap_slots(rb_objspace_t *objspace, size_t add)
  503. {
  504. size_t i;
  505. size_t next_heaps_length;
  506. next_heaps_length = heaps_used + add;
  507. if (next_heaps_length > heaps_length) {
  508. allocate_sorted_heaps(objspace, next_heaps_length);
  509. heaps_length = next_heaps_length;
  510. }
  511. for (i = 0; i < add; i++) {
  512. assign_heap_slot(objspace);
  513. }
  514. heaps_inc = 0;
  515. }
  516. static void
  517. init_heap(rb_objspace_t *objspace)
  518. {
  519. add_heap_slots(objspace, HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT);
  520. init_mark_stack(&objspace->mark_stack);
  521. #ifdef USE_SIGALTSTACK
  522. {
523. /* the altstacks of other threads are allocated elsewhere */
  524. rb_thread_t *th = GET_THREAD();
  525. void *tmp = th->altstack;
  526. th->altstack = malloc(ALT_STACK_SIZE);
  527. free(tmp); /* free previously allocated area */
  528. }
  529. #endif
  530. objspace->profile.invoke_time = getrusage_time();
  531. finalizer_table = st_init_numtable();
  532. }
  533. static void
  534. initial_expand_heap(rb_objspace_t *objspace)
  535. {
  536. size_t min_size = initial_heap_min_slots / HEAP_OBJ_LIMIT;
  537. if (min_size > heaps_used) {
  538. add_heap_slots(objspace, min_size - heaps_used);
  539. }
  540. }
  541. static void
  542. set_heaps_increment(rb_objspace_t *objspace)
  543. {
  544. size_t next_heaps_length = (size_t)(heaps_used * 1.8);
  545. if (next_heaps_length == heaps_used) {
  546. next_heaps_length++;
  547. }
  548. heaps_inc = next_heaps_length - heaps_used;
  549. if (next_heaps_length > heaps_length) {
  550. allocate_sorted_heaps(objspace, next_heaps_length);
  551. heaps_length = next_heaps_length;
  552. }
  553. }
  554. static int
  555. heaps_increment(rb_objspace_t *objspace)
  556. {
  557. if (heaps_inc > 0) {
  558. assign_heap_slot(objspace);
  559. heaps_inc--;
  560. return TRUE;
  561. }
  562. return FALSE;
  563. }
  564. VALUE
  565. rb_newobj(void)
  566. {
  567. rb_objspace_t *objspace = &rb_objspace;
  568. VALUE obj;
  569. if (UNLIKELY(during_gc)) {
  570. dont_gc = 1;
  571. during_gc = 0;
  572. rb_bug("object allocation during garbage collection phase");
  573. }
  574. if (UNLIKELY(ruby_gc_stress && !ruby_disable_gc_stress)) {
  575. if (!garbage_collect(objspace)) {
  576. during_gc = 0;
  577. rb_memerror();
  578. }
  579. }
  580. if (UNLIKELY(!has_free_object)) {
  581. if (!gc_lazy_sweep(objspace)) {
  582. during_gc = 0;
  583. rb_memerror();
  584. }
  585. }
  586. obj = (VALUE)objspace->heap.free_slots->freelist;
  587. objspace->heap.free_slots->freelist = RANY(obj)->as.free.next;
  588. if (objspace->heap.free_slots->freelist == NULL) {
  589. unlink_free_heap_slot(objspace, objspace->heap.free_slots);
  590. }
  591. MEMZERO((void*)obj, RVALUE, 1);
  592. #ifdef GC_DEBUG
  593. RANY(obj)->file = rb_sourcefile();
  594. RANY(obj)->line = rb_sourceline();
  595. #endif
  596. gc_prof_inc_live_num(objspace);
  597. return obj;
  598. }
  599. NODE*
  600. rb_node_newnode(enum node_type type, VALUE a0, VALUE a1, VALUE a2)
  601. {
  602. NODE *n = (NODE*)rb_newobj();
  603. n->flags |= T_NODE;
  604. nd_set_type(n, type);
  605. n->u1.value = a0;
  606. n->u2.value = a1;
  607. n->u3.value = a2;
  608. return n;
  609. }
  610. VALUE
  611. rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
  612. {
  613. NEWOBJ(data, struct RData);
  614. if (klass) Check_Type(klass, T_CLASS);
  615. OBJSETUP(data, klass, T_DATA);
  616. data->data = datap;
  617. data->dfree = dfree;
  618. data->dmark = dmark;
  619. return (VALUE)data;
  620. }
  621. VALUE
  622. rb_data_typed_object_alloc(VALUE klass, void *datap, const rb_data_type_t *type)
  623. {
  624. NEWOBJ(data, struct RTypedData);
  625. if (klass) Check_Type(klass, T_CLASS);
  626. OBJSETUP(data, klass, T_DATA);
  627. data->data = datap;
  628. data->typed_flag = 1;
  629. data->type = type;
  630. return (VALUE)data;
  631. }
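/*
 * A minimal usage sketch for the two allocators above (illustrative
 * only; 'struct my_counter', 'my_counter_free' and 'my_counter_new' are
 * hypothetical names, not part of this file):
 *
 *   struct my_counter { long hits; };
 *
 *   static void
 *   my_counter_free(void *ptr)
 *   {
 *       xfree(ptr);
 *   }
 *
 *   static VALUE
 *   my_counter_new(VALUE klass)
 *   {
 *       struct my_counter *c = ALLOC(struct my_counter);
 *       c->hits = 0;
 *       // no dmark function: the struct holds no VALUEs
 *       return rb_data_object_alloc(klass, c, NULL, my_counter_free);
 *   }
 *
 * The wrapped pointer is later retrieved with DATA_PTR(obj), and
 * my_counter_free() runs when the object is swept (see obj_free below).
 */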
  632. size_t
  633. rb_objspace_data_type_memsize(VALUE obj)
  634. {
  635. if (RTYPEDDATA_P(obj) && RTYPEDDATA_TYPE(obj)->function.dsize) {
  636. return RTYPEDDATA_TYPE(obj)->function.dsize(RTYPEDDATA_DATA(obj));
  637. }
  638. else {
  639. return 0;
  640. }
  641. }
  642. const char *
  643. rb_objspace_data_type_name(VALUE obj)
  644. {
  645. if (RTYPEDDATA_P(obj)) {
  646. return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
  647. }
  648. else {
  649. return 0;
  650. }
  651. }
  652. static void gc_mark(rb_objspace_t *objspace, VALUE ptr);
  653. static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);
  654. static inline int
  655. is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
  656. {
  657. register RVALUE *p = RANY(ptr);
  658. register struct sorted_heaps_slot *heap;
  659. register size_t hi, lo, mid;
  660. if (p < lomem || p > himem) return FALSE;
  661. if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
662. /* check whether p points into the object heap, using binary search */
  663. lo = 0;
  664. hi = heaps_used;
  665. while (lo < hi) {
  666. mid = (lo + hi) / 2;
  667. heap = &objspace->heap.sorted[mid];
  668. if (heap->start <= p) {
  669. if (p < heap->end)
  670. return TRUE;
  671. lo = mid + 1;
  672. }
  673. else {
  674. hi = mid;
  675. }
  676. }
  677. return FALSE;
  678. }
  679. static int
  680. free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data)
  681. {
  682. if (!me->mark) {
  683. rb_free_method_entry(me);
  684. }
  685. return ST_CONTINUE;
  686. }
  687. void
  688. rb_free_m_table(st_table *tbl)
  689. {
  690. st_foreach(tbl, free_method_entry_i, 0);
  691. st_free_table(tbl);
  692. }
  693. static int
  694. free_const_entry_i(ID key, rb_const_entry_t *ce, st_data_t data)
  695. {
  696. xfree(ce);
  697. return ST_CONTINUE;
  698. }
  699. void
  700. rb_free_const_table(st_table *tbl)
  701. {
  702. st_foreach(tbl, free_const_entry_i, 0);
  703. st_free_table(tbl);
  704. }
  705. static int obj_free(rb_objspace_t *, VALUE);
  706. static inline struct heaps_slot *
  707. add_slot_local_freelist(rb_objspace_t *objspace, RVALUE *p)
  708. {
  709. struct heaps_slot *slot;
  710. VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
  711. p->as.free.flags = 0;
  712. slot = GET_HEAP_SLOT(p);
  713. p->as.free.next = slot->freelist;
  714. slot->freelist = p;
  715. return slot;
  716. }
  717. static void
  718. unlink_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
  719. {
  720. if (slot->prev)
  721. slot->prev->next = slot->next;
  722. if (slot->next)
  723. slot->next->prev = slot->prev;
  724. if (heaps == slot)
  725. heaps = slot->next;
  726. if (objspace->heap.sweep_slots == slot)
  727. objspace->heap.sweep_slots = slot->next;
  728. slot->prev = NULL;
  729. slot->next = NULL;
  730. }
  731. static void
  732. free_unused_heaps(rb_objspace_t *objspace)
  733. {
  734. size_t i, j;
  735. RVALUE *last = 0;
  736. for (i = j = 1; j < heaps_used; i++) {
  737. if (objspace->heap.sorted[i].slot->limit == 0) {
  738. struct heaps_slot* h = objspace->heap.sorted[i].slot;
  739. ((struct heaps_free_bitmap *)(h->bits))->next =
  740. objspace->heap.free_bitmap;
  741. objspace->heap.free_bitmap = (struct heaps_free_bitmap *)h->bits;
  742. if (!last) {
  743. last = objspace->heap.sorted[i].slot->membase;
  744. }
  745. else {
  746. aligned_free(objspace->heap.sorted[i].slot->membase);
  747. }
  748. free(objspace->heap.sorted[i].slot);
  749. heaps_used--;
  750. }
  751. else {
  752. if (i != j) {
  753. objspace->heap.sorted[j] = objspace->heap.sorted[i];
  754. }
  755. j++;
  756. }
  757. }
  758. if (last) {
  759. if (last < heaps_freed) {
  760. aligned_free(heaps_freed);
  761. heaps_freed = last;
  762. }
  763. else {
  764. aligned_free(last);
  765. }
  766. }
  767. }
  768. static inline void
  769. make_deferred(RVALUE *p)
  770. {
  771. p->as.basic.flags = (p->as.basic.flags & ~T_MASK) | T_ZOMBIE;
  772. }
  773. static inline void
  774. make_io_deferred(RVALUE *p)
  775. {
  776. rb_io_t *fptr = p->as.file.fptr;
  777. make_deferred(p);
  778. p->as.data.dfree = (void (*)(void*))rb_io_fptr_finalize;
  779. p->as.data.data = fptr;
  780. }
  781. static int
  782. obj_free(rb_objspace_t *objspace, VALUE obj)
  783. {
  784. switch (BUILTIN_TYPE(obj)) {
  785. case T_NIL:
  786. case T_FIXNUM:
  787. case T_TRUE:
  788. case T_FALSE:
  789. rb_bug("obj_free() called for broken object");
  790. break;
  791. }
  792. if (FL_TEST(obj, FL_EXIVAR)) {
  793. rb_free_generic_ivar((VALUE)obj);
  794. FL_UNSET(obj, FL_EXIVAR);
  795. }
  796. switch (BUILTIN_TYPE(obj)) {
  797. case T_OBJECT:
  798. if (!(RANY(obj)->as.basic.flags & ROBJECT_EMBED) &&
  799. RANY(obj)->as.object.as.heap.ivptr) {
  800. xfree(RANY(obj)->as.object.as.heap.ivptr);
  801. }
  802. break;
  803. case T_MODULE:
  804. case T_CLASS:
  805. rb_clear_cache_by_class((VALUE)obj);
  806. if (RCLASS_M_TBL(obj)) {
  807. rb_free_m_table(RCLASS_M_TBL(obj));
  808. }
  809. if (RCLASS_IV_TBL(obj)) {
  810. st_free_table(RCLASS_IV_TBL(obj));
  811. }
  812. if (RCLASS_CONST_TBL(obj)) {
  813. rb_free_const_table(RCLASS_CONST_TBL(obj));
  814. }
  815. if (RCLASS_IV_INDEX_TBL(obj)) {
  816. st_free_table(RCLASS_IV_INDEX_TBL(obj));
  817. }
  818. xfree(RANY(obj)->as.klass.ptr);
  819. break;
  820. case T_STRING:
  821. rb_str_free(obj);
  822. break;
  823. case T_ARRAY:
  824. rb_ary_free(obj);
  825. break;
  826. case T_HASH:
  827. if (RANY(obj)->as.hash.ntbl) {
  828. st_free_table(RANY(obj)->as.hash.ntbl);
  829. }
  830. break;
  831. case T_REGEXP:
  832. if (RANY(obj)->as.regexp.ptr) {
  833. onig_free(RANY(obj)->as.regexp.ptr);
  834. }
  835. break;
  836. case T_DATA:
  837. if (DATA_PTR(obj)) {
  838. if (RTYPEDDATA_P(obj)) {
  839. RDATA(obj)->dfree = RANY(obj)->as.typeddata.type->function.dfree;
  840. }
  841. if (RANY(obj)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
  842. xfree(DATA_PTR(obj));
  843. }
  844. else if (RANY(obj)->as.data.dfree) {
  845. make_deferred(RANY(obj));
  846. return 1;
  847. }
  848. }
  849. break;
  850. case T_MATCH:
  851. if (RANY(obj)->as.match.rmatch) {
  852. struct rmatch *rm = RANY(obj)->as.match.rmatch;
  853. onig_region_free(&rm->regs, 0);
  854. if (rm->char_offset)
  855. xfree(rm->char_offset);
  856. xfree(rm);
  857. }
  858. break;
  859. case T_FILE:
  860. if (RANY(obj)->as.file.fptr) {
  861. make_io_deferred(RANY(obj));
  862. return 1;
  863. }
  864. break;
  865. case T_RATIONAL:
  866. case T_COMPLEX:
  867. break;
  868. case T_ICLASS:
  869. /* iClass shares table with the module */
  870. xfree(RANY(obj)->as.klass.ptr);
  871. break;
  872. case T_FLOAT:
  873. break;
  874. case T_BIGNUM:
  875. if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) {
  876. xfree(RBIGNUM_DIGITS(obj));
  877. }
  878. break;
  879. case T_NODE:
  880. switch (nd_type(obj)) {
  881. case NODE_SCOPE:
  882. if (RANY(obj)->as.node.u1.tbl) {
  883. xfree(RANY(obj)->as.node.u1.tbl);
  884. }
  885. break;
  886. case NODE_ARGS:
  887. if (RANY(obj)->as.node.u3.args) {
  888. xfree(RANY(obj)->as.node.u3.args);
  889. }
  890. break;
  891. case NODE_ALLOCA:
  892. xfree(RANY(obj)->as.node.u1.node);
  893. break;
  894. }
  895. break; /* no need to free iv_tbl */
  896. case T_STRUCT:
  897. if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
  898. RANY(obj)->as.rstruct.as.heap.ptr) {
  899. xfree(RANY(obj)->as.rstruct.as.heap.ptr);
  900. }
  901. break;
  902. default:
  903. rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
  904. BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
  905. }
  906. return 0;
  907. }
  908. void
  909. Init_heap(void)
  910. {
  911. init_heap(&rb_objspace);
  912. }
  913. typedef int each_obj_callback(void *, void *, size_t, void *);
  914. struct each_obj_args {
  915. each_obj_callback *callback;
  916. void *data;
  917. };
  918. static VALUE
  919. objspace_each_objects(VALUE arg)
  920. {
  921. size_t i;
  922. RVALUE *membase = 0;
  923. RVALUE *pstart, *pend;
  924. rb_objspace_t *objspace = &rb_objspace;
  925. struct each_obj_args *args = (struct each_obj_args *)arg;
  926. volatile VALUE v;
  927. i = 0;
  928. while (i < heaps_used) {
  929. while (0 < i && (uintptr_t)membase < (uintptr_t)objspace->heap.sorted[i-1].slot->membase)
  930. i--;
  931. while (i < heaps_used && (uintptr_t)objspace->heap.sorted[i].slot->membase <= (uintptr_t)membase)
  932. i++;
  933. if (heaps_used <= i)
  934. break;
  935. membase = objspace->heap.sorted[i].slot->membase;
  936. pstart = objspace->heap.sorted[i].slot->slot;
  937. pend = pstart + objspace->heap.sorted[i].slot->limit;
  938. for (; pstart != pend; pstart++) {
  939. if (pstart->as.basic.flags) {
  940. v = (VALUE)pstart; /* acquire to save this object */
  941. break;
  942. }
  943. }
  944. if (pstart != pend) {
  945. if ((*args->callback)(pstart, pend, sizeof(RVALUE), args->data)) {
  946. break;
  947. }
  948. }
  949. }
  950. RB_GC_GUARD(v);
  951. return Qnil;
  952. }
  953. /*
954. * rb_objspace_each_objects() is a special C API for walking through the
955. * Ruby object space. It is difficult to use correctly;
956. * frankly, you should not use it unless you have read the
957. * source code of this function and understand what it does.
958. *
959. * 'callback' is called several times (once per heap slot, in the
960. * current implementation) with:
961. * vstart: a pointer to the first living object in the heap slot.
962. * vend: a pointer just past the end of the valid heap slot area.
963. * stride: the distance to the next VALUE.
964. *
965. * If callback() returns non-zero, the iteration is stopped.
966. *
967. * This is a sample callback that iterates over the live objects:
968. *
969. * int sample_callback(void *vstart, void *vend, size_t stride, void *data) {
970. * VALUE v = (VALUE)vstart;
971. * for (; v != (VALUE)vend; v += stride) {
972. * if (RBASIC(v)->flags) { // liveness check
973. * // do something with the live object 'v'
974. * }
975. * }
976. * return 0; // continue the iteration
977. * }
978. *
979. * Note: 'vstart' is not the top of the heap_slot. It points to the first
980. * living object, so that at least one object is kept alive and GC problems
981. * are avoided. This means you cannot walk through every object slot,
982. * including freed slots.
983. *
984. * Note: In this implementation, 'stride' equals sizeof(RVALUE).
985. * However, a different value may be passed as 'stride' for some reason,
986. * so you must use 'stride' rather than a constant value when
987. * advancing through the iteration.
988. */
  989. void
  990. rb_objspace_each_objects(each_obj_callback *callback, void *data)
  991. {
  992. struct each_obj_args args;
  993. rb_objspace_t *objspace = &rb_objspace;
  994. rest_sweep(objspace);
  995. objspace->flags.dont_lazy_sweep = TRUE;
  996. args.callback = callback;
  997. args.data = data;
  998. rb_ensure(objspace_each_objects, (VALUE)&args, lazy_sweep_enable, Qnil);
  999. }
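/*
 * A minimal usage sketch for rb_objspace_each_objects() (illustrative
 * only; 'count_strings_i' and the 'n' counter are hypothetical names):
 *
 *   static int
 *   count_strings_i(void *vstart, void *vend, size_t stride, void *data)
 *   {
 *       size_t *n = (size_t *)data;
 *       VALUE v = (VALUE)vstart;
 *       for (; v != (VALUE)vend; v += stride) {
 *           if (RBASIC(v)->flags && BUILTIN_TYPE(v) == T_STRING) (*n)++;
 *       }
 *       return 0; // keep iterating
 *   }
 *
 *   size_t n = 0;
 *   rb_objspace_each_objects(count_strings_i, &n);
 *
 * Note that rest_sweep() runs first and lazy sweeping is disabled for
 * the duration of the walk, as the function above shows.
 */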
  1000. struct os_each_struct {
  1001. size_t num;
  1002. VALUE of;
  1003. };
  1004. static int
  1005. internal_object_p(VALUE obj)
  1006. {
  1007. RVALUE *p = (RVALUE *)obj;
  1008. if (p->as.basic.flags) {
  1009. switch (BUILTIN_TYPE(p)) {
  1010. case T_NONE:
  1011. case T_ICLASS:
  1012. case T_NODE:
  1013. case T_ZOMBIE:
  1014. break;
  1015. case T_CLASS:
  1016. if (FL_TEST(p, FL_SINGLETON))
  1017. break;
  1018. default:
  1019. if (!p->as.basic.klass) break;
  1020. return 0;
  1021. }
  1022. }
  1023. return 1;
  1024. }
  1025. static int
  1026. os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
  1027. {
  1028. struct os_each_struct *oes = (struct os_each_struct *)data;
  1029. RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend;
  1030. for (; p != pend; p++) {
  1031. volatile VALUE v = (VALUE)p;
  1032. if (!internal_object_p(v)) {
  1033. if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
  1034. rb_yield(v);
  1035. oes->num++;
  1036. }
  1037. }
  1038. }
  1039. return 0;
  1040. }
  1041. static VALUE
  1042. os_obj_of(VALUE of)
  1043. {
  1044. struct os_each_struct oes;
  1045. oes.num = 0;
  1046. oes.of = of;
  1047. rb_objspace_each_objects(os_obj_of_i, &oes);
  1048. return SIZET2NUM(oes.num);
  1049. }
  1050. /*
  1051. * call-seq:
  1052. * ObjectSpace.each_object([module]) {|obj| ... } -> fixnum
  1053. * ObjectSpace.each_object([module]) -> an_enumerator
  1054. *
  1055. * Calls the block once for each living, nonimmediate object in this
  1056. * Ruby process. If <i>module</i> is specified, calls the block
  1057. * for only those classes or modules that match (or are a subclass of)
  1058. * <i>module</i>. Returns the number of objects found. Immediate
1059. * objects (<code>Fixnum</code>s, <code>Symbol</code>s,
  1060. * <code>true</code>, <code>false</code>, and <code>nil</code>) are
  1061. * never returned. In the example below, <code>each_object</code>
  1062. * returns both the numbers we defined and several constants defined in
  1063. * the <code>Math</code> module.
  1064. *
  1065. * If no block is given, an enumerator is returned instead.
  1066. *
  1067. * a = 102.7
  1068. * b = 95 # Won't be returned
  1069. * c = 12345678987654321
  1070. * count = ObjectSpace.each_object(Numeric) {|x| p x }
  1071. * puts "Total count: #{count}"
  1072. *
  1073. * <em>produces:</em>
  1074. *
  1075. * 12345678987654321
  1076. * 102.7
  1077. * 2.71828182845905
  1078. * 3.14159265358979
  1079. * 2.22044604925031e-16
  1080. * 1.7976931348623157e+308
  1081. * 2.2250738585072e-308
  1082. * Total count: 7
  1083. *
  1084. */
  1085. static VALUE
  1086. os_each_obj(int argc, VALUE *argv, VALUE os)
  1087. {
  1088. VALUE of;
  1089. rb_secure(4);
  1090. if (argc == 0) {
  1091. of = 0;
  1092. }
  1093. else {
  1094. rb_scan_args(argc, argv, "01", &of);
  1095. }
  1096. RETURN_ENUMERATOR(os, 1, &of);
  1097. return os_obj_of(of);
  1098. }
  1099. /*
  1100. * call-seq:
  1101. * ObjectSpace.undefine_finalizer(obj)
  1102. *
  1103. * Removes all finalizers for <i>obj</i>.
  1104. *
  1105. */
  1106. static VALUE
  1107. undefine_final(VALUE os, VALUE obj)
  1108. {
  1109. return rb_undefine_final(obj);
  1110. }
  1111. VALUE
  1112. rb_undefine_final(VALUE obj)
  1113. {
  1114. rb_objspace_t *objspace = &rb_objspace;
  1115. st_data_t data = obj;
  1116. rb_check_frozen(obj);
  1117. st_delete(finalizer_table, &data, 0);
  1118. FL_UNSET(obj, FL_FINALIZE);
  1119. return obj;
  1120. }
  1121. /*
  1122. * call-seq:
  1123. * ObjectSpace.define_finalizer(obj, aProc=proc())
  1124. *
  1125. * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
1126. * is destroyed.
  1127. *
  1128. */
  1129. static VALUE
  1130. define_final(int argc, VALUE *argv, VALUE os)
  1131. {
  1132. VALUE obj, block;
  1133. rb_scan_args(argc, argv, "11", &obj, &block);
  1134. rb_check_frozen(obj);
  1135. if (argc == 1) {
  1136. block = rb_block_proc();
  1137. }
  1138. else if (!rb_respond_to(block, rb_intern("call"))) {
  1139. rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
  1140. rb_obj_classname(block));
  1141. }
  1142. return define_final0(obj, block);
  1143. }
  1144. static VALUE
  1145. define_final0(VALUE obj, VALUE block)
  1146. {
  1147. rb_objspace_t *objspace = &rb_objspace;
  1148. VALUE table;
  1149. st_data_t data;
  1150. if (!FL_ABLE(obj)) {
  1151. rb_raise(rb_eArgError, "cannot define finalizer for %s",
  1152. rb_obj_classname(obj));
  1153. }
  1154. RBASIC(obj)->flags |= FL_FINALIZE;
  1155. block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block);
  1156. OBJ_FREEZE(block);
  1157. if (st_lookup(finalizer_table, obj, &data)) {
  1158. table = (VALUE)data;
  1159. rb_ary_push(table, block);
  1160. }
  1161. else {
  1162. table = rb_ary_new3(1, block);
  1163. RBASIC(table)->klass = 0;
  1164. st_add_direct(finalizer_table, obj, table);
  1165. }
  1166. return block;
  1167. }
  1168. VALUE
  1169. rb_define_final(VALUE obj, VALUE block)
  1170. {
  1171. rb_check_frozen(obj);
  1172. if (!rb_respond_to(block, rb_intern("call"))) {
  1173. rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
  1174. rb_obj_classname(block));
  1175. }
  1176. return define_final0(obj, block);
  1177. }
  1178. void
  1179. rb_gc_copy_finalizer(VALUE dest, VALUE obj)
  1180. {
  1181. rb_objspace_t *objspace = &rb_objspace;
  1182. VALUE table;
  1183. st_data_t data;
  1184. if (!FL_TEST(obj, FL_FINALIZE)) return;
  1185. if (st_lookup(finalizer_table, obj, &data)) {
  1186. table = (VALUE)data;
  1187. st_insert(finalizer_table, dest, table);
  1188. }
  1189. FL_SET(dest, FL_FINALIZE);
  1190. }
  1191. static VALUE
  1192. run_single_final(VALUE arg)
  1193. {
  1194. VALUE *args = (VALUE *)arg;
  1195. rb_eval_cmd(args[0], args[1], (int)args[2]);
  1196. return Qnil;
  1197. }
  1198. static void
  1199. run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
  1200. {
  1201. long i;
  1202. int status;
  1203. VALUE args[3];
  1204. VALUE objid = nonspecial_obj_id(obj);
  1205. if (RARRAY_LEN(table) > 0) {
  1206. args[1] = rb_obj_freeze(rb_ary_new3(1, objid));
  1207. }
  1208. else {
  1209. args[1] = 0;
  1210. }
  1211. args[2] = (VALUE)rb_safe_level();
  1212. for (i=0; i<RARRAY_LEN(table); i++) {
  1213. VALUE final = RARRAY_PTR(table)[i];
  1214. args[0] = RARRAY_PTR(final)[1];
  1215. args[2] = FIX2INT(RARRAY_PTR(final)[0]);
  1216. status = 0;
  1217. rb_protect(run_single_final, (VALUE)args, &status);
  1218. if (status)
  1219. rb_set_errinfo(Qnil);
  1220. }
  1221. }
  1222. static void
  1223. run_final(rb_objspace_t *objspace, VALUE obj)
  1224. {
  1225. RUBY_DATA_FUNC free_func = 0;
  1226. st_data_t key, table;
  1227. objspace->heap.final_num--;
  1228. RBASIC(obj)->klass = 0;
  1229. if (RTYPEDDATA_P(obj)) {
  1230. free_func = RTYPEDDATA_TYPE(obj)->function.dfree;
  1231. }
  1232. else {
  1233. free_func = RDATA(obj)->dfree;
  1234. }
  1235. if (free_func) {
  1236. (*free_func)(DATA_PTR(obj));
  1237. }
  1238. key = (st_data_t)obj;
  1239. if (st_delete(finalizer_table, &key, &table)) {
  1240. run_finalizer(objspace, obj, (VALUE)table);
  1241. }
  1242. }
  1243. static void
  1244. finalize_list(rb_objspace_t *objspace, RVALUE *p)
  1245. {
  1246. while (p) {
  1247. RVALUE *tmp = p->as.free.next;
  1248. run_final(objspace, (VALUE)p);
  1249. if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */
  1250. add_slot_local_freelist(objspace, p);
  1251. if (!is_lazy_sweeping(objspace)) {
  1252. gc_prof_dec_live_num(objspace);
  1253. }
  1254. }
  1255. else {
  1256. struct heaps_slot *slot = (struct heaps_slot *)(VALUE)RDATA(p)->dmark;
  1257. slot->limit--;
  1258. }
  1259. p = tmp;
  1260. }
  1261. }
  1262. static void
  1263. finalize_deferred(rb_objspace_t *objspace)
  1264. {
  1265. RVALUE *p = deferred_final_list;
  1266. deferred_final_list = 0;
  1267. if (p) {
  1268. finalize_list(objspace, p);
  1269. }
  1270. }
  1271. void
  1272. rb_gc_finalize_deferred(void)
  1273. {
  1274. rb_objspace_t *objspace = &rb_objspace;
  1275. if (ATOMIC_EXCHANGE(finalizing, 1)) return;
  1276. finalize_deferred(objspace);
  1277. ATOMIC_SET(finalizing, 0);
  1278. }
  1279. static int
  1280. chain_finalized_object(st_data_t key, st_data_t val, st_data_t arg)
  1281. {
  1282. RVALUE *p = (RVALUE *)key, **final_list = (RVALUE **)arg;
  1283. if ((p->as.basic.flags & FL_FINALIZE) == FL_FINALIZE &&
  1284. !MARKED_IN_BITMAP(GET_HEAP_BITMAP(p), p)) {
  1285. if (BUILTIN_TYPE(p) != T_ZOMBIE) {
  1286. p->as.free.flags = T_ZOMBIE;
  1287. RDATA(p)->dfree = 0;
  1288. }
  1289. p->as.free.next = *final_list;
  1290. *final_list = p;
  1291. }
  1292. return ST_CONTINUE;
  1293. }
  1294. struct force_finalize_list {
  1295. VALUE obj;
  1296. VALUE table;
  1297. struct force_finalize_list *next;
  1298. };
  1299. static int
  1300. force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
  1301. {
  1302. struct force_finalize_list **prev = (struct force_finalize_list **)arg;
  1303. struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
  1304. curr->obj = key;
  1305. curr->table = val;
  1306. curr->next = *prev;
  1307. *prev = curr;
  1308. return ST_CONTINUE;
  1309. }
  1310. void
  1311. rb_gc_call_finalizer_at_exit(void)
  1312. {
  1313. rb_objspace_call_finalizer(&rb_objspace);
  1314. }
  1315. static void
  1316. rb_objspace_call_finalizer(rb_objspace_t *objspace)
  1317. {
  1318. RVALUE *p, *pend;
  1319. RVALUE *final_list = 0;
  1320. size_t i;
  1321. rest_sweep(objspace);
  1322. if (ATOMIC_EXCHANGE(finalizing, 1)) return;
  1323. /* run finalizers */
  1324. do {
  1325. finalize_deferred(objspace);
  1326. /* mark reachable objects from finalizers */
1327. /* they might not be referenced from anywhere else at this point */
  1328. mark_tbl(objspace, finalizer_table);
  1329. gc_mark_stacked_objects(objspace);
  1330. st_foreach(finalizer_table, chain_finalized_object,
  1331. (st_data_t)&deferred_final_list);
  1332. } while (deferred_final_list);
  1333. /* force to run finalizer */
  1334. while (finalizer_table->num_entries) {
  1335. struct force_finalize_list *list = 0;
  1336. st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
  1337. while (list) {
  1338. struct force_finalize_list *curr = list;
  1339. st_data_t obj = (st_data_t)curr->obj;
  1340. run_finalizer(objspace, curr->obj, curr->table);
  1341. st_delete(finalizer_table, &obj, 0);
  1342. list = curr->next;
  1343. xfree(curr);
  1344. }
  1345. }
  1346. /* finalizers are part of garbage collection */
  1347. during_gc++;
  1348. /* run data object's finalizers */
  1349. for (i = 0; i < heaps_used; i++) {
  1350. p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end;
  1351. while (p < pend) {
  1352. if (BUILTIN_TYPE(p) == T_DATA &&
  1353. DATA_PTR(p) && RANY(p)->as.data.dfree &&
  1354. !rb_obj_is_thread((VALUE)p) && !rb_obj_is_mutex((VALUE)p) &&
  1355. !rb_obj_is_fiber((VALUE)p)) {
  1356. p->as.free.flags = 0;
  1357. if (RTYPEDDATA_P(p)) {
  1358. RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
  1359. }
  1360. if (RANY(p)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
  1361. xfree(DATA_PTR(p));
  1362. }
  1363. else if (RANY(p)->as.data.dfree) {
  1364. make_deferred(RANY(p));
  1365. RANY(p)->as.free.next = final_list;
  1366. final_list = p;
  1367. }
  1368. }
  1369. else if (BUILTIN_TYPE(p) == T_FILE) {
  1370. if (RANY(p)->as.file.fptr) {
  1371. make_io_deferred(RANY(p));
  1372. RANY(p)->as.free.next = final_list;
  1373. final_list = p;
  1374. }
  1375. }
  1376. p++;
  1377. }
  1378. }
  1379. during_gc = 0;
  1380. if (final_list) {
  1381. finalize_list(objspace, final_list);
  1382. }
  1383. st_free_table(finalizer_table);
  1384. finalizer_table = 0;
  1385. ATOMIC_SET(finalizing, 0);
  1386. }
  1387. static inline int
  1388. is_id_value(rb_objspace_t *objspace, VALUE ptr)
  1389. {
  1390. if (!is_pointer_to_heap(objspace, (void *)ptr)) return FALSE;
  1391. if (BUILTIN_TYPE(ptr) > T_FIXNUM) return FALSE;
  1392. if (BUILTIN_TYPE(ptr) == T_ICLASS) return FALSE;
  1393. return TRUE;
  1394. }
  1395. static inline int
  1396. is_dead_object(rb_objspace_t *objspace, VALUE ptr)
  1397. {
  1398. struct heaps_slot *slot = objspace->heap.sweep_slots;
  1399. if (!is_lazy_sweeping(objspace) || MARKED_IN_BITMAP(GET_HEAP_BITMAP(ptr), ptr))
  1400. return FALSE;
  1401. while (slot) {
  1402. if ((VALUE)slot->slot <= ptr && ptr < (VALUE)(slot->slot + slot->limit))
  1403. return TRUE;
  1404. slot = slot->next;
  1405. }
  1406. return FALSE;
  1407. }
  1408. static inline int
  1409. is_live_object(rb_objspace_t *objspace, VALUE ptr)
  1410. {
  1411. if (BUILTIN_TYPE(ptr) == 0) return FALSE;
  1412. if (RBASIC(ptr)->klass == 0) return FALSE;
  1413. if (is_dead_object(objspace, ptr)) return FALSE;
  1414. return TRUE;
  1415. }
  1416. /*
  1417. * call-seq:
  1418. * ObjectSpace._id2ref(object_id) -> an_object
  1419. *
  1420. * Converts an object id to a reference to the object. May not be
  1421. * called on an object id passed as a parameter to a finalizer.
  1422. *
  1423. * s = "I am a string" #=> "I am a string"
  1424. * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
  1425. * r == s #=> true
  1426. *
  1427. */
  1428. static VALUE
  1429. id2ref(VALUE obj, VALUE objid)
  1430. {
  1431. #if SIZEOF_LONG == SIZEOF_VOIDP
  1432. #define NUM2PTR(x) NUM2ULONG(x)
  1433. #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
  1434. #define NUM2PTR(x) NUM2ULL(x)
  1435. #endif
  1436. rb_objspace_t *objspace = &rb_objspace;
  1437. VALUE ptr;
  1438. void *p0;
  1439. rb_secure(4);
  1440. ptr = NUM2PTR(objid);
  1441. p0 = (void *)ptr;
  1442. if (ptr == Qtrue) return Qtrue;
  1443. if (ptr == Qfalse) return Qfalse;
  1444. if (ptr == Qnil) return Qnil;
  1445. if (FIXNUM_P(ptr)) return (VALUE)ptr;
  1446. if (FLONUM_P(ptr)) return (VALUE)ptr;
  1447. ptr = objid ^ FIXNUM_FLAG; /* unset FIXNUM_FLAG */
  1448. if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
  1449. ID symid = ptr / sizeof(RVALUE);
  1450. if (rb_id2name(symid) == 0)
  1451. rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
  1452. return ID2SYM(symid);
  1453. }
  1454. if (!is_id_value(objspace, ptr)) {
  1455. rb_raise(rb_eRangeError, "%p is not id value", p0);
  1456. }
  1457. if (!is_live_object(objspace, ptr)) {
  1458. rb_raise(rb_eRangeError, "%p is recycled object", p0);
  1459. }
  1460. return (VALUE)ptr;
  1461. }
  1462. /*
  1463. * Document-method: __id__
  1464. * Document-method: object_id
  1465. *
  1466. * call-seq:
  1467. * obj.__id__ -> fixnum
  1468. * obj.object_id -> fixnum
  1469. *
  1470. * Returns an integer identifier for <i>obj</i>. The same number will
  1471. * be returned on all calls to <code>id</code> for a given object, and
  1472. * no two active objects will share an id.
  1473. * <code>Object#object_id</code> is a different concept from the
  1474. * <code>:name</code> notation, which returns the symbol id of
  1475. * <code>name</code>. Replaces the deprecated <code>Object#id</code>.
  1476. */
  1477. /*
  1478. * call-seq:
  1479. * obj.hash -> fixnum
  1480. *
  1481. * Generates a <code>Fixnum</code> hash value for this object. This
  1482. * function must have the property that <code>a.eql?(b)</code> implies
  1483. * <code>a.hash == b.hash</code>. The hash value is used by class
  1484. * <code>Hash</code>. Any hash value that exceeds the capacity of a
  1485. * <code>Fixnum</code> will be truncated before being used.
  1486. */
  1487. VALUE
  1488. rb_obj_id(VALUE obj)
  1489. {
  1490. /*
  1491. * 32-bit VALUE space
  1492. * MSB ------------------------ LSB
  1493. * false 00000000000000000000000000000000
  1494. * true 00000000000000000000000000000010
  1495. * nil 00000000000000000000000000000100
  1496. * undef 00000000000000000000000000000110
  1497. * symbol ssssssssssssssssssssssss00001110
  1498. * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE))
  1499. * fixnum fffffffffffffffffffffffffffffff1
  1500. *
  1501. * object_id space
  1502. * LSB
  1503. * false 00000000000000000000000000000000
  1504. * true 00000000000000000000000000000010
  1505. * nil 00000000000000000000000000000100
  1506. * undef 00000000000000000000000000000110
  1507. * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4)
  1508. * object oooooooooooooooooooooooooooooo0 o...o % A = 0
  1509. * fixnum fffffffffffffffffffffffffffffff1 bignum if required
  1510. *
  1511. * where A = sizeof(RVALUE)/4
  1512. *
  1513. * sizeof(RVALUE) is
  1514. * 20 if 32-bit, double is 4-byte aligned
  1515. * 24 if 32-bit, double is 8-byte aligned
  1516. * 40 if 64-bit
  1517. */
  1518. if (SYMBOL_P(obj)) {
  1519. return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
  1520. }
  1521. else if (FLONUM_P(obj)) {
  1522. #if SIZEOF_LONG == SIZEOF_VOIDP
  1523. return LONG2NUM((SIGNED_VALUE)obj);
  1524. #else
  1525. return LL2NUM((SIGNED_VALUE)obj);
  1526. #endif
  1527. }
  1528. else if (SPECIAL_CONST_P(obj)) {
  1529. return LONG2NUM((SIGNED_VALUE)obj);
  1530. }
  1531. return nonspecial_obj_id(obj);
  1532. }
  1533. static int
  1534. set_zero(st_data_t key, st_data_t val, st_data_t arg)
  1535. {
  1536. VALUE k = (VALUE)key;
  1537. VALUE hash = (VALUE)arg;
  1538. rb_hash_aset(hash, k, INT2FIX(0));
  1539. return ST_CONTINUE;
  1540. }
  1541. /*
  1542. * call-seq:
  1543. * ObjectSpace.count_objects([result_hash]) -> hash
  1544. *
  1545. * Counts objects for each type.
  1546. *
  1547. * It returns a hash as:
  1548. * {:TOTAL=>10000, :FREE=>3011, :T_OBJECT=>6, :T_CLASS=>404, ...}
  1549. *
  1550. * If the optional argument, result_hash, is given,
  1551. * it is overwritten and returned.
1552. * This is intended to avoid the probe effect.
1553. *
1554. * The contents of the returned hash are implementation defined.
1555. * They may be changed in the future.
1556. *
1557. * This method is only expected to work with C Ruby.
  1558. *
  1559. */
  1560. static VALUE
  1561. count_objects(int argc, VALUE *argv, VALUE os)
  1562. {
  1563. rb_objspace_t *objspace = &rb_objspace;
  1564. size_t counts[T_MASK+1];
  1565. size_t freed = 0;
  1566. size_t total = 0;
  1567. size_t i;
  1568. VALUE hash;
  1569. if (rb_scan_args(argc, argv, "01", &hash) == 1) {
  1570. if (!RB_TYPE_P(hash, T_HASH))
  1571. rb_raise(rb_eTypeError, "non-hash given");
  1572. }
  1573. for (i = 0; i <= T_MASK; i++) {
  1574. counts[i] = 0;
  1575. }
  1576. for (i = 0; i < heaps_used; i++) {
  1577. RVALUE *p, *pend;
  1578. p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end;
  1579. for (;p < pend; p++) {
  1580. if (p->as.basic.flags) {
  1581. counts[BUILTIN_TYPE(p)]++;
  1582. }
  1583. else {
  1584. freed++;
  1585. }
  1586. }
  1587. total += objspace->heap.sorted[i].slot->limit;
  1588. }
  1589. if (hash == Qnil) {
  1590. hash = rb_hash_new();
  1591. }
  1592. else if (!RHASH_EMPTY_P(hash)) {
  1593. st_foreach(RHASH_TBL(hash), set_zero, hash);
  1594. }
  1595. rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
  1596. rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
  1597. for (i = 0; i <= T_MASK; i++) {
  1598. VALUE type;
  1599. switch (i) {
  1600. #define COUNT_TYPE(t) case (t): type = ID2SYM(rb_intern(#t)); break;
  1601. COUNT_TYPE(T_NONE);
  1602. COUNT_TYPE(T_OBJECT);
  1603. COUNT_TYPE(T_CLASS);
  1604. COUNT_TYPE(T_MODULE);
  1605. COUNT_TYPE(T_FLOAT);
  1606. COUNT_TYPE(T_STRING);
  1607. COUNT_TYPE(T_REGEXP);
  1608. COUNT_TYPE(T_ARRAY);
  1609. COUNT_TYPE(T_HASH);
  1610. COUNT_TYPE(T_STRUCT);
  1611. COUNT_TYPE(T_BIGNUM);
  1612. COUNT_TYPE(T_FILE);
  1613. COUNT_TYPE(T_DATA);
  1614. COUNT_TYPE(T_MATCH);
  1615. COUNT_TYPE(T_COMPLEX);
  1616. COUNT_TYPE(T_RATIONAL);
  1617. COUNT_TYPE(T_NIL);
  1618. COUNT_TYPE(T_TRUE);
  1619. COUNT_TYPE(T_FALSE);
  1620. COUNT_TYPE(T_SYMBOL);
  1621. COUNT_TYPE(T_FIXNUM);
  1622. COUNT_TYPE(T_UNDEF);
  1623. COUNT_TYPE(T_NODE);
  1624. COUNT_TYPE(T_ICLASS);
  1625. COUNT_TYPE(T_ZOMBIE);
  1626. #undef COUNT_TYPE
  1627. default: type = INT2NUM(i); break;
  1628. }
  1629. if (counts[i])
  1630. rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
  1631. }
  1632. return hash;
  1633. }
  1634. /*
  1635. ------------------------ Garbage Collection ------------------------
  1636. */
  1637. /* Sweeping */
  1638. static VALUE
  1639. lazy_sweep_enable(void)
  1640. {
  1641. rb_objspace_t *objspace = &rb_objspace;
  1642. objspace->flags.dont_lazy_sweep = FALSE;
  1643. return Qnil;
  1644. }
  1645. static void
  1646. gc_clear_slot_bits(struct heaps_slot *slot)
  1647. {
  1648. memset(GET_HEAP_BITMAP(slot->slot), 0,
  1649. HEAP_BITMAP_LIMIT * sizeof(uintptr_t));
  1650. }
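/*
 * slot_sweep() below frees every object in one heap slot that was not
 * marked in the bitmap: ordinary objects go straight onto the slot's
 * freelist, while objects that need deferred work (T_DATA with a dfree
 * function, T_FILE, or objects with finalizers) are turned into
 * T_ZOMBIEs and chained onto deferred_final_list.  If the whole slot
 * ends up empty and the heap already has enough free objects, the slot
 * is unlinked so free_unused_heaps() can release it later.
 */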
  1651. static void
  1652. slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
  1653. {
  1654. size_t free_num = 0, final_num = 0;
  1655. RVALUE *p, *pend;
  1656. RVALUE *final = deferred_final_list;
  1657. int deferred;
  1658. uintptr_t *bits;
  1659. p = sweep_slot->slot; pend = p + sweep_slot->limit;
  1660. bits = GET_HEAP_BITMAP(p);
  1661. while (p < pend) {
  1662. if ((!(MARKED_IN_BITMAP(bits, p))) && BUILTIN_TYPE(p) != T_ZOMBIE) {
  1663. if (p->as.basic.flags) {
  1664. if ((deferred = obj_free(objspace, (VALUE)p)) ||
  1665. (FL_TEST(p, FL_FINALIZE))) {
  1666. if (!deferred) {
  1667. p->as.free.flags = T_ZOMBIE;
  1668. RDATA(p)->dfree = 0;
  1669. }
  1670. p->as.free.next = deferred_final_list;
  1671. deferred_final_list = p;
  1672. assert(BUILTIN_TYPE(p) == T_ZOMBIE);
  1673. final_num++;
  1674. }
  1675. else {
  1676. VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
  1677. p->as.free.flags = 0;
  1678. p->as.free.next = sweep_slot->freelist;
  1679. sweep_slot->freelist = p;
  1680. free_num++;
  1681. }
  1682. }
  1683. else {
  1684. free_num++;
  1685. }
  1686. }
  1687. p++;
  1688. }
  1689. gc_clear_slot_bits(sweep_slot);
  1690. if (final_num + free_num == sweep_slot->limit &&
  1691. objspace->heap.free_num > objspace->heap.do_heap_free) {
  1692. RVALUE *pp;
  1693. for (pp = deferred_final_list; pp != final; pp = pp->as.free.next) {
  1694. RDATA(pp)->dmark = (void (*)(void *))(VALUE)sweep_slot;
  1695. pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */
  1696. }
  1697. sweep_slot->limit = final_num;
  1698. unlink_heap_slot(objspace, sweep_slot);
  1699. }
  1700. else {
  1701. if (free_num > 0) {
  1702. link_free_heap_slot(objspace, sweep_slot);
  1703. }
  1704. else {
  1705. sweep_slot->free_next = NULL;
  1706. }
  1707. objspace->heap.free_num += free_num;
  1708. }
  1709. objspace->heap.final_num += final_num;
  1710. if (deferred_final_list && !finalizing) {
  1711. rb_thread_t *th = GET_THREAD();
  1712. if (th) {
  1713. RUBY_VM_SET_FINALIZER_INTERRUPT(th);
  1714. }
  1715. }
  1716. }
  1717. static int
  1718. ready_to_gc(rb_objspace_t *objspace)
  1719. {
  1720. if (dont_gc || during_gc) {
  1721. if (!has_free_object) {
  1722. if (!heaps_increment(objspace)) {
  1723. set_heaps_increment(objspace);
  1724. heaps_increment(objspace);
  1725. }
  1726. }
  1727. return FALSE;
  1728. }
  1729. return TRUE;
  1730. }
  1731. static void
  1732. before_gc_sweep(rb_objspace_t *objspace)
  1733. {
  1734. objspace->heap.do_heap_free = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.65);
  1735. objspace->heap.free_min = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.2);
  1736. if (objspace->heap.free_min < initial_free_min) {
  1737. objspace->heap.do_heap_free = heaps_used * HEAP_OBJ_LIMIT;
  1738. objspace->heap.free_min = initial_free_min;
  1739. }
  1740. objspace->heap.sweep_slots = heaps;
  1741. objspace->heap.free_num = 0;
  1742. objspace->heap.free_slots = NULL;
  1743. /* sweep unlinked method entries */
  1744. if (GET_VM()->unlinked_method_entry_list) {
  1745. rb_sweep_method_entry(GET_VM());
  1746. }
  1747. }
  1748. static void
  1749. after_gc_sweep(rb_objspace_t *objspace)
  1750. {
  1751. size_t inc;
  1752. gc_prof_set_malloc_info(objspace);
  1753. if (objspace->heap.free_num < objspace->heap.free_min) {
  1754. set_heaps_increment(objspace);
  1755. heaps_increment(objspace);
  1756. }
  1757. inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
  1758. if (inc > malloc_limit) {
  1759. malloc_limit += (size_t)((inc - malloc_limit) * (double)objspace->heap.live_num / (heaps_used * HEAP_OBJ_LIMIT));
  1760. if (malloc_limit < initial_malloc_limit) malloc_limit = initial_malloc_limit;
  1761. }
  1762. free_unused_heaps(objspace);
  1763. }
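/*
 * lazy_sweep() sweeps slots one at a time and stops as soon as a free
 * object becomes available, spreading sweep cost across allocations.
 * It returns TRUE when allocation can proceed and FALSE when every
 * remaining slot has been swept without finding space.
 */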
  1764. static int
  1765. lazy_sweep(rb_objspace_t *objspace)
  1766. {
  1767. struct heaps_slot *next;
  1768. heaps_increment(objspace);
  1769. while (objspace->heap.sweep_slots) {
  1770. next = objspace->heap.sweep_slots->next;
  1771. slot_sweep(objspace, objspace->heap.sweep_slots);
  1772. objspace->heap.sweep_slots = next;
  1773. if (has_free_object) {
  1774. during_gc = 0;
  1775. return TRUE;
  1776. }
  1777. }
  1778. return FALSE;
  1779. }
  1780. static void
  1781. rest_sweep(rb_objspace_t *objspace)
  1782. {
  1783. if (objspace->heap.sweep_slots) {
  1784. while (objspace->heap.sweep_slots) {
  1785. lazy_sweep(objspace);
  1786. }
  1787. after_gc_sweep(objspace);
  1788. }
  1789. }
  1790. static void gc_marks(rb_objspace_t *objspace);
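/*
 * Allocator entry point: finish the sweep already in progress if it
 * can yield a free object; otherwise run a mark phase, start a fresh
 * lazy sweep, and schedule heap growth when marking shows too few
 * free slots.
 */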
  1791. static int
  1792. gc_lazy_sweep(rb_objspace_t *objspace)
  1793. {
  1794. int res;
  1795. if (objspace->flags.dont_lazy_sweep)
  1796. return garbage_collect(objspace);
  1797. if (!ready_to_gc(objspace)) return TRUE;
  1798. during_gc++;
  1799. gc_prof_timer_start(objspace);
  1800. gc_prof_sweep_timer_start(objspace);
  1801. if (objspace->heap.sweep_slots) {
  1802. res = lazy_sweep(objspace);
  1803. if (res) {
  1804. gc_prof_sweep_timer_stop(objspace);
  1805. gc_prof_set_malloc_info(objspace);
  1806. gc_prof_timer_stop(objspace, Qfalse);
  1807. return res;
  1808. }
  1809. after_gc_sweep(objspace);
  1810. }
  1811. else {
  1812. if (heaps_increment(objspace)) {
  1813. during_gc = 0;
  1814. return TRUE;
  1815. }
  1816. }
  1817. gc_marks(objspace);
  1818. before_gc_sweep(objspace);
  1819. if (objspace->heap.free_min > (heaps_used * HEAP_OBJ_LIMIT - objspace->heap.live_num)) {
  1820. set_heaps_increment(objspace);
  1821. }
  1822. gc_prof_sweep_timer_start(objspace);
  1823. if (!(res = lazy_sweep(objspace))) {
  1824. after_gc_sweep(objspace);
  1825. if (has_free_object) {
  1826. res = TRUE;
  1827. during_gc = 0;
  1828. }
  1829. }
  1830. gc_prof_sweep_timer_stop(objspace);
  1831. gc_prof_timer_stop(objspace, Qtrue);
  1832. return res;
  1833. }
  1834. static void
  1835. gc_sweep(rb_objspace_t *objspace)
  1836. {
  1837. struct heaps_slot *next;
  1838. before_gc_sweep(objspace);
  1839. while (objspace->heap.sweep_slots) {
  1840. next = objspace->heap.sweep_slots->next;
  1841. slot_sweep(objspace, objspace->heap.sweep_slots);
  1842. objspace->heap.sweep_slots = next;
  1843. }
  1844. after_gc_sweep(objspace);
  1845. during_gc = 0;
  1846. }
  1847. /* Marking stack */
  1848. static void push_mark_stack(mark_stack_t *, VALUE);
  1849. static int pop_mark_stack(mark_stack_t *, VALUE *);
  1850. static void shrink_stack_chunk_cache(mark_stack_t *stack);
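/*
 * The mark stack is a linked list of fixed-size chunks of
 * STACK_CHUNK_SIZE entries.  Exhausted chunks are kept in a small
 * cache (stack->cache) so the marking loop rarely has to call
 * malloc()/free().
 */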
  1851. static stack_chunk_t *
  1852. stack_chunk_alloc(void)
  1853. {
  1854. stack_chunk_t *res;
  1855. res = malloc(sizeof(stack_chunk_t));
  1856. if (!res)
  1857. rb_memerror();
  1858. return res;
  1859. }
  1860. static inline int
1861. is_mark_stack_empty(mark_stack_t *stack)
  1862. {
  1863. return stack->chunk == NULL;
  1864. }
  1865. static void
  1866. add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
  1867. {
  1868. chunk->next = stack->cache;
  1869. stack->cache = chunk;
  1870. stack->cache_size++;
  1871. }
  1872. static void
  1873. shrink_stack_chunk_cache(mark_stack_t *stack)
  1874. {
  1875. stack_chunk_t *chunk;
  1876. if (stack->unused_cache_size > (stack->cache_size/2)) {
  1877. chunk = stack->cache;
  1878. stack->cache = stack->cache->next;
  1879. stack->cache_size--;
  1880. free(chunk);
  1881. }
  1882. stack->unused_cache_size = stack->cache_size;
  1883. }
  1884. static void
  1885. push_mark_stack_chunk(mark_stack_t *stack)
  1886. {
  1887. stack_chunk_t *next;
  1888. assert(stack->index == stack->limit);
  1889. if (stack->cache_size > 0) {
  1890. next = stack->cache;
  1891. stack->cache = stack->cache->next;
  1892. stack->cache_size--;
  1893. if (stack->unused_cache_size > stack->cache_size)
  1894. stack->unused_cache_size = stack->cache_size;
  1895. }
  1896. else {
  1897. next = stack_chunk_alloc();
  1898. }
  1899. next->next = stack->chunk;
  1900. stack->chunk = next;
  1901. stack->index = 0;
  1902. }
  1903. static void
  1904. pop_mark_stack_chunk(mark_stack_t *stack)
  1905. {
  1906. stack_chunk_t *prev;
  1907. prev = stack->chunk->next;
  1908. assert(stack->index == 0);
  1909. add_stack_chunk_cache(stack, stack->chunk);
  1910. stack->chunk = prev;
  1911. stack->index = stack->limit;
  1912. }
  1913. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  1914. static void
  1915. free_stack_chunks(mark_stack_t *stack)
  1916. {
  1917. stack_chunk_t *chunk = stack->chunk;
  1918. stack_chunk_t *next = NULL;
  1919. while (chunk != NULL) {
  1920. next = chunk->next;
  1921. free(chunk);
  1922. chunk = next;
  1923. }
  1924. }
  1925. #endif
  1926. static void
  1927. push_mark_stack(mark_stack_t *stack, VALUE data)
  1928. {
  1929. if (stack->index == stack->limit) {
  1930. push_mark_stack_chunk(stack);
  1931. }
  1932. stack->chunk->data[stack->index++] = data;
  1933. }
  1934. static int
  1935. pop_mark_stack(mark_stack_t *stack, VALUE *data)
  1936. {
1937. if (is_mark_stack_empty(stack)) {
  1938. return FALSE;
  1939. }
  1940. if (stack->index == 1) {
  1941. *data = stack->chunk->data[--stack->index];
  1942. pop_mark_stack_chunk(stack);
  1943. return TRUE;
  1944. }
  1945. *data = stack->chunk->data[--stack->index];
  1946. return TRUE;
  1947. }
  1948. static void
  1949. init_mark_stack(mark_stack_t *stack)
  1950. {
  1951. int i;
  1952. push_mark_stack_chunk(stack);
  1953. stack->limit = STACK_CHUNK_SIZE;
  1954. for(i=0; i < 4; i++) {
  1955. add_stack_chunk_cache(stack, stack_chunk_alloc());
  1956. }
  1957. stack->unused_cache_size = stack->cache_size;
  1958. }
  1959. /* Marking */
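/*
 * Mark bits live in a per-slot bitmap (GET_HEAP_BITMAP) rather than in
 * each object's flags; MARK_IN_BITMAP below sets the bit for a given
 * object pointer.  Keeping marks out of the object pages leaves them
 * untouched during GC, which is friendlier to copy-on-write sharing.
 */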
  1960. #define MARK_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] = bits[BITMAP_INDEX(p)] | ((uintptr_t)1 << BITMAP_OFFSET(p)))
  1961. #ifdef __ia64
  1962. #define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine_stack_end), th->machine_register_stack_end = rb_ia64_bsp())
  1963. #else
  1964. #define SET_STACK_END SET_MACHINE_STACK_END(&th->machine_stack_end)
  1965. #endif
  1966. #define STACK_START (th->machine_stack_start)
  1967. #define STACK_END (th->machine_stack_end)
  1968. #define STACK_LEVEL_MAX (th->machine_stack_maxsize/sizeof(VALUE))
  1969. #if STACK_GROW_DIRECTION < 0
  1970. # define STACK_LENGTH (size_t)(STACK_START - STACK_END)
  1971. #elif STACK_GROW_DIRECTION > 0
  1972. # define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
  1973. #else
  1974. # define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
  1975. : (size_t)(STACK_END - STACK_START + 1))
  1976. #endif
  1977. #if !STACK_GROW_DIRECTION
  1978. int ruby_stack_grow_direction;
  1979. int
  1980. ruby_get_stack_grow_direction(volatile VALUE *addr)
  1981. {
  1982. VALUE *end;
  1983. SET_MACHINE_STACK_END(&end);
  1984. if (end > addr) return ruby_stack_grow_direction = 1;
  1985. return ruby_stack_grow_direction = -1;
  1986. }
  1987. #endif
  1988. size_t
  1989. ruby_stack_length(VALUE **p)
  1990. {
  1991. rb_thread_t *th = GET_THREAD();
  1992. SET_STACK_END;
  1993. if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
  1994. return STACK_LENGTH;
  1995. }
  1996. #if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
  1997. static int
  1998. stack_check(int water_mark)
  1999. {
  2000. int ret;
  2001. rb_thread_t *th = GET_THREAD();
  2002. SET_STACK_END;
  2003. ret = STACK_LENGTH > STACK_LEVEL_MAX - water_mark;
  2004. #ifdef __ia64
  2005. if (!ret) {
  2006. ret = (VALUE*)rb_ia64_bsp() - th->machine_register_stack_start >
  2007. th->machine_register_stack_maxsize/sizeof(VALUE) - water_mark;
  2008. }
  2009. #endif
  2010. return ret;
  2011. }
  2012. #endif
  2013. #define STACKFRAME_FOR_CALL_CFUNC 512
  2014. int
  2015. ruby_stack_check(void)
  2016. {
  2017. #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
  2018. return 0;
  2019. #else
  2020. return stack_check(STACKFRAME_FOR_CALL_CFUNC);
  2021. #endif
  2022. }
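/*
 * Conservative stack/register marking: every word in the given range
 * is treated as a potential VALUE and marked only if it actually
 * points into the object heap (is_pointer_to_heap).
 */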
  2023. static void
  2024. mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n)
  2025. {
  2026. VALUE v;
  2027. while (n--) {
  2028. v = *x;
  2029. VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v));
  2030. if (is_pointer_to_heap(objspace, (void *)v)) {
  2031. gc_mark(objspace, v);
  2032. }
  2033. x++;
  2034. }
  2035. }
  2036. static void
  2037. gc_mark_locations(rb_objspace_t *objspace, VALUE *start, VALUE *end)
  2038. {
  2039. long n;
  2040. if (end <= start) return;
  2041. n = end - start;
  2042. mark_locations_array(objspace, start, n);
  2043. }
  2044. void
  2045. rb_gc_mark_locations(VALUE *start, VALUE *end)
  2046. {
  2047. gc_mark_locations(&rb_objspace, start, end);
  2048. }
  2049. #define rb_gc_mark_locations(start, end) gc_mark_locations(objspace, (start), (end))
  2050. struct mark_tbl_arg {
  2051. rb_objspace_t *objspace;
  2052. };
  2053. static int
  2054. mark_entry(st_data_t key, st_data_t value, st_data_t data)
  2055. {
  2056. struct mark_tbl_arg *arg = (void*)data;
  2057. gc_mark(arg->objspace, (VALUE)value);
  2058. return ST_CONTINUE;
  2059. }
  2060. static void
  2061. mark_tbl(rb_objspace_t *objspace, st_table *tbl)
  2062. {
  2063. struct mark_tbl_arg arg;
  2064. if (!tbl || tbl->num_entries == 0) return;
  2065. arg.objspace = objspace;
  2066. st_foreach(tbl, mark_entry, (st_data_t)&arg);
  2067. }
  2068. static int
  2069. mark_key(st_data_t key, st_data_t value, st_data_t data)
  2070. {
  2071. struct mark_tbl_arg *arg = (void*)data;
  2072. gc_mark(arg->objspace, (VALUE)key);
  2073. return ST_CONTINUE;
  2074. }
  2075. static void
  2076. mark_set(rb_objspace_t *objspace, st_table *tbl)
  2077. {
  2078. struct mark_tbl_arg arg;
  2079. if (!tbl) return;
  2080. arg.objspace = objspace;
  2081. st_foreach(tbl, mark_key, (st_data_t)&arg);
  2082. }
  2083. void
  2084. rb_mark_set(st_table *tbl)
  2085. {
  2086. mark_set(&rb_objspace, tbl);
  2087. }
  2088. static int
  2089. mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
  2090. {
  2091. struct mark_tbl_arg *arg = (void*)data;
  2092. gc_mark(arg->objspace, (VALUE)key);
  2093. gc_mark(arg->objspace, (VALUE)value);
  2094. return ST_CONTINUE;
  2095. }
  2096. static void
  2097. mark_hash(rb_objspace_t *objspace, st_table *tbl)
  2098. {
  2099. struct mark_tbl_arg arg;
  2100. if (!tbl) return;
  2101. arg.objspace = objspace;
  2102. st_foreach(tbl, mark_keyvalue, (st_data_t)&arg);
  2103. }
  2104. void
  2105. rb_mark_hash(st_table *tbl)
  2106. {
  2107. mark_hash(&rb_objspace, tbl);
  2108. }
  2109. static void
  2110. mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
  2111. {
  2112. const rb_method_definition_t *def = me->def;
  2113. gc_mark(objspace, me->klass);
  2114. if (!def) return;
  2115. switch (def->type) {
  2116. case VM_METHOD_TYPE_ISEQ:
  2117. gc_mark(objspace, def->body.iseq->self);
  2118. break;
  2119. case VM_METHOD_TYPE_BMETHOD:
  2120. gc_mark(objspace, def->body.proc);
  2121. break;
  2122. case VM_METHOD_TYPE_ATTRSET:
  2123. case VM_METHOD_TYPE_IVAR:
  2124. gc_mark(objspace, def->body.attr.location);
  2125. break;
  2126. default:
  2127. break; /* ignore */
  2128. }
  2129. }
  2130. void
  2131. rb_mark_method_entry(const rb_method_entry_t *me)
  2132. {
  2133. mark_method_entry(&rb_objspace, me);
  2134. }
  2135. static int
  2136. mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data)
  2137. {
  2138. struct mark_tbl_arg *arg = (void*)data;
  2139. mark_method_entry(arg->objspace, me);
  2140. return ST_CONTINUE;
  2141. }
  2142. static void
  2143. mark_m_tbl(rb_objspace_t *objspace, st_table *tbl)
  2144. {
  2145. struct mark_tbl_arg arg;
  2146. if (!tbl) return;
  2147. arg.objspace = objspace;
  2148. st_foreach(tbl, mark_method_entry_i, (st_data_t)&arg);
  2149. }
  2150. static int
  2151. mark_const_entry_i(ID key, const rb_const_entry_t *ce, st_data_t data)
  2152. {
  2153. struct mark_tbl_arg *arg = (void*)data;
  2154. gc_mark(arg->objspace, ce->value);
  2155. gc_mark(arg->objspace, ce->file);
  2156. return ST_CONTINUE;
  2157. }
  2158. static void
  2159. mark_const_tbl(rb_objspace_t *objspace, st_table *tbl)
  2160. {
  2161. struct mark_tbl_arg arg;
  2162. if (!tbl) return;
  2163. arg.objspace = objspace;
  2164. st_foreach(tbl, mark_const_entry_i, (st_data_t)&arg);
  2165. }
  2166. #if STACK_GROW_DIRECTION < 0
  2167. #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
  2168. #elif STACK_GROW_DIRECTION > 0
  2169. #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
  2170. #else
  2171. #define GET_STACK_BOUNDS(start, end, appendix) \
  2172. ((STACK_END < STACK_START) ? \
  2173. ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
  2174. #endif
  2175. #define numberof(array) (int)(sizeof(array) / sizeof((array)[0]))
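/*
 * Marks the running thread's machine context: callee-saved registers
 * are spilled into a jmp_buf via rb_setjmp() and scanned together
 * with the C stack between STACK_START and STACK_END.
 */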
  2176. static void
  2177. mark_current_machine_context(rb_objspace_t *objspace, rb_thread_t *th)
  2178. {
  2179. union {
  2180. rb_jmp_buf j;
  2181. VALUE v[sizeof(rb_jmp_buf) / sizeof(VALUE)];
  2182. } save_regs_gc_mark;
  2183. VALUE *stack_start, *stack_end;
  2184. FLUSH_REGISTER_WINDOWS;
  2185. /* This assumes that all registers are saved into the jmp_buf (and stack) */
  2186. rb_setjmp(save_regs_gc_mark.j);
  2187. SET_STACK_END;
  2188. GET_STACK_BOUNDS(stack_start, stack_end, 1);
  2189. mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v));
  2190. rb_gc_mark_locations(stack_start, stack_end);
  2191. #ifdef __ia64
  2192. rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end);
  2193. #endif
  2194. #if defined(__mc68000__)
  2195. mark_locations_array(objspace, (VALUE*)((char*)STACK_END + 2),
  2196. (STACK_START - STACK_END));
  2197. #endif
  2198. }
  2199. void
  2200. rb_gc_mark_machine_stack(rb_thread_t *th)
  2201. {
  2202. rb_objspace_t *objspace = &rb_objspace;
  2203. VALUE *stack_start, *stack_end;
  2204. GET_STACK_BOUNDS(stack_start, stack_end, 0);
  2205. rb_gc_mark_locations(stack_start, stack_end);
  2206. #ifdef __ia64
  2207. rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end);
  2208. #endif
  2209. }
  2210. void
  2211. rb_mark_tbl(st_table *tbl)
  2212. {
  2213. mark_tbl(&rb_objspace, tbl);
  2214. }
  2215. void
  2216. rb_gc_mark_maybe(VALUE obj)
  2217. {
  2218. if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
  2219. gc_mark(&rb_objspace, obj);
  2220. }
  2221. }
  2222. static int
  2223. gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr)
  2224. {
  2225. register uintptr_t *bits = GET_HEAP_BITMAP(ptr);
  2226. if (MARKED_IN_BITMAP(bits, ptr)) return 0;
  2227. MARK_IN_BITMAP(bits, ptr);
  2228. objspace->heap.live_num++;
  2229. return 1;
  2230. }
  2231. static int
  2232. markable_object_p(rb_objspace_t *objspace, VALUE ptr)
  2233. {
  2234. register RVALUE *obj = RANY(ptr);
  2235. if (rb_special_const_p(ptr)) return 0; /* special const not marked */
  2236. if (obj->as.basic.flags == 0) return 0 ; /* free cell */
  2237. return 1;
  2238. }
  2239. static void
  2240. gc_mark(rb_objspace_t *objspace, VALUE ptr)
  2241. {
  2242. if (!markable_object_p(objspace, ptr)) {
  2243. return;
  2244. }
  2245. if (LIKELY(objspace->mark_func_data == 0)) {
  2246. if (!gc_mark_ptr(objspace, ptr)) return; /* already marked */
  2247. push_mark_stack(&objspace->mark_stack, ptr);
  2248. }
  2249. else {
  2250. objspace->mark_func_data->mark_func(objspace, ptr);
  2251. }
  2252. }
  2253. void
  2254. rb_gc_mark(VALUE ptr)
  2255. {
  2256. gc_mark(&rb_objspace, ptr);
  2257. }
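/*
 * gc_mark() only sets the mark bit and pushes the object; the actual
 * traversal happens here.  For the last reference of each object the
 * code reassigns `ptr` and jumps back to `again` instead of recursing,
 * so long chains (shared strings, arrays, superclass links) are walked
 * iteratively.
 */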
  2258. static void
  2259. gc_mark_children(rb_objspace_t *objspace, VALUE ptr)
  2260. {
  2261. register RVALUE *obj = RANY(ptr);
  2262. goto marking; /* skip */
  2263. again:
  2264. if (LIKELY(objspace->mark_func_data == 0)) {
  2265. obj = RANY(ptr);
  2266. if (!markable_object_p(objspace, ptr)) return;
  2267. if (!gc_mark_ptr(objspace, ptr)) return; /* already marked */
  2268. }
  2269. else {
  2270. gc_mark(objspace, ptr);
  2271. return;
  2272. }
  2273. marking:
  2274. if (FL_TEST(obj, FL_EXIVAR)) {
  2275. rb_mark_generic_ivar(ptr);
  2276. }
  2277. switch (BUILTIN_TYPE(obj)) {
  2278. case T_NIL:
  2279. case T_FIXNUM:
  2280. rb_bug("rb_gc_mark() called for broken object");
  2281. break;
  2282. case T_NODE:
  2283. switch (nd_type(obj)) {
  2284. case NODE_IF: /* 1,2,3 */
  2285. case NODE_FOR:
  2286. case NODE_ITER:
  2287. case NODE_WHEN:
  2288. case NODE_MASGN:
  2289. case NODE_RESCUE:
  2290. case NODE_RESBODY:
  2291. case NODE_CLASS:
  2292. case NODE_BLOCK_PASS:
  2293. gc_mark(objspace, (VALUE)obj->as.node.u2.node);
  2294. /* fall through */
  2295. case NODE_BLOCK: /* 1,3 */
  2296. case NODE_ARRAY:
  2297. case NODE_DSTR:
  2298. case NODE_DXSTR:
  2299. case NODE_DREGX:
  2300. case NODE_DREGX_ONCE:
  2301. case NODE_ENSURE:
  2302. case NODE_CALL:
  2303. case NODE_DEFS:
  2304. case NODE_OP_ASGN1:
  2305. gc_mark(objspace, (VALUE)obj->as.node.u1.node);
  2306. /* fall through */
  2307. case NODE_SUPER: /* 3 */
  2308. case NODE_FCALL:
  2309. case NODE_DEFN:
  2310. case NODE_ARGS_AUX:
  2311. ptr = (VALUE)obj->as.node.u3.node;
  2312. goto again;
  2313. case NODE_WHILE: /* 1,2 */
  2314. case NODE_UNTIL:
  2315. case NODE_AND:
  2316. case NODE_OR:
  2317. case NODE_CASE:
  2318. case NODE_SCLASS:
  2319. case NODE_DOT2:
  2320. case NODE_DOT3:
  2321. case NODE_FLIP2:
  2322. case NODE_FLIP3:
  2323. case NODE_MATCH2:
  2324. case NODE_MATCH3:
  2325. case NODE_OP_ASGN_OR:
  2326. case NODE_OP_ASGN_AND:
  2327. case NODE_MODULE:
  2328. case NODE_ALIAS:
  2329. case NODE_VALIAS:
  2330. case NODE_ARGSCAT:
  2331. gc_mark(objspace, (VALUE)obj->as.node.u1.node);
  2332. /* fall through */
  2333. case NODE_GASGN: /* 2 */
  2334. case NODE_LASGN:
  2335. case NODE_DASGN:
  2336. case NODE_DASGN_CURR:
  2337. case NODE_IASGN:
  2338. case NODE_IASGN2:
  2339. case NODE_CVASGN:
  2340. case NODE_COLON3:
  2341. case NODE_OPT_N:
  2342. case NODE_EVSTR:
  2343. case NODE_UNDEF:
  2344. case NODE_POSTEXE:
  2345. ptr = (VALUE)obj->as.node.u2.node;
  2346. goto again;
  2347. case NODE_HASH: /* 1 */
  2348. case NODE_LIT:
  2349. case NODE_STR:
  2350. case NODE_XSTR:
  2351. case NODE_DEFINED:
  2352. case NODE_MATCH:
  2353. case NODE_RETURN:
  2354. case NODE_BREAK:
  2355. case NODE_NEXT:
  2356. case NODE_YIELD:
  2357. case NODE_COLON2:
  2358. case NODE_SPLAT:
  2359. case NODE_TO_ARY:
  2360. ptr = (VALUE)obj->as.node.u1.node;
  2361. goto again;
  2362. case NODE_SCOPE: /* 2,3 */
  2363. case NODE_CDECL:
  2364. case NODE_OPT_ARG:
  2365. gc_mark(objspace, (VALUE)obj->as.node.u3.node);
  2366. ptr = (VALUE)obj->as.node.u2.node;
  2367. goto again;
  2368. case NODE_ARGS: /* custom */
  2369. {
  2370. struct rb_args_info *args = obj->as.node.u3.args;
  2371. if (args) {
  2372. if (args->pre_init) gc_mark(objspace, (VALUE)args->pre_init);
  2373. if (args->post_init) gc_mark(objspace, (VALUE)args->post_init);
  2374. if (args->opt_args) gc_mark(objspace, (VALUE)args->opt_args);
  2375. if (args->kw_args) gc_mark(objspace, (VALUE)args->kw_args);
  2376. if (args->kw_rest_arg) gc_mark(objspace, (VALUE)args->kw_rest_arg);
  2377. }
  2378. }
  2379. ptr = (VALUE)obj->as.node.u2.node;
  2380. goto again;
  2381. case NODE_ZARRAY: /* - */
  2382. case NODE_ZSUPER:
  2383. case NODE_VCALL:
  2384. case NODE_GVAR:
  2385. case NODE_LVAR:
  2386. case NODE_DVAR:
  2387. case NODE_IVAR:
  2388. case NODE_CVAR:
  2389. case NODE_NTH_REF:
  2390. case NODE_BACK_REF:
  2391. case NODE_REDO:
  2392. case NODE_RETRY:
  2393. case NODE_SELF:
  2394. case NODE_NIL:
  2395. case NODE_TRUE:
  2396. case NODE_FALSE:
  2397. case NODE_ERRINFO:
  2398. case NODE_BLOCK_ARG:
  2399. break;
  2400. case NODE_ALLOCA:
  2401. mark_locations_array(objspace,
  2402. (VALUE*)obj->as.node.u1.value,
  2403. obj->as.node.u3.cnt);
  2404. gc_mark(objspace, (VALUE)obj->as.node.u2.node);
  2405. break;
  2406. case NODE_CREF:
  2407. gc_mark(objspace, obj->as.node.nd_refinements);
  2408. gc_mark(objspace, (VALUE)obj->as.node.u1.node);
  2409. ptr = (VALUE)obj->as.node.u3.node;
  2410. goto again;
  2411. default: /* unlisted NODE */
  2412. if (is_pointer_to_heap(objspace, obj->as.node.u1.node)) {
  2413. gc_mark(objspace, (VALUE)obj->as.node.u1.node);
  2414. }
  2415. if (is_pointer_to_heap(objspace, obj->as.node.u2.node)) {
  2416. gc_mark(objspace, (VALUE)obj->as.node.u2.node);
  2417. }
  2418. if (is_pointer_to_heap(objspace, obj->as.node.u3.node)) {
  2419. gc_mark(objspace, (VALUE)obj->as.node.u3.node);
  2420. }
  2421. }
  2422. return; /* no need to mark class. */
  2423. }
  2424. gc_mark(objspace, obj->as.basic.klass);
  2425. switch (BUILTIN_TYPE(obj)) {
  2426. case T_ICLASS:
  2427. case T_CLASS:
  2428. case T_MODULE:
  2429. mark_m_tbl(objspace, RCLASS_M_TBL(obj));
  2430. if (!RCLASS_EXT(obj)) break;
  2431. mark_tbl(objspace, RCLASS_IV_TBL(obj));
  2432. mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
  2433. ptr = RCLASS_SUPER(obj);
  2434. goto again;
  2435. case T_ARRAY:
  2436. if (FL_TEST(obj, ELTS_SHARED)) {
  2437. ptr = obj->as.array.as.heap.aux.shared;
  2438. goto again;
  2439. }
  2440. else {
  2441. long i, len = RARRAY_LEN(obj);
  2442. VALUE *ptr = RARRAY_PTR(obj);
  2443. for (i=0; i < len; i++) {
  2444. gc_mark(objspace, *ptr++);
  2445. }
  2446. }
  2447. break;
  2448. case T_HASH:
  2449. mark_hash(objspace, obj->as.hash.ntbl);
  2450. ptr = obj->as.hash.ifnone;
  2451. goto again;
  2452. case T_STRING:
  2453. #define STR_ASSOC FL_USER3 /* copied from string.c */
  2454. if (FL_TEST(obj, RSTRING_NOEMBED) && FL_ANY(obj, ELTS_SHARED|STR_ASSOC)) {
  2455. ptr = obj->as.string.as.heap.aux.shared;
  2456. goto again;
  2457. }
  2458. break;
  2459. case T_DATA:
  2460. if (RTYPEDDATA_P(obj)) {
  2461. RUBY_DATA_FUNC mark_func = obj->as.typeddata.type->function.dmark;
  2462. if (mark_func) (*mark_func)(DATA_PTR(obj));
  2463. }
  2464. else {
  2465. if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
  2466. }
  2467. break;
  2468. case T_OBJECT:
  2469. {
  2470. long i, len = ROBJECT_NUMIV(obj);
  2471. VALUE *ptr = ROBJECT_IVPTR(obj);
  2472. for (i = 0; i < len; i++) {
  2473. gc_mark(objspace, *ptr++);
  2474. }
  2475. }
  2476. break;
  2477. case T_FILE:
  2478. if (obj->as.file.fptr) {
  2479. gc_mark(objspace, obj->as.file.fptr->pathv);
  2480. gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing);
  2481. gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat);
  2482. gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts);
  2483. gc_mark(objspace, obj->as.file.fptr->encs.ecopts);
  2484. gc_mark(objspace, obj->as.file.fptr->write_lock);
  2485. }
  2486. break;
  2487. case T_REGEXP:
  2488. ptr = obj->as.regexp.src;
  2489. goto again;
  2490. case T_FLOAT:
  2491. case T_BIGNUM:
  2492. case T_ZOMBIE:
  2493. break;
  2494. case T_MATCH:
  2495. gc_mark(objspace, obj->as.match.regexp);
  2496. if (obj->as.match.str) {
  2497. ptr = obj->as.match.str;
  2498. goto again;
  2499. }
  2500. break;
  2501. case T_RATIONAL:
  2502. gc_mark(objspace, obj->as.rational.num);
  2503. ptr = obj->as.rational.den;
  2504. goto again;
  2505. case T_COMPLEX:
  2506. gc_mark(objspace, obj->as.complex.real);
  2507. ptr = obj->as.complex.imag;
  2508. goto again;
  2509. case T_STRUCT:
  2510. {
  2511. long len = RSTRUCT_LEN(obj);
  2512. VALUE *ptr = RSTRUCT_PTR(obj);
  2513. while (len--) {
  2514. gc_mark(objspace, *ptr++);
  2515. }
  2516. }
  2517. break;
  2518. default:
  2519. rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
  2520. BUILTIN_TYPE(obj), (void *)obj,
  2521. is_pointer_to_heap(objspace, obj) ? "corrupted object" : "non object");
  2522. }
  2523. }
  2524. static void
  2525. gc_mark_stacked_objects(rb_objspace_t *objspace)
  2526. {
  2527. mark_stack_t *mstack = &objspace->mark_stack;
  2528. VALUE obj = 0;
  2529. if (!mstack->index) return;
  2530. while (pop_mark_stack(mstack, &obj)) {
  2531. gc_mark_children(objspace, obj);
  2532. }
  2533. shrink_stack_chunk_cache(mstack);
  2534. }
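/*
 * gc_marks() marks every GC root -- the VM, the finalizer table, the
 * current machine context, symbols, encodings, registered global
 * addresses, end procs, the global and class tables, generic ivars and
 * unlinked method entries -- and then drains the mark stack.
 */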
  2535. static void
  2536. gc_marks(rb_objspace_t *objspace)
  2537. {
  2538. struct gc_list *list;
  2539. rb_thread_t *th = GET_THREAD();
  2540. gc_prof_mark_timer_start(objspace);
  2541. objspace->heap.live_num = 0;
  2542. objspace->count++;
  2543. SET_STACK_END;
  2544. th->vm->self ? rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm);
  2545. mark_tbl(objspace, finalizer_table);
  2546. mark_current_machine_context(objspace, th);
  2547. rb_gc_mark_symbols();
  2548. rb_gc_mark_encodings();
  2549. /* mark protected global variables */
  2550. for (list = global_List; list; list = list->next) {
  2551. rb_gc_mark_maybe(*list->varptr);
  2552. }
  2553. rb_mark_end_proc();
  2554. rb_gc_mark_global_tbl();
  2555. mark_tbl(objspace, rb_class_tbl);
  2556. /* mark generic instance variables for special constants */
  2557. rb_mark_generic_ivar_tbl();
  2558. rb_gc_mark_parser();
  2559. rb_gc_mark_unlinked_live_method_entries(th->vm);
  2560. /* marking-loop */
  2561. gc_mark_stacked_objects(objspace);
  2562. gc_prof_mark_timer_stop(objspace);
  2563. }
  2564. /* GC */
  2565. void
  2566. rb_gc_force_recycle(VALUE p)
  2567. {
  2568. rb_objspace_t *objspace = &rb_objspace;
  2569. struct heaps_slot *slot;
  2570. if (MARKED_IN_BITMAP(GET_HEAP_BITMAP(p), p)) {
  2571. add_slot_local_freelist(objspace, (RVALUE *)p);
  2572. }
  2573. else {
  2574. gc_prof_dec_live_num(objspace);
  2575. slot = add_slot_local_freelist(objspace, (RVALUE *)p);
  2576. if (slot->free_next == NULL) {
  2577. link_free_heap_slot(objspace, slot);
  2578. }
  2579. }
  2580. }
  2581. void
  2582. rb_gc_register_mark_object(VALUE obj)
  2583. {
  2584. VALUE ary = GET_THREAD()->vm->mark_object_ary;
  2585. rb_ary_push(ary, obj);
  2586. }
  2587. void
  2588. rb_gc_register_address(VALUE *addr)
  2589. {
  2590. rb_objspace_t *objspace = &rb_objspace;
  2591. struct gc_list *tmp;
  2592. tmp = ALLOC(struct gc_list);
  2593. tmp->next = global_List;
  2594. tmp->varptr = addr;
  2595. global_List = tmp;
  2596. }
  2597. void
  2598. rb_gc_unregister_address(VALUE *addr)
  2599. {
  2600. rb_objspace_t *objspace = &rb_objspace;
  2601. struct gc_list *tmp = global_List;
  2602. if (tmp->varptr == addr) {
  2603. global_List = tmp->next;
  2604. xfree(tmp);
  2605. return;
  2606. }
  2607. while (tmp->next) {
  2608. if (tmp->next->varptr == addr) {
  2609. struct gc_list *t = tmp->next;
  2610. tmp->next = tmp->next->next;
  2611. xfree(t);
  2612. break;
  2613. }
  2614. tmp = tmp->next;
  2615. }
  2616. }
  2617. #define GC_NOTIFY 0
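/*
 * Full collection: any pending lazy sweep is finished first
 * (rest_sweep), then the mark phase and a complete, non-lazy sweep run
 * back to back.
 */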
  2618. static int
  2619. garbage_collect(rb_objspace_t *objspace)
  2620. {
  2621. struct mark_func_data_struct *prev_mark_func_data;
  2622. if (GC_NOTIFY) printf("start garbage_collect()\n");
  2623. if (!heaps) {
  2624. return FALSE;
  2625. }
  2626. if (!ready_to_gc(objspace)) {
  2627. return TRUE;
  2628. }
  2629. gc_prof_timer_start(objspace);
  2630. prev_mark_func_data = objspace->mark_func_data;
  2631. objspace->mark_func_data = 0;
  2632. rest_sweep(objspace);
  2633. during_gc++;
  2634. gc_marks(objspace);
  2635. gc_prof_sweep_timer_start(objspace);
  2636. gc_sweep(objspace);
  2637. gc_prof_sweep_timer_stop(objspace);
  2638. objspace->mark_func_data = prev_mark_func_data;
  2639. gc_prof_timer_stop(objspace, Qtrue);
  2640. if (GC_NOTIFY) printf("end garbage_collect()\n");
  2641. return TRUE;
  2642. }
  2643. static void *
  2644. gc_with_gvl(void *ptr)
  2645. {
  2646. return (void *)(VALUE)garbage_collect((rb_objspace_t *)ptr);
  2647. }
  2648. static int
  2649. garbage_collect_with_gvl(rb_objspace_t *objspace)
  2650. {
  2651. if (dont_gc) return TRUE;
  2652. if (ruby_thread_has_gvl_p()) {
  2653. return garbage_collect(objspace);
  2654. }
  2655. else {
  2656. if (ruby_native_thread_p()) {
  2657. return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)objspace);
  2658. }
  2659. else {
  2660. /* no ruby thread */
  2661. fprintf(stderr, "[FATAL] failed to allocate memory\n");
  2662. exit(EXIT_FAILURE);
  2663. }
  2664. }
  2665. }
  2666. int
  2667. rb_garbage_collect(void)
  2668. {
  2669. return garbage_collect(&rb_objspace);
  2670. }
  2671. #undef Init_stack
  2672. void
  2673. Init_stack(volatile VALUE *addr)
  2674. {
  2675. ruby_init_stack(addr);
  2676. }
  2677. /*
  2678. * call-seq:
  2679. * GC.start -> nil
  2680. * gc.garbage_collect -> nil
  2681. * ObjectSpace.garbage_collect -> nil
  2682. *
  2683. * Initiates garbage collection, unless manually disabled.
  2684. *
  2685. */
  2686. VALUE
  2687. rb_gc_start(void)
  2688. {
  2689. rb_gc();
  2690. return Qnil;
  2691. }
  2692. void
  2693. rb_gc(void)
  2694. {
  2695. rb_objspace_t *objspace = &rb_objspace;
  2696. garbage_collect(objspace);
  2697. if (!finalizing) finalize_deferred(objspace);
  2698. free_unused_heaps(objspace);
  2699. }
  2700. int
  2701. rb_during_gc(void)
  2702. {
  2703. rb_objspace_t *objspace = &rb_objspace;
  2704. return during_gc;
  2705. }
  2706. /*
  2707. * call-seq:
  2708. * GC.count -> Integer
  2709. *
  2710. * The number of times GC occurred.
  2711. *
  2712. * It returns the number of times GC occurred since the process started.
  2713. *
  2714. */
  2715. static VALUE
  2716. gc_count(VALUE self)
  2717. {
  2718. return UINT2NUM(rb_objspace.count);
  2719. }
  2720. /*
  2721. * call-seq:
  2722. * GC.stat -> Hash
  2723. *
  2724. * Returns a Hash containing information about the GC.
  2725. *
2726. * The hash includes internal GC statistics such as:
  2727. *
  2728. * {
  2729. * :count => 18,
  2730. * :heap_used => 77,
  2731. * :heap_length => 77,
  2732. * :heap_increment => 0,
  2733. * :heap_live_num => 23287,
  2734. * :heap_free_num => 8115,
  2735. * :heap_final_num => 0,
  2736. * }
  2737. *
  2738. * The contents of the hash are implementation defined and may be changed in
  2739. * the future.
  2740. *
  2741. * This method is only expected to work on C Ruby.
  2742. *
  2743. */
  2744. static VALUE
  2745. gc_stat(int argc, VALUE *argv, VALUE self)
  2746. {
  2747. rb_objspace_t *objspace = &rb_objspace;
  2748. VALUE hash;
  2749. if (rb_scan_args(argc, argv, "01", &hash) == 1) {
  2750. if (!RB_TYPE_P(hash, T_HASH))
  2751. rb_raise(rb_eTypeError, "non-hash given");
  2752. }
  2753. if (hash == Qnil) {
  2754. hash = rb_hash_new();
  2755. }
  2756. rest_sweep(objspace);
  2757. rb_hash_aset(hash, ID2SYM(rb_intern("count")), SIZET2NUM(objspace->count));
  2758. /* implementation dependent counters */
  2759. rb_hash_aset(hash, ID2SYM(rb_intern("heap_used")), SIZET2NUM(objspace->heap.used));
  2760. rb_hash_aset(hash, ID2SYM(rb_intern("heap_length")), SIZET2NUM(objspace->heap.length));
  2761. rb_hash_aset(hash, ID2SYM(rb_intern("heap_increment")), SIZET2NUM(objspace->heap.increment));
  2762. rb_hash_aset(hash, ID2SYM(rb_intern("heap_live_num")), SIZET2NUM(objspace->heap.live_num));
  2763. rb_hash_aset(hash, ID2SYM(rb_intern("heap_free_num")), SIZET2NUM(objspace->heap.free_num));
  2764. rb_hash_aset(hash, ID2SYM(rb_intern("heap_final_num")), SIZET2NUM(objspace->heap.final_num));
  2765. return hash;
  2766. }
  2767. /*
  2768. * call-seq:
  2769. * GC.stress -> true or false
  2770. *
2771. * Returns the current status of GC stress mode.
  2772. */
  2773. static VALUE
  2774. gc_stress_get(VALUE self)
  2775. {
  2776. rb_objspace_t *objspace = &rb_objspace;
  2777. return ruby_gc_stress ? Qtrue : Qfalse;
  2778. }
  2779. /*
  2780. * call-seq:
  2781. * GC.stress = bool -> bool
  2782. *
  2783. * Updates the GC stress mode.
  2784. *
  2785. * When stress mode is enabled the GC is invoked at every GC opportunity:
  2786. * all memory and object allocations.
  2787. *
2788. * Enabling stress mode makes Ruby very slow; it is intended only for debugging.
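 *
 * For example:
 *
 *    GC.stress = true    # force a collection at every allocation
 *    # ... exercise the code under test ...
 *    GC.stress = false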
  2789. */
  2790. static VALUE
  2791. gc_stress_set(VALUE self, VALUE flag)
  2792. {
  2793. rb_objspace_t *objspace = &rb_objspace;
  2794. rb_secure(2);
  2795. ruby_gc_stress = RTEST(flag);
  2796. return flag;
  2797. }
  2798. /*
  2799. * call-seq:
  2800. * GC.enable -> true or false
  2801. *
  2802. * Enables garbage collection, returning <code>true</code> if garbage
  2803. * collection was previously disabled.
  2804. *
  2805. * GC.disable #=> false
  2806. * GC.enable #=> true
  2807. * GC.enable #=> false
  2808. *
  2809. */
  2810. VALUE
  2811. rb_gc_enable(void)
  2812. {
  2813. rb_objspace_t *objspace = &rb_objspace;
  2814. int old = dont_gc;
  2815. dont_gc = FALSE;
  2816. return old ? Qtrue : Qfalse;
  2817. }
  2818. /*
  2819. * call-seq:
  2820. * GC.disable -> true or false
  2821. *
  2822. * Disables garbage collection, returning <code>true</code> if garbage
  2823. * collection was already disabled.
  2824. *
  2825. * GC.disable #=> false
  2826. * GC.disable #=> true
  2827. *
  2828. */
  2829. VALUE
  2830. rb_gc_disable(void)
  2831. {
  2832. rb_objspace_t *objspace = &rb_objspace;
  2833. int old = dont_gc;
  2834. dont_gc = TRUE;
  2835. return old ? Qtrue : Qfalse;
  2836. }
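/*
 * Reads the tuning environment variables RUBY_GC_MALLOC_LIMIT,
 * RUBY_HEAP_MIN_SLOTS and RUBY_FREE_MIN at startup (skipped when
 * $SAFE > 0) and overrides the built-in defaults with any positive
 * values found.
 */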
  2837. void
  2838. rb_gc_set_params(void)
  2839. {
  2840. char *malloc_limit_ptr, *heap_min_slots_ptr, *free_min_ptr;
  2841. if (rb_safe_level() > 0) return;
  2842. malloc_limit_ptr = getenv("RUBY_GC_MALLOC_LIMIT");
  2843. if (malloc_limit_ptr != NULL) {
  2844. int malloc_limit_i = atoi(malloc_limit_ptr);
  2845. if (RTEST(ruby_verbose))
  2846. fprintf(stderr, "malloc_limit=%d (%d)\n",
  2847. malloc_limit_i, initial_malloc_limit);
  2848. if (malloc_limit_i > 0) {
  2849. initial_malloc_limit = malloc_limit_i;
  2850. }
  2851. }
  2852. heap_min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS");
  2853. if (heap_min_slots_ptr != NULL) {
  2854. int heap_min_slots_i = atoi(heap_min_slots_ptr);
  2855. if (RTEST(ruby_verbose))
  2856. fprintf(stderr, "heap_min_slots=%d (%d)\n",
  2857. heap_min_slots_i, initial_heap_min_slots);
  2858. if (heap_min_slots_i > 0) {
  2859. initial_heap_min_slots = heap_min_slots_i;
  2860. initial_expand_heap(&rb_objspace);
  2861. }
  2862. }
  2863. free_min_ptr = getenv("RUBY_FREE_MIN");
  2864. if (free_min_ptr != NULL) {
  2865. int free_min_i = atoi(free_min_ptr);
  2866. if (RTEST(ruby_verbose))
  2867. fprintf(stderr, "free_min=%d (%d)\n", free_min_i, initial_free_min);
  2868. if (free_min_i > 0) {
  2869. initial_free_min = free_min_i;
  2870. }
  2871. }
  2872. }
  2873. static void
  2874. collect_refs(rb_objspace_t *objspace, VALUE obj)
  2875. {
  2876. if (markable_object_p(objspace, obj) && !internal_object_p(obj)) {
  2877. st_insert((st_table *)objspace->mark_func_data->data, obj, Qtrue);
  2878. }
  2879. }
  2880. static int
  2881. collect_keys(st_data_t key, st_data_t value, st_data_t data)
  2882. {
  2883. VALUE ary = (VALUE)data;
  2884. rb_ary_push(ary, (VALUE)key);
  2885. return ST_CONTINUE;
  2886. }
  2887. VALUE
  2888. rb_objspace_reachable_objects_from(VALUE obj)
  2889. {
  2890. rb_objspace_t *objspace = &rb_objspace;
  2891. if (markable_object_p(objspace, obj)) {
  2892. st_table *refs = st_init_numtable();
  2893. struct mark_func_data_struct mfd;
  2894. VALUE ret = rb_ary_new();
  2895. mfd.mark_func = collect_refs;
  2896. mfd.data = (VALUE)refs;
  2897. objspace->mark_func_data = &mfd;
  2898. gc_mark_children(objspace, obj);
  2899. objspace->mark_func_data = 0;
  2900. st_foreach(refs, collect_keys, (st_data_t)ret);
  2901. return ret;
  2902. }
  2903. else {
  2904. return Qnil;
  2905. }
  2906. }
  2907. /*
  2908. ------------------------ Extended allocator ------------------------
  2909. */
  2910. static void vm_xfree(rb_objspace_t *objspace, void *ptr);
  2911. static void *
  2912. negative_size_allocation_error_with_gvl(void *ptr)
  2913. {
  2914. rb_raise(rb_eNoMemError, "%s", (const char *)ptr);
  2915. return 0; /* should not be reached */
  2916. }
  2917. static void
  2918. negative_size_allocation_error(const char *msg)
  2919. {
  2920. if (ruby_thread_has_gvl_p()) {
  2921. rb_raise(rb_eNoMemError, "%s", msg);
  2922. }
  2923. else {
  2924. if (ruby_native_thread_p()) {
  2925. rb_thread_call_with_gvl(negative_size_allocation_error_with_gvl, (void *)msg);
  2926. }
  2927. else {
  2928. fprintf(stderr, "[FATAL] %s\n", msg);
  2929. exit(EXIT_FAILURE);
  2930. }
  2931. }
  2932. }
  2933. static void *
  2934. ruby_memerror_body(void *dummy)
  2935. {
  2936. rb_memerror();
  2937. return 0;
  2938. }
  2939. static void
  2940. ruby_memerror(void)
  2941. {
  2942. if (ruby_thread_has_gvl_p()) {
  2943. rb_memerror();
  2944. }
  2945. else {
  2946. if (ruby_native_thread_p()) {
  2947. rb_thread_call_with_gvl(ruby_memerror_body, 0);
  2948. }
  2949. else {
  2950. /* no ruby thread */
  2951. fprintf(stderr, "[FATAL] failed to allocate memory\n");
  2952. exit(EXIT_FAILURE);
  2953. }
  2954. }
  2955. }
  2956. void
  2957. rb_memerror(void)
  2958. {
  2959. rb_thread_t *th = GET_THREAD();
  2960. if (!nomem_error ||
  2961. (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) {
  2962. fprintf(stderr, "[FATAL] failed to allocate memory\n");
  2963. exit(EXIT_FAILURE);
  2964. }
  2965. if (rb_thread_raised_p(th, RAISED_NOMEMORY)) {
  2966. rb_thread_raised_clear(th);
  2967. GET_THREAD()->errinfo = nomem_error;
  2968. JUMP_TAG(TAG_RAISE);
  2969. }
  2970. rb_thread_raised_set(th, RAISED_NOMEMORY);
  2971. rb_exc_raise(nomem_error);
  2972. }
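/*
 * aligned_malloc()/aligned_free() wrap the platform's aligned
 * allocator.  The generic fallback over-allocates, rounds the result
 * up to the requested alignment and stores the original malloc()
 * pointer in the word just below the aligned block so aligned_free()
 * can recover it.
 */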
  2973. static void *
  2974. aligned_malloc(size_t alignment, size_t size)
  2975. {
  2976. void *res;
  2977. #if defined __MINGW32__
  2978. res = __mingw_aligned_malloc(size, alignment);
  2979. #elif defined _WIN32 && !defined __CYGWIN__
  2980. res = _aligned_malloc(size, alignment);
  2981. #elif defined(HAVE_POSIX_MEMALIGN)
  2982. if (posix_memalign(&res, alignment, size) == 0) {
  2983. return res;
  2984. }
  2985. else {
  2986. return NULL;
  2987. }
  2988. #elif defined(HAVE_MEMALIGN)
  2989. res = memalign(alignment, size);
  2990. #else
  2991. char* aligned;
  2992. res = malloc(alignment + size + sizeof(void*));
  2993. aligned = (char*)res + alignment + sizeof(void*);
  2994. aligned -= ((VALUE)aligned & (alignment - 1));
  2995. ((void**)aligned)[-1] = res;
  2996. res = (void*)aligned;
  2997. #endif
  2998. #if defined(_DEBUG) || defined(GC_DEBUG)
  2999. /* alignment must be a power of 2 */
3000. assert(((alignment - 1) & alignment) == 0);
  3001. assert(alignment % sizeof(void*) == 0);
  3002. #endif
  3003. return res;
  3004. }
  3005. static void
  3006. aligned_free(void *ptr)
  3007. {
  3008. #if defined __MINGW32__
  3009. __mingw_aligned_free(ptr);
  3010. #elif defined _WIN32 && !defined __CYGWIN__
  3011. _aligned_free(ptr);
  3012. #elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
  3013. free(ptr);
  3014. #else
  3015. free(((void**)ptr)[-1]);
  3016. #endif
  3017. }
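/*
 * vm_malloc_prepare()/vm_malloc_fixup() bracket every xmalloc-family
 * allocation: the prepare step rejects negative sizes and triggers a
 * GC once the accumulated malloc_increase plus the requested size
 * exceeds malloc_limit (or always under GC.stress); the fixup step
 * accounts the new bytes.  With CALC_EXACT_MALLOC_SIZE the requested
 * size is stored in a size_t header just in front of the returned
 * block.
 */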
  3018. static inline size_t
  3019. vm_malloc_prepare(rb_objspace_t *objspace, size_t size)
  3020. {
  3021. if ((ssize_t)size < 0) {
  3022. negative_size_allocation_error("negative allocation size (or too big)");
  3023. }
  3024. if (size == 0) size = 1;
  3025. #if CALC_EXACT_MALLOC_SIZE
  3026. size += sizeof(size_t);
  3027. #endif
  3028. if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
  3029. (malloc_increase+size) > malloc_limit) {
  3030. garbage_collect_with_gvl(objspace);
  3031. }
  3032. return size;
  3033. }
  3034. static inline void *
  3035. vm_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
  3036. {
  3037. ATOMIC_SIZE_ADD(malloc_increase, size);
  3038. #if CALC_EXACT_MALLOC_SIZE
  3039. ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, size);
  3040. ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
  3041. ((size_t *)mem)[0] = size;
  3042. mem = (size_t *)mem + 1;
  3043. #endif
  3044. return mem;
  3045. }
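/*
 * TRY_WITH_GC(): if the first allocation attempt fails, run a GC and
 * retry once; if that also fails, report out-of-memory via
 * ruby_memerror().
 */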
  3046. #define TRY_WITH_GC(alloc) do { \
  3047. if (!(alloc) && \
  3048. (!garbage_collect_with_gvl(objspace) || \
  3049. !(alloc))) { \
  3050. ruby_memerror(); \
  3051. } \
  3052. } while (0)
  3053. static void *
  3054. vm_xmalloc(rb_objspace_t *objspace, size_t size)
  3055. {
  3056. void *mem;
  3057. size = vm_malloc_prepare(objspace, size);
  3058. TRY_WITH_GC(mem = malloc(size));
  3059. return vm_malloc_fixup(objspace, mem, size);
  3060. }
  3061. static void *
  3062. vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size)
  3063. {
  3064. void *mem;
  3065. #if CALC_EXACT_MALLOC_SIZE
  3066. size_t oldsize;
  3067. #endif
  3068. if ((ssize_t)size < 0) {
  3069. negative_size_allocation_error("negative re-allocation size");
  3070. }
  3071. if (!ptr) return vm_xmalloc(objspace, size);
  3072. if (size == 0) {
  3073. vm_xfree(objspace, ptr);
  3074. return 0;
  3075. }
  3076. if (ruby_gc_stress && !ruby_disable_gc_stress)
  3077. garbage_collect_with_gvl(objspace);
  3078. #if CALC_EXACT_MALLOC_SIZE
  3079. size += sizeof(size_t);
  3080. ptr = (size_t *)ptr - 1;
  3081. oldsize = ((size_t *)ptr)[0];
  3082. #endif
  3083. mem = realloc(ptr, size);
  3084. if (!mem) {
  3085. if (garbage_collect_with_gvl(objspace)) {
  3086. mem = realloc(ptr, size);
  3087. }
  3088. if (!mem) {
  3089. ruby_memerror();
  3090. }
  3091. }
  3092. ATOMIC_SIZE_ADD(malloc_increase, size);
  3093. #if CALC_EXACT_MALLOC_SIZE
  3094. ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, size - oldsize);
  3095. ((size_t *)mem)[0] = size;
  3096. mem = (size_t *)mem + 1;
  3097. #endif
  3098. return mem;
  3099. }
  3100. static void
  3101. vm_xfree(rb_objspace_t *objspace, void *ptr)
  3102. {
  3103. #if CALC_EXACT_MALLOC_SIZE
  3104. size_t size;
  3105. ptr = ((size_t *)ptr) - 1;
  3106. size = ((size_t*)ptr)[0];
  3107. if (size) {
  3108. ATOMIC_SIZE_SUB(objspace->malloc_params.allocated_size, size);
  3109. ATOMIC_SIZE_DEC(objspace->malloc_params.allocations);
  3110. }
  3111. #endif
  3112. free(ptr);
  3113. }
  3114. void *
  3115. ruby_xmalloc(size_t size)
  3116. {
  3117. return vm_xmalloc(&rb_objspace, size);
  3118. }
  3119. static inline size_t
  3120. xmalloc2_size(size_t n, size_t size)
  3121. {
  3122. size_t len = size * n;
  3123. if (n != 0 && size != len / n) {
  3124. rb_raise(rb_eArgError, "malloc: possible integer overflow");
  3125. }
  3126. return len;
  3127. }
  3128. void *
  3129. ruby_xmalloc2(size_t n, size_t size)
  3130. {
  3131. return vm_xmalloc(&rb_objspace, xmalloc2_size(n, size));
  3132. }
  3133. static void *
  3134. vm_xcalloc(rb_objspace_t *objspace, size_t count, size_t elsize)
  3135. {
  3136. void *mem;
  3137. size_t size;
  3138. size = xmalloc2_size(count, elsize);
  3139. size = vm_malloc_prepare(objspace, size);
  3140. TRY_WITH_GC(mem = calloc(1, size));
  3141. return vm_malloc_fixup(objspace, mem, size);
  3142. }
  3143. void *
  3144. ruby_xcalloc(size_t n, size_t size)
  3145. {
  3146. return vm_xcalloc(&rb_objspace, n, size);
  3147. }
  3148. void *
  3149. ruby_xrealloc(void *ptr, size_t size)
  3150. {
  3151. return vm_xrealloc(&rb_objspace, ptr, size);
  3152. }
  3153. void *
  3154. ruby_xrealloc2(void *ptr, size_t n, size_t size)
  3155. {
  3156. size_t len = size * n;
  3157. if (n != 0 && size != len / n) {
  3158. rb_raise(rb_eArgError, "realloc: possible integer overflow");
  3159. }
  3160. return ruby_xrealloc(ptr, len);
  3161. }
  3162. void
  3163. ruby_xfree(void *x)
  3164. {
  3165. if (x)
  3166. vm_xfree(&rb_objspace, x);
  3167. }
3168. /* Mimics ruby_xmalloc, but does not require rb_objspace.
3169. * Returns a pointer suitable for ruby_xfree.
  3170. */
  3171. void *
  3172. ruby_mimmalloc(size_t size)
  3173. {
  3174. void *mem;
  3175. #if CALC_EXACT_MALLOC_SIZE
  3176. size += sizeof(size_t);
  3177. #endif
  3178. mem = malloc(size);
  3179. #if CALC_EXACT_MALLOC_SIZE
  3180. /* set 0 for consistency of allocated_size/allocations */
  3181. ((size_t *)mem)[0] = 0;
  3182. mem = (size_t *)mem + 1;
  3183. #endif
  3184. return mem;
  3185. }
  3186. #if CALC_EXACT_MALLOC_SIZE
  3187. /*
  3188. * call-seq:
  3189. * GC.malloc_allocated_size -> Integer
  3190. *
  3191. * Returns the size of memory allocated by malloc(). Only available if ruby
  3192. * was built with CALC_EXACT_MALLOC_SIZE.
  3193. */
  3194. static VALUE
  3195. gc_malloc_allocated_size(VALUE self)
  3196. {
  3197. return UINT2NUM(rb_objspace.malloc_params.allocated_size);
  3198. }
  3199. /*
  3200. * call-seq:
  3201. * GC.malloc_allocations -> Integer
  3202. *
  3203. * Returns the number of malloc() allocations. Only available if ruby was
  3204. * built with CALC_EXACT_MALLOC_SIZE.
  3205. */
  3206. static VALUE
  3207. gc_malloc_allocations(VALUE self)
  3208. {
  3209. return UINT2NUM(rb_objspace.malloc_params.allocations);
  3210. }
  3211. #endif
  3212. /*
  3213. ------------------------------ WeakMap ------------------------------
  3214. */
  3215. struct weakmap {
  3216. st_table *obj2wmap; /* obj -> [ref,...] */
  3217. st_table *wmap2obj; /* ref -> obj */
  3218. VALUE final;
  3219. };
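/*
 * ObjectSpace::WeakMap internals: obj2wmap maps a referenced object to
 * the array of entries registered against it, wmap2obj maps a key back
 * to its object, and `final` holds the Method object (WeakMap#finalize)
 * that prunes both tables when a registered object is collected.
 */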
  3220. static int
  3221. wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
  3222. {
  3223. gc_mark_ptr((rb_objspace_t *)arg, (VALUE)val);
  3224. return ST_CONTINUE;
  3225. }
  3226. static void
  3227. wmap_mark(void *ptr)
  3228. {
  3229. struct weakmap *w = ptr;
  3230. st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
  3231. rb_gc_mark(w->final);
  3232. }
  3233. static int
  3234. wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
  3235. {
  3236. rb_ary_resize((VALUE)val, 0);
  3237. return ST_CONTINUE;
  3238. }
  3239. static void
  3240. wmap_free(void *ptr)
  3241. {
  3242. struct weakmap *w = ptr;
  3243. st_foreach(w->obj2wmap, wmap_free_map, 0);
  3244. st_free_table(w->obj2wmap);
  3245. st_free_table(w->wmap2obj);
  3246. }
  3247. size_t rb_ary_memsize(VALUE ary);
  3248. static int
  3249. wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
  3250. {
  3251. *(size_t *)arg += rb_ary_memsize((VALUE)val);
  3252. return ST_CONTINUE;
  3253. }
  3254. static size_t
  3255. wmap_memsize(const void *ptr)
  3256. {
  3257. size_t size;
  3258. const struct weakmap *w = ptr;
  3259. if (!w) return 0;
  3260. size = sizeof(*w);
  3261. size += st_memsize(w->obj2wmap);
  3262. size += st_memsize(w->wmap2obj);
  3263. st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
  3264. return size;
  3265. }
  3266. static const rb_data_type_t weakmap_type = {
  3267. "weakmap",
  3268. {
  3269. wmap_mark,
  3270. wmap_free,
  3271. wmap_memsize,
  3272. }
  3273. };
  3274. static VALUE
  3275. wmap_allocate(VALUE klass)
  3276. {
  3277. struct weakmap *w;
  3278. VALUE obj = TypedData_Make_Struct(klass, struct weakmap, &weakmap_type, w);
  3279. w->obj2wmap = st_init_numtable();
  3280. w->wmap2obj = st_init_numtable();
  3281. w->final = rb_obj_method(obj, ID2SYM(rb_intern("finalize")));
  3282. return obj;
  3283. }
  3284. static int
  3285. wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
  3286. {
  3287. VALUE obj, ary;
  3288. if (!existing) return ST_STOP;
  3289. obj = (VALUE)*key, ary = (VALUE)*value;
  3290. rb_ary_delete(ary, obj);
  3291. if (!RARRAY_LEN(ary)) return ST_DELETE;
  3292. return ST_CONTINUE;
  3293. }
  3294. static VALUE
  3295. wmap_finalize(VALUE self, VALUE obj)
  3296. {
  3297. st_data_t data;
  3298. VALUE rids;
  3299. long i;
  3300. struct weakmap *w;
  3301. TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
  3302. obj = NUM2PTR(obj);
  3303. data = (st_data_t)obj;
  3304. if (st_delete(w->obj2wmap, &data, &data)) {
  3305. rids = (VALUE)data;
  3306. for (i = 0; i < RARRAY_LEN(rids); ++i) {
  3307. data = (st_data_t)RARRAY_PTR(rids)[i];
  3308. st_delete(w->wmap2obj, &data, NULL);
  3309. }
  3310. }
  3311. data = (st_data_t)obj;
  3312. if (st_delete(w->wmap2obj, &data, &data)) {
  3313. st_update(w->obj2wmap, (st_data_t)obj, wmap_final_func, 0);
  3314. }
  3315. return self;
  3316. }
  3317. static VALUE
  3318. wmap_aset(VALUE self, VALUE wmap, VALUE orig)
  3319. {
  3320. st_data_t data;
  3321. VALUE rids;
  3322. struct weakmap *w;
  3323. TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
  3324. rb_define_final(orig, w->final);
  3325. rb_define_final(wmap, w->final);
  3326. if (st_lookup(w->obj2wmap, (st_data_t)orig, &data)) {
  3327. rids = (VALUE)data;
  3328. }
  3329. else {
  3330. rids = rb_ary_tmp_new(1);
  3331. st_insert(w->obj2wmap, (st_data_t)orig, (st_data_t)rids);
  3332. }
  3333. rb_ary_push(rids, orig);
  3334. st_insert(w->wmap2obj, (st_data_t)wmap, (st_data_t)orig);
  3335. return nonspecial_obj_id(orig);
  3336. }
  3337. static VALUE
  3338. wmap_aref(VALUE self, VALUE wmap)
  3339. {
  3340. st_data_t data;
  3341. VALUE obj;
  3342. struct weakmap *w;
  3343. rb_objspace_t *objspace = &rb_objspace;
  3344. TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
  3345. if (!st_lookup(w->wmap2obj, (st_data_t)wmap, &data)) return Qnil;
  3346. obj = (VALUE)data;
  3347. if (!is_id_value(objspace, obj)) return Qnil;
  3348. if (!is_live_object(objspace, obj)) return Qnil;
  3349. return obj;
  3350. }
  3351. /*
  3352. ------------------------------ GC profiler ------------------------------
  3353. */
  3354. static inline void gc_prof_set_heap_info(rb_objspace_t *, gc_profile_record *);
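/*
 * getrusage_time() returns the user CPU time consumed by the process
 * in seconds (getrusage() where available, GetProcessTimes() on
 * Windows, 0.0 otherwise); all profiler timestamps below are based on
 * it.
 */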
  3355. static double
  3356. getrusage_time(void)
  3357. {
  3358. #ifdef RUSAGE_SELF
  3359. struct rusage usage;
  3360. struct timeval time;
  3361. getrusage(RUSAGE_SELF, &usage);
  3362. time = usage.ru_utime;
  3363. return time.tv_sec + time.tv_usec * 1e-6;
  3364. #elif defined _WIN32
  3365. FILETIME creation_time, exit_time, kernel_time, user_time;
  3366. ULARGE_INTEGER ui;
  3367. LONG_LONG q;
  3368. double t;
  3369. if (GetProcessTimes(GetCurrentProcess(),
  3370. &creation_time, &exit_time, &kernel_time, &user_time) == 0)
  3371. {
  3372. return 0.0;
  3373. }
  3374. memcpy(&ui, &user_time, sizeof(FILETIME));
  3375. q = ui.QuadPart / 10L;
  3376. t = (DWORD)(q % 1000000L) * 1e-6;
  3377. q /= 1000000L;
  3378. #ifdef __GNUC__
  3379. t += q;
  3380. #else
  3381. t += (double)(DWORD)(q >> 16) * (1 << 16);
  3382. t += (DWORD)q & ~(~0 << 16);
  3383. #endif
  3384. return t;
  3385. #else
  3386. return 0.0;
  3387. #endif
  3388. }
  3389. static inline void
  3390. gc_prof_timer_start(rb_objspace_t *objspace)
  3391. {
  3392. if (objspace->profile.run) {
  3393. size_t count = objspace->profile.count;
  3394. if (!objspace->profile.record) {
  3395. objspace->profile.size = 1000;
  3396. objspace->profile.record = malloc(sizeof(gc_profile_record) * objspace->profile.size);
  3397. }
  3398. if (count >= objspace->profile.size) {
  3399. objspace->profile.size += 1000;
  3400. objspace->profile.record = realloc(objspace->profile.record, sizeof(gc_profile_record) * objspace->profile.size);
  3401. }
  3402. if (!objspace->profile.record) {
  3403. rb_bug("gc_profile malloc or realloc miss");
  3404. }
  3405. MEMZERO(&objspace->profile.record[count], gc_profile_record, 1);
  3406. objspace->profile.record[count].gc_time = getrusage_time();
  3407. objspace->profile.record[objspace->profile.count].gc_invoke_time =
  3408. objspace->profile.record[count].gc_time - objspace->profile.invoke_time;
  3409. }
  3410. }
  3411. static inline void
  3412. gc_prof_timer_stop(rb_objspace_t *objspace, int marked)
  3413. {
  3414. if (objspace->profile.run) {
  3415. double gc_time = 0;
  3416. size_t count = objspace->profile.count;
  3417. gc_profile_record *record = &objspace->profile.record[count];
  3418. gc_time = getrusage_time() - record->gc_time;
  3419. if (gc_time < 0) gc_time = 0;
  3420. record->gc_time = gc_time;
  3421. record->is_marked = !!(marked);
  3422. gc_prof_set_heap_info(objspace, record);
  3423. objspace->profile.count++;
  3424. }
  3425. }
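/*
 * When GC_PROFILE_MORE_DETAIL is disabled, the mark/sweep sub-timers
 * and malloc accounting below compile to empty stubs and only the
 * basic heap totals are recorded per GC run.
 */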
  3426. #if !GC_PROFILE_MORE_DETAIL
  3427. static inline void
  3428. gc_prof_mark_timer_start(rb_objspace_t *objspace)
  3429. {
  3430. }
  3431. static inline void
  3432. gc_prof_mark_timer_stop(rb_objspace_t *objspace)
  3433. {
  3434. }
  3435. static inline void
  3436. gc_prof_sweep_timer_start(rb_objspace_t *objspace)
  3437. {
  3438. }
  3439. static inline void
  3440. gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
  3441. {
  3442. }
  3443. static inline void
  3444. gc_prof_set_malloc_info(rb_objspace_t *objspace)
  3445. {
  3446. }
  3447. static inline void
  3448. gc_prof_set_heap_info(rb_objspace_t *objspace, gc_profile_record *record)
  3449. {
  3450. size_t live = objspace->heap.live_num;
  3451. size_t total = heaps_used * HEAP_OBJ_LIMIT;
  3452. record->heap_total_objects = total;
  3453. record->heap_use_size = live * sizeof(RVALUE);
  3454. record->heap_total_size = total * sizeof(RVALUE);
  3455. }
  3456. static inline void
  3457. gc_prof_inc_live_num(rb_objspace_t *objspace)
  3458. {
  3459. }
  3460. static inline void
  3461. gc_prof_dec_live_num(rb_objspace_t *objspace)
  3462. {
  3463. }
  3464. #else
  3465. static inline void
  3466. gc_prof_mark_timer_start(rb_objspace_t *objspace)
  3467. {
  3468. if (objspace->profile.run) {
  3469. size_t count = objspace->profile.count;
  3470. objspace->profile.record[count].gc_mark_time = getrusage_time();
  3471. }
  3472. }
  3473. static inline void
  3474. gc_prof_mark_timer_stop(rb_objspace_t *objspace)
  3475. {
  3476. if (objspace->profile.run) {
  3477. double mark_time = 0;
3478. size_t count = objspace->profile.count;
  3479. gc_profile_record *record = &objspace->profile.record[count];
  3480. mark_time = getrusage_time() - record->gc_mark_time;
  3481. if (mark_time < 0) mark_time = 0;
  3482. record->gc_mark_time = mark_time;
  3483. }
  3484. }
  3485. static inline void
  3486. gc_prof_sweep_timer_start(rb_objspace_t *objspace)
  3487. {
  3488. if (objspace->profile.run) {
  3489. size_t count = objspace->profile.count;
  3490. objspace->profile.record[count].gc_sweep_time = getrusage_time();
  3491. }
  3492. }
  3493. static inline void
  3494. gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
  3495. {
  3496. if (objspace->profile.run) {
  3497. double sweep_time = 0;
  3498. size_t count = objspace->profile.count;
  3499. gc_profile_record *record = &objspace->profile.record[count];
3500. sweep_time = getrusage_time() - record->gc_sweep_time;
3501. if (sweep_time < 0) sweep_time = 0;
  3502. record->gc_sweep_time = sweep_time;
  3503. }
  3504. }
  3505. static inline void
  3506. gc_prof_set_malloc_info(rb_objspace_t *objspace)
  3507. {
  3508. if (objspace->profile.run) {
  3509. gc_profile_record *record = &objspace->profile.record[objspace->profile.count];
  3510. if (record) {
  3511. record->allocate_increase = malloc_increase;
  3512. record->allocate_limit = malloc_limit;
  3513. }
  3514. }
  3515. }
  3516. static inline void
  3517. gc_prof_set_heap_info(rb_objspace_t *objspace, gc_profile_record *record)
  3518. {
  3519. size_t live = objspace->heap.live_num;
  3520. size_t total = heaps_used * HEAP_OBJ_LIMIT;
  3521. record->heap_use_slots = heaps_used;
  3522. record->heap_live_objects = live;
  3523. record->heap_free_objects = total - live;
  3524. record->heap_total_objects = total;
  3525. record->have_finalize = deferred_final_list ? Qtrue : Qfalse;
  3526. record->heap_use_size = live * sizeof(RVALUE);
  3527. record->heap_total_size = total * sizeof(RVALUE);
  3528. }
  3529. static inline void
  3530. gc_prof_inc_live_num(rb_objspace_t *objspace)
  3531. {
  3532. objspace->heap.live_num++;
  3533. }
  3534. static inline void
  3535. gc_prof_dec_live_num(rb_objspace_t *objspace)
  3536. {
  3537. objspace->heap.live_num--;
  3538. }
  3539. #endif /* !GC_PROFILE_MORE_DETAIL */
  3540. /*
  3541. * call-seq:
  3542. * GC::Profiler.clear -> nil
  3543. *
  3544. * Clears the GC profiler data.
  3545. *
  3546. */
  3547. static VALUE
  3548. gc_profile_clear(void)
  3549. {
  3550. rb_objspace_t *objspace = &rb_objspace;
  3551. MEMZERO(objspace->profile.record, gc_profile_record, objspace->profile.size);
  3552. objspace->profile.count = 0;
  3553. return Qnil;
  3554. }
  3555. /*
  3556. * call-seq:
  3557. * GC::Profiler.raw_data -> [Hash, ...]
  3558. *
  3559. * Returns an Array of individual raw profile data Hashes ordered
  3560. * from earliest to latest by <tt>:GC_INVOKE_TIME</tt>. For example:
  3561. *
  3562. * [{:GC_TIME=>1.3000000000000858e-05,
  3563. * :GC_INVOKE_TIME=>0.010634999999999999,
  3564. * :HEAP_USE_SIZE=>289640,
  3565. * :HEAP_TOTAL_SIZE=>588960,
  3566. * :HEAP_TOTAL_OBJECTS=>14724,
  3567. * :GC_IS_MARKED=>false},
  3568. * ...
  3569. * ]
  3570. *
  3571. * The keys mean:
  3572. *
3573. * +:GC_TIME+:: Time elapsed in seconds for this GC run
3574. * +:GC_INVOKE_TIME+:: Time, in seconds since startup, at which this GC was invoked
3575. * +:HEAP_USE_SIZE+:: Bytes of heap used
3576. * +:HEAP_TOTAL_SIZE+:: Total size of the heap in bytes
3577. * +:HEAP_TOTAL_OBJECTS+:: Total number of object slots in the heap
  3578. * +:GC_IS_MARKED+:: Is the GC in the mark phase
  3579. *
  3580. */
  3581. static VALUE
  3582. gc_profile_record_get(void)
  3583. {
  3584. VALUE prof;
  3585. VALUE gc_profile = rb_ary_new();
  3586. size_t i;
  3587. rb_objspace_t *objspace = (&rb_objspace);
  3588. if (!objspace->profile.run) {
  3589. return Qnil;
  3590. }
3591. for (i = 0; i < objspace->profile.count; i++) {
  3592. prof = rb_hash_new();
  3593. rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(objspace->profile.record[i].gc_time));
  3594. rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(objspace->profile.record[i].gc_invoke_time));
  3595. rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(objspace->profile.record[i].heap_use_size));
  3596. rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(objspace->profile.record[i].heap_total_size));
  3597. rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_total_objects));
  3598. rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), objspace->profile.record[i].is_marked);
  3599. #if GC_PROFILE_MORE_DETAIL
  3600. rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(objspace->profile.record[i].gc_mark_time));
  3601. rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(objspace->profile.record[i].gc_sweep_time));
  3602. rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(objspace->profile.record[i].allocate_increase));
  3603. rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(objspace->profile.record[i].allocate_limit));
  3604. rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SLOTS")), SIZET2NUM(objspace->profile.record[i].heap_use_slots));
  3605. rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_live_objects));
  3606. rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_free_objects));
  3607. rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), objspace->profile.record[i].have_finalize);
  3608. #endif
  3609. rb_ary_push(gc_profile, prof);
  3610. }
  3611. return gc_profile;
  3612. }
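/*
 * A minimal, illustrative sketch of using GC::Profiler.raw_data from Ruby
 * (the loop count and output format below are arbitrary choices, not part
 * of the API):
 *
 *    GC::Profiler.enable
 *    3.times { GC.start }
 *    GC::Profiler.raw_data.each do |rec|
 *      # :GC_TIME is stored in seconds; scale it for a readable report
 *      printf "invoked at %.4fs, took %.6fms\n",
 *             rec[:GC_INVOKE_TIME], rec[:GC_TIME] * 1000
 *    end
 *    GC::Profiler.disable
 */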
  3613. /*
  3614. * call-seq:
  3615. * GC::Profiler.result -> String
  3616. *
  3617. * Returns a profile data report such as:
  3618. *
  3619. * GC 1 invokes.
3620. * Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)
  3621. * 1 0.012 159240 212940 10647 0.00000000000001530000
  3622. */
  3623. static VALUE
  3624. gc_profile_result(void)
  3625. {
  3626. rb_objspace_t *objspace = &rb_objspace;
  3627. VALUE record;
  3628. VALUE result;
  3629. int i, index;
  3630. record = gc_profile_record_get();
  3631. if (objspace->profile.run && objspace->profile.count) {
  3632. result = rb_sprintf("GC %d invokes.\n", NUM2INT(gc_count(0)));
  3633. index = 1;
  3634. rb_str_cat2(result, "Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n");
  3635. for (i = 0; i < (int)RARRAY_LEN(record); i++) {
  3636. VALUE r = RARRAY_PTR(record)[i];
  3637. #if !GC_PROFILE_MORE_DETAIL
  3638. if (rb_hash_aref(r, ID2SYM(rb_intern("GC_IS_MARKED")))) {
  3639. #endif
  3640. rb_str_catf(result, "%5d %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
  3641. index++, NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_INVOKE_TIME")))),
  3642. (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_USE_SIZE")))),
  3643. (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")))),
  3644. (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")))),
  3645. NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_TIME"))))*1000);
  3646. #if !GC_PROFILE_MORE_DETAIL
  3647. }
  3648. #endif
  3649. }
  3650. #if GC_PROFILE_MORE_DETAIL
  3651. rb_str_cat2(result, "\n\n");
  3652. rb_str_cat2(result, "More detail.\n");
  3653. rb_str_cat2(result, "Index Allocate Increase Allocate Limit Use Slot Have Finalize Mark Time(ms) Sweep Time(ms)\n");
  3654. index = 1;
  3655. for (i = 0; i < (int)RARRAY_LEN(record); i++) {
  3656. VALUE r = RARRAY_PTR(record)[i];
  3657. rb_str_catf(result, "%5d %17"PRIuSIZE" %17"PRIuSIZE" %9"PRIuSIZE" %14s %25.20f %25.20f\n",
  3658. index++, (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("ALLOCATE_INCREASE")))),
  3659. (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("ALLOCATE_LIMIT")))),
  3660. (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_USE_SLOTS")))),
  3661. rb_hash_aref(r, ID2SYM(rb_intern("HAVE_FINALIZE")))? "true" : "false",
  3662. NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_MARK_TIME"))))*1000,
  3663. NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_SWEEP_TIME"))))*1000);
  3664. }
  3665. #endif
  3666. }
  3667. else {
  3668. result = rb_str_new2("");
  3669. }
  3670. return result;
  3671. }
  3672. /*
  3673. * call-seq:
  3674. * GC::Profiler.report
  3675. * GC::Profiler.report io
  3676. *
3677. * Writes the GC::Profiler.result report to <tt>$stdout</tt> or the given IO object.
  3678. *
  3679. */
  3680. static VALUE
  3681. gc_profile_report(int argc, VALUE *argv, VALUE self)
  3682. {
  3683. VALUE out;
  3684. if (argc == 0) {
  3685. out = rb_stdout;
  3686. }
  3687. else {
  3688. rb_scan_args(argc, argv, "01", &out);
  3689. }
  3690. rb_io_write(out, gc_profile_result());
  3691. return Qnil;
  3692. }
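/*
 * Sketch of sending the report to a file rather than $stdout; the file name
 * "gc_profile.log" is only an example:
 *
 *    GC::Profiler.enable
 *    # ... run the code being measured ...
 *    File.open("gc_profile.log", "w") do |io|
 *      GC::Profiler.report(io)
 *    end
 *    GC::Profiler.disable
 */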
  3693. /*
  3694. * call-seq:
  3695. * GC::Profiler.total_time -> float
  3696. *
3697. * The total time used for garbage collection in seconds
  3698. */
  3699. static VALUE
  3700. gc_profile_total_time(VALUE self)
  3701. {
  3702. double time = 0;
  3703. rb_objspace_t *objspace = &rb_objspace;
  3704. size_t i;
  3705. if (objspace->profile.run && objspace->profile.count) {
  3706. for (i = 0; i < objspace->profile.count; i++) {
  3707. time += objspace->profile.record[i].gc_time;
  3708. }
  3709. }
  3710. return DBL2NUM(time);
  3711. }
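/*
 * total_time is the sum of the :GC_TIME entries exposed by raw_data, so the
 * two views agree; a short sketch:
 *
 *    GC::Profiler.enable
 *    5.times { GC.start }
 *    sum = GC::Profiler.raw_data.inject(0.0) { |t, r| t + r[:GC_TIME] }
 *    GC::Profiler.total_time  #=> same value as sum
 *    GC::Profiler.disable
 */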
  3712. /*
  3713. * call-seq:
  3714. * GC::Profiler.enable? -> true or false
  3715. *
3716. * Reports whether the GC profiler is currently enabled.
  3717. */
  3718. static VALUE
  3719. gc_profile_enable_get(VALUE self)
  3720. {
  3721. rb_objspace_t *objspace = &rb_objspace;
  3722. return objspace->profile.run ? Qtrue : Qfalse;
  3723. }
  3724. /*
  3725. * call-seq:
  3726. * GC::Profiler.enable -> nil
  3727. *
  3728. * Starts the GC profiler.
  3729. *
  3730. */
  3731. static VALUE
  3732. gc_profile_enable(void)
  3733. {
  3734. rb_objspace_t *objspace = &rb_objspace;
  3735. objspace->profile.run = TRUE;
  3736. return Qnil;
  3737. }
  3738. /*
  3739. * call-seq:
  3740. * GC::Profiler.disable -> nil
  3741. *
  3742. * Stops the GC profiler.
  3743. *
  3744. */
  3745. static VALUE
  3746. gc_profile_disable(void)
  3747. {
  3748. rb_objspace_t *objspace = &rb_objspace;
  3749. objspace->profile.run = FALSE;
  3750. return Qnil;
  3751. }
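/*
 * Typical profiler lifecycle, sketched with the methods defined above:
 *
 *    GC::Profiler.enabled?   #=> false, unless previously enabled
 *    GC::Profiler.enable
 *    GC::Profiler.enabled?   #=> true
 *    # ... code under measurement ...
 *    GC::Profiler.report
 *    GC::Profiler.disable
 *    GC::Profiler.clear      # discard the recorded data
 */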
  3752. /*
  3753. * Document-class: ObjectSpace
  3754. *
  3755. * The <code>ObjectSpace</code> module contains a number of routines
  3756. * that interact with the garbage collection facility and allow you to
  3757. * traverse all living objects with an iterator.
  3758. *
  3759. * <code>ObjectSpace</code> also provides support for object
  3760. * finalizers, procs that will be called when a specific object is
  3761. * about to be destroyed by garbage collection.
  3762. *
  3763. * include ObjectSpace
  3764. *
  3765. *
  3766. * a = "A"
  3767. * b = "B"
  3768. * c = "C"
  3769. *
  3770. *
  3771. * define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
  3772. * define_finalizer(a, proc {|id| puts "Finalizer two on #{id}" })
  3773. * define_finalizer(b, proc {|id| puts "Finalizer three on #{id}" })
  3774. *
  3775. * <em>produces:</em>
  3776. *
  3777. * Finalizer three on 537763470
  3778. * Finalizer one on 537763480
  3779. * Finalizer two on 537763480
  3780. *
  3781. */
  3782. /*
  3783. * Document-class: ObjectSpace::WeakMap
  3784. *
3785. * An <code>ObjectSpace::WeakMap</code> object holds references to any objects,
3786. * but those references do not prevent the objects from being garbage collected.
  3787. */
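/*
 * A minimal WeakMap sketch (variable names are arbitrary); the map does not
 * keep its entries alive, so a value may disappear after it becomes
 * otherwise unreachable:
 *
 *    map = ObjectSpace::WeakMap.new
 *    key = "session"
 *    map[key] = Object.new
 *    map[key]   #=> the stored object, while it is still alive
 *    GC.start
 *    map[key]   #=> may be nil once the object has been collected
 */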
  3788. /* Document-class: GC::Profiler
  3789. *
  3790. * The GC profiler provides access to information on GC runs including time,
  3791. * length and object space size.
  3792. *
  3793. * Example:
  3794. *
  3795. * GC::Profiler.enable
  3796. *
  3797. * require 'rdoc/rdoc'
  3798. *
  3799. * puts GC::Profiler.result
  3800. *
  3801. * GC::Profiler.disable
  3802. *
  3803. * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
  3804. */
  3805. /*
  3806. * The <code>GC</code> module provides an interface to Ruby's mark and
  3807. * sweep garbage collection mechanism. Some of the underlying methods
  3808. * are also available via the ObjectSpace module.
  3809. *
  3810. * You may obtain information about the operation of the GC through
  3811. * GC::Profiler.
  3812. */
  3813. void
  3814. Init_GC(void)
  3815. {
  3816. VALUE rb_mObSpace;
  3817. VALUE rb_mProfiler;
  3818. rb_mGC = rb_define_module("GC");
  3819. rb_define_singleton_method(rb_mGC, "start", rb_gc_start, 0);
  3820. rb_define_singleton_method(rb_mGC, "enable", rb_gc_enable, 0);
  3821. rb_define_singleton_method(rb_mGC, "disable", rb_gc_disable, 0);
  3822. rb_define_singleton_method(rb_mGC, "stress", gc_stress_get, 0);
  3823. rb_define_singleton_method(rb_mGC, "stress=", gc_stress_set, 1);
  3824. rb_define_singleton_method(rb_mGC, "count", gc_count, 0);
  3825. rb_define_singleton_method(rb_mGC, "stat", gc_stat, -1);
  3826. rb_define_method(rb_mGC, "garbage_collect", rb_gc_start, 0);
  3827. rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
  3828. rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
  3829. rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
  3830. rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
  3831. rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
  3832. rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
  3833. rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
  3834. rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
  3835. rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
  3836. rb_mObSpace = rb_define_module("ObjectSpace");
  3837. rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1);
  3838. rb_define_module_function(rb_mObSpace, "garbage_collect", rb_gc_start, 0);
  3839. rb_define_module_function(rb_mObSpace, "define_finalizer", define_final, -1);
  3840. rb_define_module_function(rb_mObSpace, "undefine_finalizer", undefine_final, 1);
  3841. rb_define_module_function(rb_mObSpace, "_id2ref", id2ref, 1);
  3842. nomem_error = rb_exc_new3(rb_eNoMemError,
  3843. rb_obj_freeze(rb_str_new2("failed to allocate memory")));
  3844. OBJ_TAINT(nomem_error);
  3845. OBJ_FREEZE(nomem_error);
  3846. rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
  3847. rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
  3848. rb_define_module_function(rb_mObSpace, "count_objects", count_objects, -1);
  3849. {
  3850. VALUE rb_cWeakMap = rb_define_class_under(rb_mObSpace, "WeakMap", rb_cObject);
  3851. rb_define_alloc_func(rb_cWeakMap, wmap_allocate);
  3852. rb_define_method(rb_cWeakMap, "[]=", wmap_aset, 2);
  3853. rb_define_method(rb_cWeakMap, "[]", wmap_aref, 1);
  3854. rb_define_private_method(rb_cWeakMap, "finalize", wmap_finalize, 1);
  3855. }
  3856. #if CALC_EXACT_MALLOC_SIZE
  3857. rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
  3858. rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
  3859. #endif
  3860. }
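/*
 * Quick reference for the Ruby-visible entry points registered above
 * (a sketch; see the individual method documentation for details):
 *
 *    GC.count                          # GC runs since process start
 *    GC.stat                           # Hash of GC statistics
 *    GC.stress = true                  # force GC at every allocation (very slow)
 *    ObjectSpace.count_objects         # Hash of object counts per type
 *    ObjectSpace.each_object(String) { |s| }   # yields every living String
 */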