
/gc.c

https://github.com/vuxuandung/ruby
  1. /**********************************************************************
  2. gc.c -
  3. $Author$
  4. created at: Tue Oct 5 09:44:46 JST 1993
  5. Copyright (C) 1993-2007 Yukihiro Matsumoto
  6. Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
  7. Copyright (C) 2000 Information-technology Promotion Agency, Japan
  8. **********************************************************************/
  9. #include "ruby/ruby.h"
  10. #include "ruby/st.h"
  11. #include "ruby/re.h"
  12. #include "ruby/io.h"
  13. #include "ruby/thread.h"
  14. #include "ruby/util.h"
  15. #include "eval_intern.h"
  16. #include "vm_core.h"
  17. #include "internal.h"
  18. #include "gc.h"
  19. #include "constant.h"
  20. #include "ruby_atomic.h"
  21. #include "probes.h"
  22. #include <stdio.h>
  23. #include <setjmp.h>
  24. #include <sys/types.h>
  25. #include <assert.h>
  26. #ifdef HAVE_SYS_TIME_H
  27. #include <sys/time.h>
  28. #endif
  29. #ifdef HAVE_SYS_RESOURCE_H
  30. #include <sys/resource.h>
  31. #endif
  32. #if defined(__native_client__) && defined(NACL_NEWLIB)
  33. # include "nacl/resource.h"
  34. # undef HAVE_POSIX_MEMALIGN
  35. # undef HAVE_MEMALIGN
  36. #endif
  37. #if defined _WIN32 || defined __CYGWIN__
  38. #include <windows.h>
  39. #elif defined(HAVE_POSIX_MEMALIGN)
  40. #elif defined(HAVE_MEMALIGN)
  41. #include <malloc.h>
  42. #endif
  43. #ifdef HAVE_VALGRIND_MEMCHECK_H
  44. # include <valgrind/memcheck.h>
  45. # ifndef VALGRIND_MAKE_MEM_DEFINED
  46. # define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE((p), (n))
  47. # endif
  48. # ifndef VALGRIND_MAKE_MEM_UNDEFINED
  49. # define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE((p), (n))
  50. # endif
  51. #else
  52. # define VALGRIND_MAKE_MEM_DEFINED(p, n) 0
  53. # define VALGRIND_MAKE_MEM_UNDEFINED(p, n) 0
  54. #endif
  55. #define rb_setjmp(env) RUBY_SETJMP(env)
  56. #define rb_jmp_buf rb_jmpbuf_t
  57. #ifndef GC_MALLOC_LIMIT
  58. #define GC_MALLOC_LIMIT 8000000
  59. #endif
  60. #define HEAP_MIN_SLOTS 10000
  61. #define FREE_MIN 4096
  62. typedef struct {
  63. unsigned int initial_malloc_limit;
  64. unsigned int initial_heap_min_slots;
  65. unsigned int initial_free_min;
  66. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  67. int gc_stress;
  68. #endif
  69. } ruby_gc_params_t;
  70. static ruby_gc_params_t initial_params = {
  71. GC_MALLOC_LIMIT,
  72. HEAP_MIN_SLOTS,
  73. FREE_MIN,
  74. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  75. FALSE,
  76. #endif
  77. };
  78. #define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
  79. #ifndef GC_PROFILE_MORE_DETAIL
  80. #define GC_PROFILE_MORE_DETAIL 0
  81. #endif
  82. typedef struct gc_profile_record {
  83. double gc_time;
  84. double gc_invoke_time;
  85. size_t heap_total_objects;
  86. size_t heap_use_size;
  87. size_t heap_total_size;
  88. int is_marked;
  89. #if GC_PROFILE_MORE_DETAIL
  90. double gc_mark_time;
  91. double gc_sweep_time;
  92. size_t heap_use_slots;
  93. size_t heap_live_objects;
  94. size_t heap_free_objects;
  95. int have_finalize;
  96. size_t allocate_increase;
  97. size_t allocate_limit;
  98. #endif
  99. } gc_profile_record;
  100. #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
  101. #pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
  102. #endif
  103. typedef struct RVALUE {
  104. union {
  105. struct {
  106. VALUE flags; /* always 0 for freed obj */
  107. struct RVALUE *next;
  108. } free;
  109. struct RBasic basic;
  110. struct RObject object;
  111. struct RClass klass;
  112. struct RFloat flonum;
  113. struct RString string;
  114. struct RArray array;
  115. struct RRegexp regexp;
  116. struct RHash hash;
  117. struct RData data;
  118. struct RTypedData typeddata;
  119. struct RStruct rstruct;
  120. struct RBignum bignum;
  121. struct RFile file;
  122. struct RNode node;
  123. struct RMatch match;
  124. struct RRational rational;
  125. struct RComplex complex;
  126. } as;
  127. #ifdef GC_DEBUG
  128. const char *file;
  129. int line;
  130. #endif
  131. } RVALUE;
  132. #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
  133. #pragma pack(pop)
  134. #endif
  135. struct heaps_slot {
  136. struct heaps_header *header;
  137. uintptr_t *bits;
  138. RVALUE *freelist;
  139. struct heaps_slot *next;
  140. struct heaps_slot *prev;
  141. struct heaps_slot *free_next;
  142. };
  143. struct heaps_header {
  144. struct heaps_slot *base;
  145. uintptr_t *bits;
  146. RVALUE *start;
  147. RVALUE *end;
  148. size_t limit;
  149. };
  150. struct heaps_free_bitmap {
  151. struct heaps_free_bitmap *next;
  152. };
  153. struct gc_list {
  154. VALUE *varptr;
  155. struct gc_list *next;
  156. };
  157. #define STACK_CHUNK_SIZE 500
  158. typedef struct stack_chunk {
  159. VALUE data[STACK_CHUNK_SIZE];
  160. struct stack_chunk *next;
  161. } stack_chunk_t;
  162. typedef struct mark_stack {
  163. stack_chunk_t *chunk;
  164. stack_chunk_t *cache;
  165. size_t index;
  166. size_t limit;
  167. size_t cache_size;
  168. size_t unused_cache_size;
  169. } mark_stack_t;
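/*
 * Editorial note (not part of the original source): the mark stack is a
 * singly linked list of fixed-size chunks, so marking can grow its work
 * list without deep C recursion.  A rough picture, assuming the push/pop
 * helpers defined later in this file:
 *
 *   chunk -> [ data[0 .. index-1] in use, up to STACK_CHUNK_SIZE ] -> next -> ...
 *
 * 'cache' holds spare chunks for reuse so that growing the stack does not
 * always hit malloc().
 */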
  170. #ifndef CALC_EXACT_MALLOC_SIZE
  171. #define CALC_EXACT_MALLOC_SIZE 0
  172. #endif
  173. typedef struct rb_objspace {
  174. struct {
  175. size_t limit;
  176. size_t increase;
  177. #if CALC_EXACT_MALLOC_SIZE
  178. size_t allocated_size;
  179. size_t allocations;
  180. #endif
  181. } malloc_params;
  182. struct {
  183. size_t increment;
  184. struct heaps_slot *ptr;
  185. struct heaps_slot *sweep_slots;
  186. struct heaps_slot *free_slots;
  187. struct heaps_header **sorted;
  188. size_t length;
  189. size_t used;
  190. struct heaps_free_bitmap *free_bitmap;
  191. RVALUE *range[2];
  192. struct heaps_header *freed;
  193. size_t free_num;
  194. size_t free_min;
  195. size_t final_num;
  196. size_t do_heap_free;
  197. } heap;
  198. struct {
  199. int dont_gc;
  200. int dont_lazy_sweep;
  201. int during_gc;
  202. rb_atomic_t finalizing;
  203. } flags;
  204. struct {
  205. st_table *table;
  206. RVALUE *deferred;
  207. } final;
  208. mark_stack_t mark_stack;
  209. struct {
  210. int run;
  211. gc_profile_record *record;
  212. size_t count;
  213. size_t size;
  214. double invoke_time;
  215. } profile;
  216. struct gc_list *global_list;
  217. size_t count;
  218. size_t total_allocated_object_num;
  219. size_t total_freed_object_num;
  220. int gc_stress;
  221. struct mark_func_data_struct {
  222. void *data;
  223. void (*mark_func)(VALUE v, void *data);
  224. } *mark_func_data;
  225. } rb_objspace_t;
  226. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  227. #define rb_objspace (*GET_VM()->objspace)
  228. #define ruby_initial_gc_stress initial_params.gc_stress
  229. int *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
  230. #else
  231. static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT}};
  232. int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
  233. #endif
  234. #define malloc_limit objspace->malloc_params.limit
  235. #define malloc_increase objspace->malloc_params.increase
  236. #define heaps objspace->heap.ptr
  237. #define heaps_length objspace->heap.length
  238. #define heaps_used objspace->heap.used
  239. #define lomem objspace->heap.range[0]
  240. #define himem objspace->heap.range[1]
  241. #define heaps_inc objspace->heap.increment
  242. #define heaps_freed objspace->heap.freed
  243. #define dont_gc objspace->flags.dont_gc
  244. #define during_gc objspace->flags.during_gc
  245. #define finalizing objspace->flags.finalizing
  246. #define finalizer_table objspace->final.table
  247. #define deferred_final_list objspace->final.deferred
  248. #define global_List objspace->global_list
  249. #define ruby_gc_stress objspace->gc_stress
  250. #define initial_malloc_limit initial_params.initial_malloc_limit
  251. #define initial_heap_min_slots initial_params.initial_heap_min_slots
  252. #define initial_free_min initial_params.initial_free_min
  253. #define is_lazy_sweeping(objspace) ((objspace)->heap.sweep_slots != 0)
  254. #if SIZEOF_LONG == SIZEOF_VOIDP
  255. # define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
  256. # define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
  257. #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
  258. # define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
  259. # define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
  260. ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
  261. #else
  262. # error not supported
  263. #endif
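/*
 * Editorial sketch (not part of the original source): the id <-> reference
 * round trip on a build where SIZEOF_LONG == SIZEOF_VOIDP.  Heap pointers
 * are multiples of sizeof(RVALUE), so the low bit is free to carry
 * FIXNUM_FLAG:
 *
 *   VALUE obj = ...;                        // non-immediate heap object
 *   VALUE oid = nonspecial_obj_id(obj);     // obj | FIXNUM_FLAG
 *   VALUE ref = obj_id_to_ref(oid);         // oid ^ FIXNUM_FLAG == obj
 */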
  264. #define RANY(o) ((RVALUE*)(o))
  265. #define has_free_object (objspace->heap.free_slots && objspace->heap.free_slots->freelist)
  266. #define HEAP_HEADER(p) ((struct heaps_header *)(p))
  267. #define GET_HEAP_HEADER(x) (HEAP_HEADER((uintptr_t)(x) & ~(HEAP_ALIGN_MASK)))
  268. #define GET_HEAP_SLOT(x) (GET_HEAP_HEADER(x)->base)
  269. #define GET_HEAP_BITMAP(x) (GET_HEAP_HEADER(x)->bits)
  270. #define NUM_IN_SLOT(p) (((uintptr_t)(p) & HEAP_ALIGN_MASK)/sizeof(RVALUE))
  271. #define BITMAP_INDEX(p) (NUM_IN_SLOT(p) / (sizeof(uintptr_t) * CHAR_BIT))
  272. #define BITMAP_OFFSET(p) (NUM_IN_SLOT(p) & ((sizeof(uintptr_t) * CHAR_BIT)-1))
  273. #define MARKED_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] & ((uintptr_t)1 << BITMAP_OFFSET(p)))
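/*
 * Editorial sketch (not part of the original source): every heap slot is
 * allocated on a HEAP_ALIGN boundary, so masking an object pointer with
 * ~HEAP_ALIGN_MASK recovers its heaps_header, and the object's offset
 * within the slot selects one bit in the mark bitmap.  Assuming a 64-bit
 * build (sizeof(uintptr_t) * CHAR_BIT == 64):
 *
 *   RVALUE *p = ...;                              // object in some slot
 *   uintptr_t *bits = GET_HEAP_BITMAP(p);         // header->bits
 *   size_t n = NUM_IN_SLOT(p);                    // index within the slot
 *   int marked = (bits[n / 64] >> (n % 64)) & 1;  // same test as MARKED_IN_BITMAP
 */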
  274. #ifndef HEAP_ALIGN_LOG
  275. /* default tiny heap size: 16KB */
  276. #define HEAP_ALIGN_LOG 14
  277. #endif
  278. #define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
  279. enum {
  280. HEAP_ALIGN = (1UL << HEAP_ALIGN_LOG),
  281. HEAP_ALIGN_MASK = (~(~0UL << HEAP_ALIGN_LOG)),
  282. REQUIRED_SIZE_BY_MALLOC = (sizeof(size_t) * 5),
  283. HEAP_SIZE = (HEAP_ALIGN - REQUIRED_SIZE_BY_MALLOC),
  284. HEAP_OBJ_LIMIT = (unsigned int)((HEAP_SIZE - sizeof(struct heaps_header))/sizeof(struct RVALUE)),
  285. HEAP_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_SIZE, sizeof(struct RVALUE)), sizeof(uintptr_t) * CHAR_BIT)
  286. };
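/*
 * Editorial worked example (not part of the original source), assuming a
 * 64-bit build where sizeof(RVALUE) == 40, sizeof(size_t) == 8 and
 * HEAP_ALIGN_LOG == 14:
 *
 *   HEAP_ALIGN        = 1 << 14                       = 16384 bytes per slot
 *   HEAP_SIZE         = 16384 - 5*8                   = 16344 usable bytes
 *   HEAP_OBJ_LIMIT    = (16344 - 40) / 40             = 407 objects per slot
 *   HEAP_BITMAP_LIMIT = ceil(ceil(16344 / 40) / 64)   = 7 words of mark bits
 *
 * Other builds (32-bit, different malloc bookkeeping) give different values.
 */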
  287. int ruby_gc_debug_indent = 0;
  288. VALUE rb_mGC;
  289. extern st_table *rb_class_tbl;
  290. int ruby_disable_gc_stress = 0;
  291. static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
  292. static VALUE define_final0(VALUE obj, VALUE block);
  293. VALUE rb_define_final(VALUE obj, VALUE block);
  294. VALUE rb_undefine_final(VALUE obj);
  295. static void run_final(rb_objspace_t *objspace, VALUE obj);
  296. static void initial_expand_heap(rb_objspace_t *objspace);
  297. static void negative_size_allocation_error(const char *);
  298. static void *aligned_malloc(size_t, size_t);
  299. static void aligned_free(void *);
  300. static void init_mark_stack(mark_stack_t *stack);
  301. static VALUE lazy_sweep_enable(void);
  302. static int garbage_collect(rb_objspace_t *);
  303. static int gc_prepare_free_objects(rb_objspace_t *);
  304. static void mark_tbl(rb_objspace_t *, st_table *);
  305. static void rest_sweep(rb_objspace_t *);
  306. static void gc_mark_stacked_objects(rb_objspace_t *);
  307. static double getrusage_time(void);
  308. static inline void gc_prof_timer_start(rb_objspace_t *);
  309. static inline void gc_prof_timer_stop(rb_objspace_t *, int);
  310. static inline void gc_prof_mark_timer_start(rb_objspace_t *);
  311. static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
  312. static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
  313. static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
  314. static inline void gc_prof_set_malloc_info(rb_objspace_t *);
  315. /*
  316. --------------------------- ObjectSpace -----------------------------
  317. */
  318. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  319. rb_objspace_t *
  320. rb_objspace_alloc(void)
  321. {
  322. rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t));
  323. memset(objspace, 0, sizeof(*objspace));
  324. malloc_limit = initial_malloc_limit;
  325. ruby_gc_stress = ruby_initial_gc_stress;
  326. return objspace;
  327. }
  328. #endif
  329. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  330. static void free_stack_chunks(mark_stack_t *);
  331. void
  332. rb_objspace_free(rb_objspace_t *objspace)
  333. {
  334. rest_sweep(objspace);
  335. if (objspace->profile.record) {
  336. free(objspace->profile.record);
  337. objspace->profile.record = 0;
  338. }
  339. if (global_List) {
  340. struct gc_list *list, *next;
  341. for (list = global_List; list; list = next) {
  342. next = list->next;
  343. xfree(list);
  344. }
  345. }
  346. if (objspace->heap.free_bitmap) {
  347. struct heaps_free_bitmap *list, *next;
  348. for (list = objspace->heap.free_bitmap; list; list = next) {
  349. next = list->next;
  350. free(list);
  351. }
  352. }
  353. if (objspace->heap.sorted) {
  354. size_t i;
  355. for (i = 0; i < heaps_used; ++i) {
  356. free(objspace->heap.sorted[i]->bits);
  357. aligned_free(objspace->heap.sorted[i]);
  358. }
  359. free(objspace->heap.sorted);
  360. heaps_used = 0;
  361. heaps = 0;
  362. }
  363. free_stack_chunks(&objspace->mark_stack);
  364. free(objspace);
  365. }
  366. #endif
  367. void
  368. rb_global_variable(VALUE *var)
  369. {
  370. rb_gc_register_address(var);
  371. }
  372. static void
  373. allocate_sorted_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
  374. {
  375. struct heaps_header **p;
  376. struct heaps_free_bitmap *bits;
  377. size_t size, add, i;
  378. size = next_heaps_length*sizeof(struct heaps_header *);
  379. add = next_heaps_length - heaps_used;
  380. if (heaps_used > 0) {
  381. p = (struct heaps_header **)realloc(objspace->heap.sorted, size);
  382. if (p) objspace->heap.sorted = p;
  383. }
  384. else {
  385. p = objspace->heap.sorted = (struct heaps_header **)malloc(size);
  386. }
  387. if (p == 0) {
  388. during_gc = 0;
  389. rb_memerror();
  390. }
  391. for (i = 0; i < add; i++) {
  392. bits = (struct heaps_free_bitmap *)malloc(HEAP_BITMAP_LIMIT * sizeof(uintptr_t));
  393. if (bits == 0) {
  394. during_gc = 0;
  395. rb_memerror();
  396. return;
  397. }
  398. bits->next = objspace->heap.free_bitmap;
  399. objspace->heap.free_bitmap = bits;
  400. }
  401. }
  402. static void
  403. link_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
  404. {
  405. slot->free_next = objspace->heap.free_slots;
  406. objspace->heap.free_slots = slot;
  407. }
  408. static void
  409. unlink_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
  410. {
  411. objspace->heap.free_slots = slot->free_next;
  412. slot->free_next = NULL;
  413. }
  414. static void
  415. assign_heap_slot(rb_objspace_t *objspace)
  416. {
  417. RVALUE *p, *pend, *membase;
  418. struct heaps_slot *slot;
  419. size_t hi, lo, mid;
  420. size_t objs;
  421. objs = HEAP_OBJ_LIMIT;
  422. p = (RVALUE*)aligned_malloc(HEAP_ALIGN, HEAP_SIZE);
  423. if (p == 0) {
  424. during_gc = 0;
  425. rb_memerror();
  426. }
  427. slot = (struct heaps_slot *)malloc(sizeof(struct heaps_slot));
  428. if (slot == 0) {
  429. aligned_free(p);
  430. during_gc = 0;
  431. rb_memerror();
  432. }
  433. MEMZERO((void*)slot, struct heaps_slot, 1);
  434. slot->next = heaps;
  435. if (heaps) heaps->prev = slot;
  436. heaps = slot;
  437. membase = p;
  438. p = (RVALUE*)((VALUE)p + sizeof(struct heaps_header));
  439. if ((VALUE)p % sizeof(RVALUE) != 0) {
  440. p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
  441. objs = (HEAP_SIZE - (size_t)((VALUE)p - (VALUE)membase))/sizeof(RVALUE);
  442. }
  443. lo = 0;
  444. hi = heaps_used;
  445. while (lo < hi) {
  446. register RVALUE *mid_membase;
  447. mid = (lo + hi) / 2;
  448. mid_membase = (RVALUE *)objspace->heap.sorted[mid];
  449. if (mid_membase < membase) {
  450. lo = mid + 1;
  451. }
  452. else if (mid_membase > membase) {
  453. hi = mid;
  454. }
  455. else {
  456. rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid);
  457. }
  458. }
  459. if (hi < heaps_used) {
  460. MEMMOVE(&objspace->heap.sorted[hi+1], &objspace->heap.sorted[hi], struct heaps_header*, heaps_used - hi);
  461. }
  462. heaps->header = (struct heaps_header *)membase;
  463. objspace->heap.sorted[hi] = heaps->header;
  464. objspace->heap.sorted[hi]->start = p;
  465. objspace->heap.sorted[hi]->end = (p + objs);
  466. objspace->heap.sorted[hi]->base = heaps;
  467. objspace->heap.sorted[hi]->limit = objs;
  468. assert(objspace->heap.free_bitmap != NULL);
  469. heaps->bits = (uintptr_t *)objspace->heap.free_bitmap;
  470. objspace->heap.sorted[hi]->bits = (uintptr_t *)objspace->heap.free_bitmap;
  471. objspace->heap.free_bitmap = objspace->heap.free_bitmap->next;
  472. memset(heaps->bits, 0, HEAP_BITMAP_LIMIT * sizeof(uintptr_t));
  473. pend = p + objs;
  474. if (lomem == 0 || lomem > p) lomem = p;
  475. if (himem < pend) himem = pend;
  476. heaps_used++;
  477. while (p < pend) {
  478. p->as.free.flags = 0;
  479. p->as.free.next = heaps->freelist;
  480. heaps->freelist = p;
  481. p++;
  482. }
  483. link_free_heap_slot(objspace, heaps);
  484. }
  485. static void
  486. add_heap_slots(rb_objspace_t *objspace, size_t add)
  487. {
  488. size_t i;
  489. size_t next_heaps_length;
  490. next_heaps_length = heaps_used + add;
  491. if (next_heaps_length > heaps_length) {
  492. allocate_sorted_heaps(objspace, next_heaps_length);
  493. heaps_length = next_heaps_length;
  494. }
  495. for (i = 0; i < add; i++) {
  496. assign_heap_slot(objspace);
  497. }
  498. heaps_inc = 0;
  499. }
  500. static void
  501. init_heap(rb_objspace_t *objspace)
  502. {
  503. add_heap_slots(objspace, HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT);
  504. init_mark_stack(&objspace->mark_stack);
  505. #ifdef USE_SIGALTSTACK
  506. {
  507. /* altstacks of other threads are allocated elsewhere */
  508. rb_thread_t *th = GET_THREAD();
  509. void *tmp = th->altstack;
  510. th->altstack = malloc(rb_sigaltstack_size());
  511. free(tmp); /* free previously allocated area */
  512. }
  513. #endif
  514. objspace->profile.invoke_time = getrusage_time();
  515. finalizer_table = st_init_numtable();
  516. }
  517. static void
  518. initial_expand_heap(rb_objspace_t *objspace)
  519. {
  520. size_t min_size = initial_heap_min_slots / HEAP_OBJ_LIMIT;
  521. if (min_size > heaps_used) {
  522. add_heap_slots(objspace, min_size - heaps_used);
  523. }
  524. }
  525. static void
  526. set_heaps_increment(rb_objspace_t *objspace)
  527. {
  528. size_t next_heaps_length = (size_t)(heaps_used * 1.8);
  529. if (next_heaps_length == heaps_used) {
  530. next_heaps_length++;
  531. }
  532. heaps_inc = next_heaps_length - heaps_used;
  533. if (next_heaps_length > heaps_length) {
  534. allocate_sorted_heaps(objspace, next_heaps_length);
  535. heaps_length = next_heaps_length;
  536. }
  537. }
  538. static int
  539. heaps_increment(rb_objspace_t *objspace)
  540. {
  541. if (heaps_inc > 0) {
  542. assign_heap_slot(objspace);
  543. heaps_inc--;
  544. return TRUE;
  545. }
  546. return FALSE;
  547. }
  548. static VALUE
  549. newobj(VALUE klass, VALUE flags)
  550. {
  551. rb_objspace_t *objspace = &rb_objspace;
  552. VALUE obj;
  553. if (UNLIKELY(during_gc)) {
  554. dont_gc = 1;
  555. during_gc = 0;
  556. rb_bug("object allocation during garbage collection phase");
  557. }
  558. if (UNLIKELY(ruby_gc_stress && !ruby_disable_gc_stress)) {
  559. if (!garbage_collect(objspace)) {
  560. during_gc = 0;
  561. rb_memerror();
  562. }
  563. }
  564. if (UNLIKELY(!has_free_object)) {
  565. if (!gc_prepare_free_objects(objspace)) {
  566. during_gc = 0;
  567. rb_memerror();
  568. }
  569. }
  570. obj = (VALUE)objspace->heap.free_slots->freelist;
  571. objspace->heap.free_slots->freelist = RANY(obj)->as.free.next;
  572. if (objspace->heap.free_slots->freelist == NULL) {
  573. unlink_free_heap_slot(objspace, objspace->heap.free_slots);
  574. }
  575. MEMZERO((void*)obj, RVALUE, 1);
  576. #ifdef GC_DEBUG
  577. RANY(obj)->file = rb_sourcefile();
  578. RANY(obj)->line = rb_sourceline();
  579. #endif
  580. objspace->total_allocated_object_num++;
  581. return obj;
  582. }
  583. VALUE
  584. rb_newobj(void)
  585. {
  586. return newobj(0, T_NONE);
  587. }
  588. VALUE
  589. rb_newobj_of(VALUE klass, VALUE flags)
  590. {
  591. VALUE obj;
  592. obj = newobj(klass, flags);
  593. OBJSETUP(obj, klass, flags);
  594. return obj;
  595. }
  596. NODE*
  597. rb_node_newnode(enum node_type type, VALUE a0, VALUE a1, VALUE a2)
  598. {
  599. NODE *n = (NODE*)rb_newobj();
  600. n->flags |= T_NODE;
  601. nd_set_type(n, type);
  602. n->u1.value = a0;
  603. n->u2.value = a1;
  604. n->u3.value = a2;
  605. return n;
  606. }
  607. VALUE
  608. rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
  609. {
  610. NEWOBJ(data, struct RData);
  611. if (klass) Check_Type(klass, T_CLASS);
  612. OBJSETUP(data, klass, T_DATA);
  613. data->data = datap;
  614. data->dfree = dfree;
  615. data->dmark = dmark;
  616. return (VALUE)data;
  617. }
  618. VALUE
  619. rb_data_typed_object_alloc(VALUE klass, void *datap, const rb_data_type_t *type)
  620. {
  621. NEWOBJ(data, struct RTypedData);
  622. if (klass) Check_Type(klass, T_CLASS);
  623. OBJSETUP(data, klass, T_DATA);
  624. data->data = datap;
  625. data->typed_flag = 1;
  626. data->type = type;
  627. return (VALUE)data;
  628. }
  629. size_t
  630. rb_objspace_data_type_memsize(VALUE obj)
  631. {
  632. if (RTYPEDDATA_P(obj) && RTYPEDDATA_TYPE(obj)->function.dsize) {
  633. return RTYPEDDATA_TYPE(obj)->function.dsize(RTYPEDDATA_DATA(obj));
  634. }
  635. else {
  636. return 0;
  637. }
  638. }
  639. const char *
  640. rb_objspace_data_type_name(VALUE obj)
  641. {
  642. if (RTYPEDDATA_P(obj)) {
  643. return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
  644. }
  645. else {
  646. return 0;
  647. }
  648. }
  649. static void gc_mark(rb_objspace_t *objspace, VALUE ptr);
  650. static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);
  651. static inline int
  652. is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
  653. {
  654. register RVALUE *p = RANY(ptr);
  655. register struct heaps_header *heap;
  656. register size_t hi, lo, mid;
  657. if (p < lomem || p > himem) return FALSE;
  658. if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
  659. /* check whether p points into the object heap, using binary search */
  660. lo = 0;
  661. hi = heaps_used;
  662. while (lo < hi) {
  663. mid = (lo + hi) / 2;
  664. heap = objspace->heap.sorted[mid];
  665. if (heap->start <= p) {
  666. if (p < heap->end)
  667. return TRUE;
  668. lo = mid + 1;
  669. }
  670. else {
  671. hi = mid;
  672. }
  673. }
  674. return FALSE;
  675. }
  676. static int
  677. free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data)
  678. {
  679. if (!me->mark) {
  680. rb_free_method_entry(me);
  681. }
  682. return ST_CONTINUE;
  683. }
  684. void
  685. rb_free_m_table(st_table *tbl)
  686. {
  687. st_foreach(tbl, free_method_entry_i, 0);
  688. st_free_table(tbl);
  689. }
  690. static int
  691. free_const_entry_i(ID key, rb_const_entry_t *ce, st_data_t data)
  692. {
  693. xfree(ce);
  694. return ST_CONTINUE;
  695. }
  696. void
  697. rb_free_const_table(st_table *tbl)
  698. {
  699. st_foreach(tbl, free_const_entry_i, 0);
  700. st_free_table(tbl);
  701. }
  702. static int obj_free(rb_objspace_t *, VALUE);
  703. static inline struct heaps_slot *
  704. add_slot_local_freelist(rb_objspace_t *objspace, RVALUE *p)
  705. {
  706. struct heaps_slot *slot;
  707. (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
  708. p->as.free.flags = 0;
  709. slot = GET_HEAP_SLOT(p);
  710. p->as.free.next = slot->freelist;
  711. slot->freelist = p;
  712. return slot;
  713. }
  714. static void
  715. unlink_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
  716. {
  717. if (slot->prev)
  718. slot->prev->next = slot->next;
  719. if (slot->next)
  720. slot->next->prev = slot->prev;
  721. if (heaps == slot)
  722. heaps = slot->next;
  723. if (objspace->heap.sweep_slots == slot)
  724. objspace->heap.sweep_slots = slot->next;
  725. slot->prev = NULL;
  726. slot->next = NULL;
  727. }
  728. static void
  729. free_unused_heaps(rb_objspace_t *objspace)
  730. {
  731. size_t i, j;
  732. struct heaps_header *last = 0;
  733. for (i = j = 1; j < heaps_used; i++) {
  734. if (objspace->heap.sorted[i]->limit == 0) {
  735. struct heaps_header* h = objspace->heap.sorted[i];
  736. ((struct heaps_free_bitmap *)(h->bits))->next =
  737. objspace->heap.free_bitmap;
  738. objspace->heap.free_bitmap = (struct heaps_free_bitmap *)h->bits;
  739. if (!last) {
  740. last = objspace->heap.sorted[i];
  741. }
  742. else {
  743. aligned_free(objspace->heap.sorted[i]);
  744. }
  745. heaps_used--;
  746. }
  747. else {
  748. if (i != j) {
  749. objspace->heap.sorted[j] = objspace->heap.sorted[i];
  750. }
  751. j++;
  752. }
  753. }
  754. if (last) {
  755. if (last < heaps_freed) {
  756. aligned_free(heaps_freed);
  757. heaps_freed = last;
  758. }
  759. else {
  760. aligned_free(last);
  761. }
  762. }
  763. }
  764. static inline void
  765. make_deferred(RVALUE *p)
  766. {
  767. p->as.basic.flags = (p->as.basic.flags & ~T_MASK) | T_ZOMBIE;
  768. }
  769. static inline void
  770. make_io_deferred(RVALUE *p)
  771. {
  772. rb_io_t *fptr = p->as.file.fptr;
  773. make_deferred(p);
  774. p->as.data.dfree = (void (*)(void*))rb_io_fptr_finalize;
  775. p->as.data.data = fptr;
  776. }
  777. static int
  778. obj_free(rb_objspace_t *objspace, VALUE obj)
  779. {
  780. switch (BUILTIN_TYPE(obj)) {
  781. case T_NIL:
  782. case T_FIXNUM:
  783. case T_TRUE:
  784. case T_FALSE:
  785. rb_bug("obj_free() called for broken object");
  786. break;
  787. }
  788. if (FL_TEST(obj, FL_EXIVAR)) {
  789. rb_free_generic_ivar((VALUE)obj);
  790. FL_UNSET(obj, FL_EXIVAR);
  791. }
  792. switch (BUILTIN_TYPE(obj)) {
  793. case T_OBJECT:
  794. if (!(RANY(obj)->as.basic.flags & ROBJECT_EMBED) &&
  795. RANY(obj)->as.object.as.heap.ivptr) {
  796. xfree(RANY(obj)->as.object.as.heap.ivptr);
  797. }
  798. break;
  799. case T_MODULE:
  800. case T_CLASS:
  801. rb_clear_cache_by_class((VALUE)obj);
  802. if (RCLASS_M_TBL(obj)) {
  803. rb_free_m_table(RCLASS_M_TBL(obj));
  804. }
  805. if (RCLASS_IV_TBL(obj)) {
  806. st_free_table(RCLASS_IV_TBL(obj));
  807. }
  808. if (RCLASS_CONST_TBL(obj)) {
  809. rb_free_const_table(RCLASS_CONST_TBL(obj));
  810. }
  811. if (RCLASS_IV_INDEX_TBL(obj)) {
  812. st_free_table(RCLASS_IV_INDEX_TBL(obj));
  813. }
  814. xfree(RANY(obj)->as.klass.ptr);
  815. break;
  816. case T_STRING:
  817. rb_str_free(obj);
  818. break;
  819. case T_ARRAY:
  820. rb_ary_free(obj);
  821. break;
  822. case T_HASH:
  823. if (RANY(obj)->as.hash.ntbl) {
  824. st_free_table(RANY(obj)->as.hash.ntbl);
  825. }
  826. break;
  827. case T_REGEXP:
  828. if (RANY(obj)->as.regexp.ptr) {
  829. onig_free(RANY(obj)->as.regexp.ptr);
  830. }
  831. break;
  832. case T_DATA:
  833. if (DATA_PTR(obj)) {
  834. if (RTYPEDDATA_P(obj)) {
  835. RDATA(obj)->dfree = RANY(obj)->as.typeddata.type->function.dfree;
  836. }
  837. if (RANY(obj)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
  838. xfree(DATA_PTR(obj));
  839. }
  840. else if (RANY(obj)->as.data.dfree) {
  841. make_deferred(RANY(obj));
  842. return 1;
  843. }
  844. }
  845. break;
  846. case T_MATCH:
  847. if (RANY(obj)->as.match.rmatch) {
  848. struct rmatch *rm = RANY(obj)->as.match.rmatch;
  849. onig_region_free(&rm->regs, 0);
  850. if (rm->char_offset)
  851. xfree(rm->char_offset);
  852. xfree(rm);
  853. }
  854. break;
  855. case T_FILE:
  856. if (RANY(obj)->as.file.fptr) {
  857. make_io_deferred(RANY(obj));
  858. return 1;
  859. }
  860. break;
  861. case T_RATIONAL:
  862. case T_COMPLEX:
  863. break;
  864. case T_ICLASS:
  865. /* iClass shares table with the module */
  866. xfree(RANY(obj)->as.klass.ptr);
  867. break;
  868. case T_FLOAT:
  869. break;
  870. case T_BIGNUM:
  871. if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) {
  872. xfree(RBIGNUM_DIGITS(obj));
  873. }
  874. break;
  875. case T_NODE:
  876. switch (nd_type(obj)) {
  877. case NODE_SCOPE:
  878. if (RANY(obj)->as.node.u1.tbl) {
  879. xfree(RANY(obj)->as.node.u1.tbl);
  880. }
  881. break;
  882. case NODE_ARGS:
  883. if (RANY(obj)->as.node.u3.args) {
  884. xfree(RANY(obj)->as.node.u3.args);
  885. }
  886. break;
  887. case NODE_ALLOCA:
  888. xfree(RANY(obj)->as.node.u1.node);
  889. break;
  890. }
  891. break; /* no need to free iv_tbl */
  892. case T_STRUCT:
  893. if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
  894. RANY(obj)->as.rstruct.as.heap.ptr) {
  895. xfree(RANY(obj)->as.rstruct.as.heap.ptr);
  896. }
  897. break;
  898. default:
  899. rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
  900. BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
  901. }
  902. return 0;
  903. }
  904. void
  905. Init_heap(void)
  906. {
  907. init_heap(&rb_objspace);
  908. }
  909. typedef int each_obj_callback(void *, void *, size_t, void *);
  910. struct each_obj_args {
  911. each_obj_callback *callback;
  912. void *data;
  913. };
  914. static VALUE
  915. objspace_each_objects(VALUE arg)
  916. {
  917. size_t i;
  918. RVALUE *membase = 0;
  919. RVALUE *pstart, *pend;
  920. rb_objspace_t *objspace = &rb_objspace;
  921. struct each_obj_args *args = (struct each_obj_args *)arg;
  922. volatile VALUE v;
  923. i = 0;
  924. while (i < heaps_used) {
  925. while (0 < i && (uintptr_t)membase < (uintptr_t)objspace->heap.sorted[i-1])
  926. i--;
  927. while (i < heaps_used && (uintptr_t)objspace->heap.sorted[i] <= (uintptr_t)membase)
  928. i++;
  929. if (heaps_used <= i)
  930. break;
  931. membase = (RVALUE *)objspace->heap.sorted[i];
  932. pstart = objspace->heap.sorted[i]->start;
  933. pend = pstart + objspace->heap.sorted[i]->limit;
  934. for (; pstart != pend; pstart++) {
  935. if (pstart->as.basic.flags) {
  936. v = (VALUE)pstart; /* acquire to save this object */
  937. break;
  938. }
  939. }
  940. if (pstart != pend) {
  941. if ((*args->callback)(pstart, pend, sizeof(RVALUE), args->data)) {
  942. break;
  943. }
  944. }
  945. }
  946. RB_GC_GUARD(v);
  947. return Qnil;
  948. }
  949. /*
  950. * rb_objspace_each_objects() is a special C API for walking through the
  951. * Ruby object space. It is difficult to use correctly; unless you have
  952. * read this function's source and understand what it does, you should
  953. * not use it.
  954. *
  955. * 'callback' is invoked several times (once per heap slot in the
  956. * current implementation) with:
  957. * vstart: a pointer to the first living object of the heap slot.
  958. * vend: a pointer just past the end of the valid heap slot area.
  959. * stride: the distance to the next VALUE.
  960. *
  961. * If callback() returns non-zero, the iteration stops.
  962. *
  963. * A sample callback that iterates over live objects:
  964. *
  965. * int sample_callback(void *vstart, void *vend, size_t stride, void *data) {
  966. *     VALUE v = (VALUE)vstart;
  967. *     for (; v != (VALUE)vend; v += stride) {
  968. *         if (RBASIC(v)->flags) { // liveness check
  969. *             // do something with the live object 'v'
  970. *         }
  971. *     }
  972. *     return 0; // continue the iteration
  973. * }
  974. *
  975. * Note: 'vstart' is not the top of the heap slot. It points at the first
  976. * living object so that at least one object is held, which avoids GC
  977. * issues. This means you cannot walk through every Ruby object slot,
  978. * freed slots included.
  979. *
  980. * Note: In this implementation, 'stride' is the same as sizeof(RVALUE).
  981. * However, variable values may be passed as 'stride' for various
  982. * reasons, so use 'stride' rather than a hard-coded constant when
  983. * iterating.
  984. */
  985. void
  986. rb_objspace_each_objects(each_obj_callback *callback, void *data)
  987. {
  988. struct each_obj_args args;
  989. rb_objspace_t *objspace = &rb_objspace;
  990. rest_sweep(objspace);
  991. objspace->flags.dont_lazy_sweep = TRUE;
  992. args.callback = callback;
  993. args.data = data;
  994. rb_ensure(objspace_each_objects, (VALUE)&args, lazy_sweep_enable, Qnil);
  995. }
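/*
 * Editorial usage sketch (not part of the original source): how extension
 * code might drive rb_objspace_each_objects().  The callback and counter
 * below are hypothetical names, shown only for illustration.
 */
#if 0
static int
count_live_i(void *vstart, void *vend, size_t stride, void *data)
{
    size_t *count = (size_t *)data;
    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        if (RBASIC(v)->flags) (*count)++;   /* liveness check */
    }
    return 0;   /* non-zero would stop the iteration */
}

static size_t
count_live_objects(void)
{
    size_t count = 0;
    rb_objspace_each_objects(count_live_i, &count);
    return count;
}
#endif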
  996. struct os_each_struct {
  997. size_t num;
  998. VALUE of;
  999. };
  1000. static int
  1001. internal_object_p(VALUE obj)
  1002. {
  1003. RVALUE *p = (RVALUE *)obj;
  1004. if (p->as.basic.flags) {
  1005. switch (BUILTIN_TYPE(p)) {
  1006. case T_NONE:
  1007. case T_ICLASS:
  1008. case T_NODE:
  1009. case T_ZOMBIE:
  1010. break;
  1011. case T_CLASS:
  1012. if (FL_TEST(p, FL_SINGLETON))
  1013. break;
  1014. default:
  1015. if (!p->as.basic.klass) break;
  1016. return 0;
  1017. }
  1018. }
  1019. return 1;
  1020. }
  1021. int
  1022. rb_objspace_internal_object_p(VALUE obj)
  1023. {
  1024. return internal_object_p(obj);
  1025. }
  1026. static int
  1027. os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
  1028. {
  1029. struct os_each_struct *oes = (struct os_each_struct *)data;
  1030. RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend;
  1031. for (; p != pend; p++) {
  1032. volatile VALUE v = (VALUE)p;
  1033. if (!internal_object_p(v)) {
  1034. if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
  1035. rb_yield(v);
  1036. oes->num++;
  1037. }
  1038. }
  1039. }
  1040. return 0;
  1041. }
  1042. static VALUE
  1043. os_obj_of(VALUE of)
  1044. {
  1045. struct os_each_struct oes;
  1046. oes.num = 0;
  1047. oes.of = of;
  1048. rb_objspace_each_objects(os_obj_of_i, &oes);
  1049. return SIZET2NUM(oes.num);
  1050. }
  1051. /*
  1052. * call-seq:
  1053. * ObjectSpace.each_object([module]) {|obj| ... } -> fixnum
  1054. * ObjectSpace.each_object([module]) -> an_enumerator
  1055. *
  1056. * Calls the block once for each living, nonimmediate object in this
  1057. * Ruby process. If <i>module</i> is specified, calls the block
  1058. * for only those classes or modules that match (or are a subclass of)
  1059. * <i>module</i>. Returns the number of objects found. Immediate
  1060. * objects (<code>Fixnum</code>s, <code>Symbol</code>s,
  1061. * <code>true</code>, <code>false</code>, and <code>nil</code>) are
  1062. * never returned. In the example below, <code>each_object</code>
  1063. * returns both the numbers we defined and several constants defined in
  1064. * the <code>Math</code> module.
  1065. *
  1066. * If no block is given, an enumerator is returned instead.
  1067. *
  1068. * a = 102.7
  1069. * b = 95 # Won't be returned
  1070. * c = 12345678987654321
  1071. * count = ObjectSpace.each_object(Numeric) {|x| p x }
  1072. * puts "Total count: #{count}"
  1073. *
  1074. * <em>produces:</em>
  1075. *
  1076. * 12345678987654321
  1077. * 102.7
  1078. * 2.71828182845905
  1079. * 3.14159265358979
  1080. * 2.22044604925031e-16
  1081. * 1.7976931348623157e+308
  1082. * 2.2250738585072e-308
  1083. * Total count: 7
  1084. *
  1085. */
  1086. static VALUE
  1087. os_each_obj(int argc, VALUE *argv, VALUE os)
  1088. {
  1089. VALUE of;
  1090. rb_secure(4);
  1091. if (argc == 0) {
  1092. of = 0;
  1093. }
  1094. else {
  1095. rb_scan_args(argc, argv, "01", &of);
  1096. }
  1097. RETURN_ENUMERATOR(os, 1, &of);
  1098. return os_obj_of(of);
  1099. }
  1100. /*
  1101. * call-seq:
  1102. * ObjectSpace.undefine_finalizer(obj)
  1103. *
  1104. * Removes all finalizers for <i>obj</i>.
  1105. *
  1106. */
  1107. static VALUE
  1108. undefine_final(VALUE os, VALUE obj)
  1109. {
  1110. return rb_undefine_final(obj);
  1111. }
  1112. VALUE
  1113. rb_undefine_final(VALUE obj)
  1114. {
  1115. rb_objspace_t *objspace = &rb_objspace;
  1116. st_data_t data = obj;
  1117. rb_check_frozen(obj);
  1118. st_delete(finalizer_table, &data, 0);
  1119. FL_UNSET(obj, FL_FINALIZE);
  1120. return obj;
  1121. }
  1122. /*
  1123. * call-seq:
  1124. * ObjectSpace.define_finalizer(obj, aProc=proc())
  1125. *
  1126. * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
  1127. * is destroyed.
  1128. *
  1129. */
  1130. static VALUE
  1131. define_final(int argc, VALUE *argv, VALUE os)
  1132. {
  1133. VALUE obj, block;
  1134. rb_scan_args(argc, argv, "11", &obj, &block);
  1135. rb_check_frozen(obj);
  1136. if (argc == 1) {
  1137. block = rb_block_proc();
  1138. }
  1139. else if (!rb_respond_to(block, rb_intern("call"))) {
  1140. rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
  1141. rb_obj_classname(block));
  1142. }
  1143. return define_final0(obj, block);
  1144. }
  1145. static VALUE
  1146. define_final0(VALUE obj, VALUE block)
  1147. {
  1148. rb_objspace_t *objspace = &rb_objspace;
  1149. VALUE table;
  1150. st_data_t data;
  1151. if (!FL_ABLE(obj)) {
  1152. rb_raise(rb_eArgError, "cannot define finalizer for %s",
  1153. rb_obj_classname(obj));
  1154. }
  1155. RBASIC(obj)->flags |= FL_FINALIZE;
  1156. block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block);
  1157. OBJ_FREEZE(block);
  1158. if (st_lookup(finalizer_table, obj, &data)) {
  1159. table = (VALUE)data;
  1160. rb_ary_push(table, block);
  1161. }
  1162. else {
  1163. table = rb_ary_new3(1, block);
  1164. RBASIC(table)->klass = 0;
  1165. st_add_direct(finalizer_table, obj, table);
  1166. }
  1167. return block;
  1168. }
  1169. VALUE
  1170. rb_define_final(VALUE obj, VALUE block)
  1171. {
  1172. rb_check_frozen(obj);
  1173. if (!rb_respond_to(block, rb_intern("call"))) {
  1174. rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
  1175. rb_obj_classname(block));
  1176. }
  1177. return define_final0(obj, block);
  1178. }
  1179. void
  1180. rb_gc_copy_finalizer(VALUE dest, VALUE obj)
  1181. {
  1182. rb_objspace_t *objspace = &rb_objspace;
  1183. VALUE table;
  1184. st_data_t data;
  1185. if (!FL_TEST(obj, FL_FINALIZE)) return;
  1186. if (st_lookup(finalizer_table, obj, &data)) {
  1187. table = (VALUE)data;
  1188. st_insert(finalizer_table, dest, table);
  1189. }
  1190. FL_SET(dest, FL_FINALIZE);
  1191. }
  1192. static VALUE
  1193. run_single_final(VALUE arg)
  1194. {
  1195. VALUE *args = (VALUE *)arg;
  1196. rb_eval_cmd(args[0], args[1], (int)args[2]);
  1197. return Qnil;
  1198. }
  1199. static void
  1200. run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
  1201. {
  1202. long i;
  1203. int status;
  1204. VALUE args[3];
  1205. VALUE objid = nonspecial_obj_id(obj);
  1206. if (RARRAY_LEN(table) > 0) {
  1207. args[1] = rb_obj_freeze(rb_ary_new3(1, objid));
  1208. }
  1209. else {
  1210. args[1] = 0;
  1211. }
  1212. args[2] = (VALUE)rb_safe_level();
  1213. for (i=0; i<RARRAY_LEN(table); i++) {
  1214. VALUE final = RARRAY_PTR(table)[i];
  1215. args[0] = RARRAY_PTR(final)[1];
  1216. args[2] = FIX2INT(RARRAY_PTR(final)[0]);
  1217. status = 0;
  1218. rb_protect(run_single_final, (VALUE)args, &status);
  1219. if (status)
  1220. rb_set_errinfo(Qnil);
  1221. }
  1222. }
  1223. static void
  1224. run_final(rb_objspace_t *objspace, VALUE obj)
  1225. {
  1226. RUBY_DATA_FUNC free_func = 0;
  1227. st_data_t key, table;
  1228. objspace->heap.final_num--;
  1229. RBASIC(obj)->klass = 0;
  1230. if (RTYPEDDATA_P(obj)) {
  1231. free_func = RTYPEDDATA_TYPE(obj)->function.dfree;
  1232. }
  1233. else {
  1234. free_func = RDATA(obj)->dfree;
  1235. }
  1236. if (free_func) {
  1237. (*free_func)(DATA_PTR(obj));
  1238. }
  1239. key = (st_data_t)obj;
  1240. if (st_delete(finalizer_table, &key, &table)) {
  1241. run_finalizer(objspace, obj, (VALUE)table);
  1242. }
  1243. }
  1244. static void
  1245. finalize_list(rb_objspace_t *objspace, RVALUE *p)
  1246. {
  1247. while (p) {
  1248. RVALUE *tmp = p->as.free.next;
  1249. run_final(objspace, (VALUE)p);
  1250. if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */
  1251. add_slot_local_freelist(objspace, p);
  1252. if (!is_lazy_sweeping(objspace)) {
  1253. objspace->total_freed_object_num++;
  1254. objspace->heap.free_num++;
  1255. }
  1256. }
  1257. else {
  1258. struct heaps_slot *slot = (struct heaps_slot *)(VALUE)RDATA(p)->dmark;
  1259. slot->header->limit--;
  1260. }
  1261. p = tmp;
  1262. }
  1263. }
  1264. static void
  1265. finalize_deferred(rb_objspace_t *objspace)
  1266. {
  1267. RVALUE *p = deferred_final_list;
  1268. deferred_final_list = 0;
  1269. if (p) {
  1270. finalize_list(objspace, p);
  1271. }
  1272. }
  1273. void
  1274. rb_gc_finalize_deferred(void)
  1275. {
  1276. rb_objspace_t *objspace = &rb_objspace;
  1277. if (ATOMIC_EXCHANGE(finalizing, 1)) return;
  1278. finalize_deferred(objspace);
  1279. ATOMIC_SET(finalizing, 0);
  1280. }
  1281. struct force_finalize_list {
  1282. VALUE obj;
  1283. VALUE table;
  1284. struct force_finalize_list *next;
  1285. };
  1286. static int
  1287. force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
  1288. {
  1289. struct force_finalize_list **prev = (struct force_finalize_list **)arg;
  1290. struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
  1291. curr->obj = key;
  1292. curr->table = val;
  1293. curr->next = *prev;
  1294. *prev = curr;
  1295. return ST_CONTINUE;
  1296. }
  1297. void
  1298. rb_gc_call_finalizer_at_exit(void)
  1299. {
  1300. rb_objspace_call_finalizer(&rb_objspace);
  1301. }
  1302. static void
  1303. rb_objspace_call_finalizer(rb_objspace_t *objspace)
  1304. {
  1305. RVALUE *p, *pend;
  1306. RVALUE *final_list = 0;
  1307. size_t i;
  1308. rest_sweep(objspace);
  1309. if (ATOMIC_EXCHANGE(finalizing, 1)) return;
  1310. /* run finalizers */
  1311. finalize_deferred(objspace);
  1312. assert(deferred_final_list == 0);
  1313. /* force remaining finalizers to run */
  1314. while (finalizer_table->num_entries) {
  1315. struct force_finalize_list *list = 0;
  1316. st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
  1317. while (list) {
  1318. struct force_finalize_list *curr = list;
  1319. st_data_t obj = (st_data_t)curr->obj;
  1320. run_finalizer(objspace, curr->obj, curr->table);
  1321. st_delete(finalizer_table, &obj, 0);
  1322. list = curr->next;
  1323. xfree(curr);
  1324. }
  1325. }
  1326. /* finalizers are part of garbage collection */
  1327. during_gc++;
  1328. /* run data object's finalizers */
  1329. for (i = 0; i < heaps_used; i++) {
  1330. p = objspace->heap.sorted[i]->start; pend = p + objspace->heap.sorted[i]->limit;
  1331. while (p < pend) {
  1332. if (BUILTIN_TYPE(p) == T_DATA &&
  1333. DATA_PTR(p) && RANY(p)->as.data.dfree &&
  1334. !rb_obj_is_thread((VALUE)p) && !rb_obj_is_mutex((VALUE)p) &&
  1335. !rb_obj_is_fiber((VALUE)p)) {
  1336. p->as.free.flags = 0;
  1337. if (RTYPEDDATA_P(p)) {
  1338. RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
  1339. }
  1340. if (RANY(p)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
  1341. xfree(DATA_PTR(p));
  1342. }
  1343. else if (RANY(p)->as.data.dfree) {
  1344. make_deferred(RANY(p));
  1345. RANY(p)->as.free.next = final_list;
  1346. final_list = p;
  1347. }
  1348. }
  1349. else if (BUILTIN_TYPE(p) == T_FILE) {
  1350. if (RANY(p)->as.file.fptr) {
  1351. make_io_deferred(RANY(p));
  1352. RANY(p)->as.free.next = final_list;
  1353. final_list = p;
  1354. }
  1355. }
  1356. p++;
  1357. }
  1358. }
  1359. during_gc = 0;
  1360. if (final_list) {
  1361. finalize_list(objspace, final_list);
  1362. }
  1363. st_free_table(finalizer_table);
  1364. finalizer_table = 0;
  1365. ATOMIC_SET(finalizing, 0);
  1366. }
  1367. static inline int
  1368. is_id_value(rb_objspace_t *objspace, VALUE ptr)
  1369. {
  1370. if (!is_pointer_to_heap(objspace, (void *)ptr)) return FALSE;
  1371. if (BUILTIN_TYPE(ptr) > T_FIXNUM) return FALSE;
  1372. if (BUILTIN_TYPE(ptr) == T_ICLASS) return FALSE;
  1373. return TRUE;
  1374. }
  1375. static inline int
  1376. is_swept_object(rb_objspace_t *objspace, VALUE ptr)
  1377. {
  1378. struct heaps_slot *slot = objspace->heap.sweep_slots;
  1379. while (slot) {
  1380. if ((VALUE)slot->header->start <= ptr && ptr < (VALUE)(slot->header->end))
  1381. return FALSE;
  1382. slot = slot->next;
  1383. }
  1384. return TRUE;
  1385. }
  1386. static inline int
  1387. is_dead_object(rb_objspace_t *objspace, VALUE ptr)
  1388. {
  1389. if (!is_lazy_sweeping(objspace) || MARKED_IN_BITMAP(GET_HEAP_BITMAP(ptr), ptr))
  1390. return FALSE;
  1391. if (!is_swept_object(objspace, ptr))
  1392. return TRUE;
  1393. return FALSE;
  1394. }
  1395. static inline int
  1396. is_live_object(rb_objspace_t *objspace, VALUE ptr)
  1397. {
  1398. if (BUILTIN_TYPE(ptr) == 0) return FALSE;
  1399. if (RBASIC(ptr)->klass == 0) return FALSE;
  1400. if (is_dead_object(objspace, ptr)) return FALSE;
  1401. return TRUE;
  1402. }
  1403. /*
  1404. * call-seq:
  1405. * ObjectSpace._id2ref(object_id) -> an_object
  1406. *
  1407. * Converts an object id to a reference to the object. May not be
  1408. * called on an object id passed as a parameter to a finalizer.
  1409. *
  1410. * s = "I am a string" #=> "I am a string"
  1411. * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
  1412. * r == s #=> true
  1413. *
  1414. */
  1415. static VALUE
  1416. id2ref(VALUE obj, VALUE objid)
  1417. {
  1418. #if SIZEOF_LONG == SIZEOF_VOIDP
  1419. #define NUM2PTR(x) NUM2ULONG(x)
  1420. #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
  1421. #define NUM2PTR(x) NUM2ULL(x)
  1422. #endif
  1423. rb_objspace_t *objspace = &rb_objspace;
  1424. VALUE ptr;
  1425. void *p0;
  1426. rb_secure(4);
  1427. ptr = NUM2PTR(objid);
  1428. p0 = (void *)ptr;
  1429. if (ptr == Qtrue) return Qtrue;
  1430. if (ptr == Qfalse) return Qfalse;
  1431. if (ptr == Qnil) return Qnil;
  1432. if (FIXNUM_P(ptr)) return (VALUE)ptr;
  1433. if (FLONUM_P(ptr)) return (VALUE)ptr;
  1434. ptr = obj_id_to_ref(objid);
  1435. if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
  1436. ID symid = ptr / sizeof(RVALUE);
  1437. if (rb_id2name(symid) == 0)
  1438. rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
  1439. return ID2SYM(symid);
  1440. }
  1441. if (!is_id_value(objspace, ptr)) {
  1442. rb_raise(rb_eRangeError, "%p is not id value", p0);
  1443. }
  1444. if (!is_live_object(objspace, ptr)) {
  1445. rb_raise(rb_eRangeError, "%p is recycled object", p0);
  1446. }
  1447. return (VALUE)ptr;
  1448. }
  1449. /*
  1450. * Document-method: __id__
  1451. * Document-method: object_id
  1452. *
  1453. * call-seq:
  1454. * obj.__id__ -> integer
  1455. * obj.object_id -> integer
  1456. *
  1457. * Returns an integer identifier for +obj+.
  1458. *
  1459. * The same number will be returned on all calls to +id+ for a given object,
  1460. * and no two active objects will share an id.
  1461. *
  1462. * Object#object_id is a different concept from the +:name+ notation, which
  1463. * returns the symbol id of +name+.
  1464. *
  1465. * Replaces the deprecated Object#id.
  1466. */
  1467. /*
  1468. * call-seq:
  1469. * obj.hash -> fixnum
  1470. *
  1471. * Generates a Fixnum hash value for this object.
  1472. *
  1473. * This function must have the property that <code>a.eql?(b)</code> implies
  1474. * <code>a.hash == b.hash</code>.
  1475. *
  1476. * The hash value is used by the Hash class.
  1477. *
  1478. * Any hash value that exceeds the capacity of a Fixnum will be truncated
  1479. * before being used.
  1480. */
  1481. VALUE
  1482. rb_obj_id(VALUE obj)
  1483. {
  1484. /*
  1485. * 32-bit VALUE space
  1486. * MSB ------------------------ LSB
  1487. * false 00000000000000000000000000000000
  1488. * true 00000000000000000000000000000010
  1489. * nil 00000000000000000000000000000100
  1490. * undef 00000000000000000000000000000110
  1491. * symbol ssssssssssssssssssssssss00001110
  1492. * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE))
  1493. * fixnum fffffffffffffffffffffffffffffff1
  1494. *
  1495. * object_id space
  1496. * LSB
  1497. * false 00000000000000000000000000000000
  1498. * true 00000000000000000000000000000010
  1499. * nil 00000000000000000000000000000100
  1500. * undef 00000000000000000000000000000110
  1501. * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4)
  1502. * object oooooooooooooooooooooooooooooo0 o...o % A = 0
  1503. * fixnum fffffffffffffffffffffffffffffff1 bignum if required
  1504. *
  1505. * where A = sizeof(RVALUE)/4
  1506. *
  1507. * sizeof(RVALUE) is
  1508. * 20 if 32-bit, double is 4-byte aligned
  1509. * 24 if 32-bit, double is 8-byte aligned
  1510. * 40 if 64-bit
  1511. */
  1512. if (SYMBOL_P(obj)) {
  1513. return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
  1514. }
  1515. else if (FLONUM_P(obj)) {
  1516. #if SIZEOF_LONG == SIZEOF_VOIDP
  1517. return LONG2NUM((SIGNED_VALUE)obj);
  1518. #else
  1519. return LL2NUM((SIGNED_VALUE)obj);
  1520. #endif
  1521. }
  1522. else if (SPECIAL_CONST_P(obj)) {
  1523. return LONG2NUM((SIGNED_VALUE)obj);
  1524. }
  1525. return nonspecial_obj_id(obj);
  1526. }
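/*
 * Editorial worked example (not part of the original source), assuming a
 * 64-bit build where sizeof(RVALUE) == 40.  For a symbol with internal ID
 * 100, rb_obj_id() returns the VALUE (100 * 40 + (4 << 2)) | FIXNUM_FLAG
 * = 4017.  id2ref() above reverses this: it clears FIXNUM_FLAG to get 4016,
 * sees 4016 % 40 == 16 (the symbol tag), and recovers the ID as
 * 4016 / 40 == 100.  Heap objects instead satisfy ptr % sizeof(RVALUE) == 0
 * and are returned as-is after the id/liveness checks in id2ref().
 */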
  1527. static int
  1528. set_zero(st_data_t key, st_data_t val, st_data_t arg)
  1529. {
  1530. VALUE k = (VALUE)key;
  1531. VALUE hash = (VALUE)arg;
  1532. rb_hash_aset(hash, k, INT2FIX(0));
  1533. return ST_CONTINUE;
  1534. }
  1535. /*
  1536. * call-seq:
  1537. * ObjectSpace.count_objects([result_hash]) -> hash
  1538. *
  1539. * Counts objects for each type.
  1540. *
  1541. * It returns a hash, such as:
  1542. * {
  1543. * :TOTAL=>10000,
  1544. * :FREE=>3011,
  1545. * :T_OBJECT=>6,
  1546. * :T_CLASS=>404,
  1547. * # ...
  1548. * }
  1549. *
  1550. * The contents of the returned hash are implementation specific.
  1551. * They may change in the future.
  1552. *
  1553. * If the optional argument +result_hash+ is given,
  1554. * it is overwritten and returned. This is intended to avoid probe effect.
  1555. *
  1556. * This method is only expected to work on C Ruby.
  1557. *
  1558. */
  1559. static VALUE
  1560. count_objects(int argc, VALUE *argv, VALUE os)
  1561. {
  1562. rb_objspace_t *objspace = &rb_objspace;
  1563. size_t counts[T_MASK+1];
  1564. size_t freed = 0;
  1565. size_t total = 0;
  1566. size_t i;
  1567. VALUE hash;
  1568. if (rb_scan_args(argc, argv, "01", &hash) == 1) {
  1569. if (!RB_TYPE_P(hash, T_HASH))
  1570. rb_raise(rb_eTypeError, "non-hash given");
  1571. }
  1572. for (i = 0; i <= T_MASK; i++) {
  1573. counts[i] = 0;
  1574. }
  1575. for (i = 0; i < heaps_used; i++) {
  1576. RVALUE *p, *pend;
  1577. p = objspace->heap.sorted[i]->start; pend = p + objspace->heap.sorted[i]->limit;
  1578. for (;p < pend; p++) {
  1579. if (p->as.basic.flags) {
  1580. counts[BUILTIN_TYPE(p)]++;
  1581. }
  1582. else {
  1583. freed++;
  1584. }
  1585. }
  1586. total += objspace->heap.sorted[i]->limit;
  1587. }
  1588. if (hash == Qnil) {
  1589. hash = rb_hash_new();
  1590. }
  1591. else if (!RHASH_EMPTY_P(hash)) {
  1592. st_foreach(RHASH_TBL(hash), set_zero, hash);
  1593. }
  1594. rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
  1595. rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
  1596. for (i = 0; i <= T_MASK; i++) {
  1597. VALUE type;
  1598. switch (i) {
  1599. #define COUNT_TYPE(t) case (t): type = ID2SYM(rb_intern(#t)); break;
  1600. COUNT_TYPE(T_NONE);
  1601. COUNT_TYPE(T_OBJECT);
  1602. COUNT_TYPE(T_CLASS);
  1603. COUNT_TYPE(T_MODULE);
  1604. COUNT_TYPE(T_FLOAT);
  1605. COUNT_TYPE(T_STRING);
  1606. COUNT_TYPE(T_REGEXP);
  1607. COUNT_TYPE(T_ARRAY);
  1608. COUNT_TYPE(T_HASH);
  1609. COUNT_TYPE(T_STRUCT);
  1610. COUNT_TYPE(T_BIGNUM);
  1611. COUNT_TYPE(T_FILE);
  1612. COUNT_TYPE(T_DATA);
  1613. COUNT_TYPE(T_MATCH);
  1614. COUNT_TYPE(T_COMPLEX);
  1615. COUNT_TYPE(T_RATIONAL);
  1616. COUNT_TYPE(T_NIL);
  1617. COUNT_TYPE(T_TRUE);
  1618. COUNT_TYPE(T_FALSE);
  1619. COUNT_TYPE(T_SYMBOL);
  1620. COUNT_TYPE(T_FIXNUM);
  1621. COUNT_TYPE(T_UNDEF);
  1622. COUNT_TYPE(T_NODE);
  1623. COUNT_TYPE(T_ICLASS);
  1624. COUNT_TYPE(T_ZOMBIE);
  1625. #undef COUNT_TYPE
  1626. default: type = INT2NUM(i); break;
  1627. }
  1628. if (counts[i])
  1629. rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
  1630. }
  1631. return hash;
  1632. }
  1633. /*
  1634. ------------------------ Garbage Collection ------------------------
  1635. */
  1636. /* Sweeping */
  1637. static VALUE
  1638. lazy_sweep_enable(void)
  1639. {
  1640. rb_objspace_t *objspace = &rb_objspace;
  1641. objspace->flags.dont_lazy_sweep = FALSE;
  1642. return Qnil;
  1643. }
  1644. static void
  1645. gc_clear_slot_bits(struct heaps_slot *slot)
  1646. {
  1647. memset(slot->bits, 0, HEAP_BITMAP_LIMIT * sizeof(uintptr_t));
  1648. }
  1649. static size_t
  1650. objspace_live_num(rb_objspace_t *objspace)
  1651. {
  1652. return objspace->total_allocated_object_num - objspace->total_freed_object_num;
  1653. }
  1654. static void
  1655. slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
  1656. {
  1657. size_t empty_num = 0, freed_num = 0, final_num = 0;
  1658. RVALUE *p, *pend;
  1659. RVALUE *final = deferred_final_list;
  1660. int deferred;
  1661. uintptr_t *bits;
  1662. p = sweep_slot->header->start; pend = p + sweep_slot->header->limit;
  1663. bits = GET_HEAP_BITMAP(p);
  1664. while (p < pend) {
  1665. if ((!(MARKED_IN_BITMAP(bits, p))) && BUILTIN_TYPE(p) != T_ZOMBIE) {
  1666. if (p->as.basic.flags) {
  1667. if ((deferred = obj_free(objspace, (VALUE)p)) ||
  1668. (FL_TEST(p, FL_FINALIZE))) {
  1669. if (!deferred) {
  1670. p->as.free.flags = T_ZOMBIE;
  1671. RDATA(p)->dfree = 0;
  1672. }
  1673. p->as.free.next = deferred_final_list;
  1674. deferred_final_list = p;
  1675. assert(BUILTIN_TYPE(p) == T_ZOMBIE);
  1676. final_num++;
  1677. }
  1678. else {
  1679. (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
  1680. p->as.free.flags = 0;
  1681. p->as.free.next = sweep_slot->freelist;
  1682. sweep_slot->freelist = p;
  1683. freed_num++;
  1684. }
  1685. }
  1686. else {
  1687. empty_num++;
  1688. }
  1689. }
  1690. p++;
  1691. }
  1692. gc_clear_slot_bits(sweep_slot);
  1693. if (final_num + freed_num + empty_num == sweep_slot->header->limit &&
  1694. objspace->heap.free_num > objspace->heap.do_heap_free) {
  1695. RVALUE *pp;
  1696. for (pp = deferred_final_list; pp != final; pp = pp->as.free.next) {
  1697. RDATA(pp)->dmark = (void (*)(void *))(VALUE)sweep_slot;
  1698. pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */
  1699. }
  1700. sweep_slot->header->limit = final_num;
  1701. unlink_heap_slot(objspace, sweep_slot);
  1702. }
  1703. else {
  1704. if (freed_num + empty_num > 0) {
  1705. link_free_heap_slot(objspace, sweep_slot);
  1706. }
  1707. else {
  1708. sweep_slot->free_next = NULL;
  1709. }
  1710. objspace->total_freed_object_num += freed_num;
  1711. objspace->heap.free_num += freed_num + empty_num;
  1712. }
  1713. objspace->heap.final_num += final_num;
  1714. if (deferred_final_list && !finalizing) {
  1715. rb_thread_t *th = GET_THREAD();
  1716. if (th) {
  1717. RUBY_VM_SET_FINALIZER_INTERRUPT(th);
  1718. }
  1719. }
  1720. }
  1721. static int
  1722. ready_to_gc(rb_objspace_t *objspace)
  1723. {
  1724. if (dont_gc || during_gc) {
  1725. if (!has_free_object) {
  1726. if (!heaps_increment(objspace)) {
  1727. set_heaps_increment(objspace);
  1728. heaps_increment(objspace);
  1729. }
  1730. }
  1731. return FALSE;
  1732. }
  1733. return TRUE;
  1734. }
  1735. static void
  1736. before_gc_sweep(rb_objspace_t *objspace)
  1737. {
  1738. objspace->heap.do_heap_free = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.65);
  1739. objspace->heap.free_min = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.2);
  1740. if (objspace->heap.free_min < initial_free_min) {
  1741. objspace->heap.do_heap_free = heaps_used * HEAP_OBJ_LIMIT;
  1742. objspace->heap.free_min = initial_free_min;
  1743. }
  1744. objspace->heap.sweep_slots = heaps;
  1745. objspace->heap.free_num = 0;
  1746. objspace->heap.free_slots = NULL;
  1747. /* sweep unlinked method entries */
  1748. if (GET_VM()->unlinked_method_entry_list) {
  1749. rb_sweep_method_entry(GET_VM());
  1750. }
  1751. }
  1752. static void
  1753. after_gc_sweep(rb_objspace_t *objspace)
  1754. {
  1755. size_t inc;
  1756. gc_prof_set_malloc_info(objspace);
  1757. if (objspace->heap.free_num < objspace->heap.free_min) {
  1758. set_heaps_increment(objspace);
  1759. heaps_increment(objspace);
  1760. }
  1761. inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
  1762. if (inc > malloc_limit) {
  1763. malloc_limit +=
  1764. (size_t)((inc - malloc_limit) * (double)objspace_live_num(objspace) / (heaps_used * HEAP_OBJ_LIMIT));
  1765. if (malloc_limit < initial_malloc_limit) malloc_limit = initial_malloc_limit;
  1766. }
  1767. free_unused_heaps(objspace);
  1768. }
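/* Lazy (incremental) sweep: sweep slots one at a time and stop as soon as a
 * slot yields a free object, spreading the sweep cost across allocations. */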
  1769. static int
  1770. lazy_sweep(rb_objspace_t *objspace)
  1771. {
  1772. struct heaps_slot *next;
  1773. heaps_increment(objspace);
  1774. while (objspace->heap.sweep_slots) {
  1775. next = objspace->heap.sweep_slots->next;
  1776. slot_sweep(objspace, objspace->heap.sweep_slots);
  1777. objspace->heap.sweep_slots = next;
  1778. if (has_free_object) {
  1779. during_gc = 0;
  1780. return TRUE;
  1781. }
  1782. }
  1783. return FALSE;
  1784. }
  1785. static void
  1786. rest_sweep(rb_objspace_t *objspace)
  1787. {
  1788. if (objspace->heap.sweep_slots) {
  1789. while (objspace->heap.sweep_slots) {
  1790. lazy_sweep(objspace);
  1791. }
  1792. after_gc_sweep(objspace);
  1793. }
  1794. }
  1795. static void gc_marks(rb_objspace_t *objspace);
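/* Called from the allocation path when no free object is available: continue
 * the lazy sweep if one is in progress, otherwise grow the heap or fall back
 * to a full mark phase followed by a lazy sweep. */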
  1796. static int
  1797. gc_prepare_free_objects(rb_objspace_t *objspace)
  1798. {
  1799. int res;
  1800. if (objspace->flags.dont_lazy_sweep)
  1801. return garbage_collect(objspace);
  1802. if (!ready_to_gc(objspace)) return TRUE;
  1803. during_gc++;
  1804. gc_prof_timer_start(objspace);
  1805. gc_prof_sweep_timer_start(objspace);
  1806. if (objspace->heap.sweep_slots) {
  1807. res = lazy_sweep(objspace);
  1808. if (res) {
  1809. gc_prof_sweep_timer_stop(objspace);
  1810. gc_prof_set_malloc_info(objspace);
  1811. gc_prof_timer_stop(objspace, Qfalse);
  1812. return res;
  1813. }
  1814. after_gc_sweep(objspace);
  1815. }
  1816. else {
  1817. if (heaps_increment(objspace)) {
  1818. during_gc = 0;
  1819. return TRUE;
  1820. }
  1821. }
  1822. gc_marks(objspace);
  1823. before_gc_sweep(objspace);
  1824. if (objspace->heap.free_min > (heaps_used * HEAP_OBJ_LIMIT - objspace_live_num(objspace))) {
  1825. set_heaps_increment(objspace);
  1826. }
  1827. gc_prof_sweep_timer_start(objspace);
  1828. if (!(res = lazy_sweep(objspace))) {
  1829. after_gc_sweep(objspace);
  1830. if (has_free_object) {
  1831. res = TRUE;
  1832. during_gc = 0;
  1833. }
  1834. }
  1835. gc_prof_sweep_timer_stop(objspace);
  1836. gc_prof_timer_stop(objspace, Qtrue);
  1837. return res;
  1838. }
  1839. static void
  1840. gc_sweep(rb_objspace_t *objspace)
  1841. {
  1842. struct heaps_slot *next;
  1843. before_gc_sweep(objspace);
  1844. while (objspace->heap.sweep_slots) {
  1845. next = objspace->heap.sweep_slots->next;
  1846. slot_sweep(objspace, objspace->heap.sweep_slots);
  1847. objspace->heap.sweep_slots = next;
  1848. }
  1849. after_gc_sweep(objspace);
  1850. during_gc = 0;
  1851. }
  1852. /* Marking stack */
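/* The mark stack is a list of fixed-size chunks; retired chunks are kept in a
 * small cache that is shrunk after each marking phase to limit malloc/free
 * churn (see shrink_stack_chunk_cache()). */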
  1853. static void push_mark_stack(mark_stack_t *, VALUE);
  1854. static int pop_mark_stack(mark_stack_t *, VALUE *);
  1855. static void shrink_stack_chunk_cache(mark_stack_t *stack);
  1856. static stack_chunk_t *
  1857. stack_chunk_alloc(void)
  1858. {
  1859. stack_chunk_t *res;
  1860. res = malloc(sizeof(stack_chunk_t));
  1861. if (!res)
  1862. rb_memerror();
  1863. return res;
  1864. }
  1865. static inline int
1866. is_mark_stack_empty(mark_stack_t *stack)
  1867. {
  1868. return stack->chunk == NULL;
  1869. }
  1870. static void
  1871. add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
  1872. {
  1873. chunk->next = stack->cache;
  1874. stack->cache = chunk;
  1875. stack->cache_size++;
  1876. }
  1877. static void
  1878. shrink_stack_chunk_cache(mark_stack_t *stack)
  1879. {
  1880. stack_chunk_t *chunk;
  1881. if (stack->unused_cache_size > (stack->cache_size/2)) {
  1882. chunk = stack->cache;
  1883. stack->cache = stack->cache->next;
  1884. stack->cache_size--;
  1885. free(chunk);
  1886. }
  1887. stack->unused_cache_size = stack->cache_size;
  1888. }
  1889. static void
  1890. push_mark_stack_chunk(mark_stack_t *stack)
  1891. {
  1892. stack_chunk_t *next;
  1893. assert(stack->index == stack->limit);
  1894. if (stack->cache_size > 0) {
  1895. next = stack->cache;
  1896. stack->cache = stack->cache->next;
  1897. stack->cache_size--;
  1898. if (stack->unused_cache_size > stack->cache_size)
  1899. stack->unused_cache_size = stack->cache_size;
  1900. }
  1901. else {
  1902. next = stack_chunk_alloc();
  1903. }
  1904. next->next = stack->chunk;
  1905. stack->chunk = next;
  1906. stack->index = 0;
  1907. }
  1908. static void
  1909. pop_mark_stack_chunk(mark_stack_t *stack)
  1910. {
  1911. stack_chunk_t *prev;
  1912. prev = stack->chunk->next;
  1913. assert(stack->index == 0);
  1914. add_stack_chunk_cache(stack, stack->chunk);
  1915. stack->chunk = prev;
  1916. stack->index = stack->limit;
  1917. }
  1918. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  1919. static void
  1920. free_stack_chunks(mark_stack_t *stack)
  1921. {
  1922. stack_chunk_t *chunk = stack->chunk;
  1923. stack_chunk_t *next = NULL;
  1924. while (chunk != NULL) {
  1925. next = chunk->next;
  1926. free(chunk);
  1927. chunk = next;
  1928. }
  1929. }
  1930. #endif
  1931. static void
  1932. push_mark_stack(mark_stack_t *stack, VALUE data)
  1933. {
  1934. if (stack->index == stack->limit) {
  1935. push_mark_stack_chunk(stack);
  1936. }
  1937. stack->chunk->data[stack->index++] = data;
  1938. }
  1939. static int
  1940. pop_mark_stack(mark_stack_t *stack, VALUE *data)
  1941. {
1942. if (is_mark_stack_empty(stack)) {
  1943. return FALSE;
  1944. }
  1945. if (stack->index == 1) {
  1946. *data = stack->chunk->data[--stack->index];
  1947. pop_mark_stack_chunk(stack);
  1948. return TRUE;
  1949. }
  1950. *data = stack->chunk->data[--stack->index];
  1951. return TRUE;
  1952. }
  1953. static void
  1954. init_mark_stack(mark_stack_t *stack)
  1955. {
  1956. int i;
  1957. push_mark_stack_chunk(stack);
  1958. stack->limit = STACK_CHUNK_SIZE;
  1959. for (i=0; i < 4; i++) {
  1960. add_stack_chunk_cache(stack, stack_chunk_alloc());
  1961. }
  1962. stack->unused_cache_size = stack->cache_size;
  1963. }
  1964. /* Marking */
  1965. #define MARK_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] = bits[BITMAP_INDEX(p)] | ((uintptr_t)1 << BITMAP_OFFSET(p)))
  1966. #ifdef __ia64
  1967. #define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine_stack_end), th->machine_register_stack_end = rb_ia64_bsp())
  1968. #else
  1969. #define SET_STACK_END SET_MACHINE_STACK_END(&th->machine_stack_end)
  1970. #endif
  1971. #define STACK_START (th->machine_stack_start)
  1972. #define STACK_END (th->machine_stack_end)
  1973. #define STACK_LEVEL_MAX (th->machine_stack_maxsize/sizeof(VALUE))
  1974. #if STACK_GROW_DIRECTION < 0
  1975. # define STACK_LENGTH (size_t)(STACK_START - STACK_END)
  1976. #elif STACK_GROW_DIRECTION > 0
  1977. # define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
  1978. #else
  1979. # define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
  1980. : (size_t)(STACK_END - STACK_START + 1))
  1981. #endif
  1982. #if !STACK_GROW_DIRECTION
  1983. int ruby_stack_grow_direction;
  1984. int
  1985. ruby_get_stack_grow_direction(volatile VALUE *addr)
  1986. {
  1987. VALUE *end;
  1988. SET_MACHINE_STACK_END(&end);
  1989. if (end > addr) return ruby_stack_grow_direction = 1;
  1990. return ruby_stack_grow_direction = -1;
  1991. }
  1992. #endif
  1993. size_t
  1994. ruby_stack_length(VALUE **p)
  1995. {
  1996. rb_thread_t *th = GET_THREAD();
  1997. SET_STACK_END;
  1998. if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
  1999. return STACK_LENGTH;
  2000. }
  2001. #if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
  2002. static int
  2003. stack_check(int water_mark)
  2004. {
  2005. int ret;
  2006. rb_thread_t *th = GET_THREAD();
  2007. SET_STACK_END;
  2008. ret = STACK_LENGTH > STACK_LEVEL_MAX - water_mark;
  2009. #ifdef __ia64
  2010. if (!ret) {
  2011. ret = (VALUE*)rb_ia64_bsp() - th->machine_register_stack_start >
  2012. th->machine_register_stack_maxsize/sizeof(VALUE) - water_mark;
  2013. }
  2014. #endif
  2015. return ret;
  2016. }
  2017. #endif
  2018. #define STACKFRAME_FOR_CALL_CFUNC 512
  2019. int
  2020. ruby_stack_check(void)
  2021. {
  2022. #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
  2023. return 0;
  2024. #else
  2025. return stack_check(STACKFRAME_FOR_CALL_CFUNC);
  2026. #endif
  2027. }
  2028. static void
  2029. mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n)
  2030. {
  2031. VALUE v;
  2032. while (n--) {
  2033. v = *x;
  2034. (void)VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v));
  2035. if (is_pointer_to_heap(objspace, (void *)v)) {
  2036. gc_mark(objspace, v);
  2037. }
  2038. x++;
  2039. }
  2040. }
  2041. static void
  2042. gc_mark_locations(rb_objspace_t *objspace, VALUE *start, VALUE *end)
  2043. {
  2044. long n;
  2045. if (end <= start) return;
  2046. n = end - start;
  2047. mark_locations_array(objspace, start, n);
  2048. }
  2049. void
  2050. rb_gc_mark_locations(VALUE *start, VALUE *end)
  2051. {
  2052. gc_mark_locations(&rb_objspace, start, end);
  2053. }
  2054. #define rb_gc_mark_locations(start, end) gc_mark_locations(objspace, (start), (end))
  2055. struct mark_tbl_arg {
  2056. rb_objspace_t *objspace;
  2057. };
  2058. static int
  2059. mark_entry(st_data_t key, st_data_t value, st_data_t data)
  2060. {
  2061. struct mark_tbl_arg *arg = (void*)data;
  2062. gc_mark(arg->objspace, (VALUE)value);
  2063. return ST_CONTINUE;
  2064. }
  2065. static void
  2066. mark_tbl(rb_objspace_t *objspace, st_table *tbl)
  2067. {
  2068. struct mark_tbl_arg arg;
  2069. if (!tbl || tbl->num_entries == 0) return;
  2070. arg.objspace = objspace;
  2071. st_foreach(tbl, mark_entry, (st_data_t)&arg);
  2072. }
  2073. static int
  2074. mark_key(st_data_t key, st_data_t value, st_data_t data)
  2075. {
  2076. struct mark_tbl_arg *arg = (void*)data;
  2077. gc_mark(arg->objspace, (VALUE)key);
  2078. return ST_CONTINUE;
  2079. }
  2080. static void
  2081. mark_set(rb_objspace_t *objspace, st_table *tbl)
  2082. {
  2083. struct mark_tbl_arg arg;
  2084. if (!tbl) return;
  2085. arg.objspace = objspace;
  2086. st_foreach(tbl, mark_key, (st_data_t)&arg);
  2087. }
  2088. void
  2089. rb_mark_set(st_table *tbl)
  2090. {
  2091. mark_set(&rb_objspace, tbl);
  2092. }
  2093. static int
  2094. mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
  2095. {
  2096. struct mark_tbl_arg *arg = (void*)data;
  2097. gc_mark(arg->objspace, (VALUE)key);
  2098. gc_mark(arg->objspace, (VALUE)value);
  2099. return ST_CONTINUE;
  2100. }
  2101. static void
  2102. mark_hash(rb_objspace_t *objspace, st_table *tbl)
  2103. {
  2104. struct mark_tbl_arg arg;
  2105. if (!tbl) return;
  2106. arg.objspace = objspace;
  2107. st_foreach(tbl, mark_keyvalue, (st_data_t)&arg);
  2108. }
  2109. void
  2110. rb_mark_hash(st_table *tbl)
  2111. {
  2112. mark_hash(&rb_objspace, tbl);
  2113. }
  2114. static void
  2115. mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
  2116. {
  2117. const rb_method_definition_t *def = me->def;
  2118. gc_mark(objspace, me->klass);
  2119. again:
  2120. if (!def) return;
  2121. switch (def->type) {
  2122. case VM_METHOD_TYPE_ISEQ:
  2123. gc_mark(objspace, def->body.iseq->self);
  2124. break;
  2125. case VM_METHOD_TYPE_BMETHOD:
  2126. gc_mark(objspace, def->body.proc);
  2127. break;
  2128. case VM_METHOD_TYPE_ATTRSET:
  2129. case VM_METHOD_TYPE_IVAR:
  2130. gc_mark(objspace, def->body.attr.location);
  2131. break;
  2132. case VM_METHOD_TYPE_REFINED:
  2133. if (def->body.orig_me) {
  2134. def = def->body.orig_me->def;
  2135. goto again;
  2136. }
  2137. break;
  2138. default:
  2139. break; /* ignore */
  2140. }
  2141. }
  2142. void
  2143. rb_mark_method_entry(const rb_method_entry_t *me)
  2144. {
  2145. mark_method_entry(&rb_objspace, me);
  2146. }
  2147. static int
  2148. mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data)
  2149. {
  2150. struct mark_tbl_arg *arg = (void*)data;
  2151. mark_method_entry(arg->objspace, me);
  2152. return ST_CONTINUE;
  2153. }
  2154. static void
  2155. mark_m_tbl(rb_objspace_t *objspace, st_table *tbl)
  2156. {
  2157. struct mark_tbl_arg arg;
  2158. if (!tbl) return;
  2159. arg.objspace = objspace;
  2160. st_foreach(tbl, mark_method_entry_i, (st_data_t)&arg);
  2161. }
  2162. static int
  2163. mark_const_entry_i(ID key, const rb_const_entry_t *ce, st_data_t data)
  2164. {
  2165. struct mark_tbl_arg *arg = (void*)data;
  2166. gc_mark(arg->objspace, ce->value);
  2167. gc_mark(arg->objspace, ce->file);
  2168. return ST_CONTINUE;
  2169. }
  2170. static void
  2171. mark_const_tbl(rb_objspace_t *objspace, st_table *tbl)
  2172. {
  2173. struct mark_tbl_arg arg;
  2174. if (!tbl) return;
  2175. arg.objspace = objspace;
  2176. st_foreach(tbl, mark_const_entry_i, (st_data_t)&arg);
  2177. }
  2178. #if STACK_GROW_DIRECTION < 0
  2179. #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
  2180. #elif STACK_GROW_DIRECTION > 0
  2181. #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
  2182. #else
  2183. #define GET_STACK_BOUNDS(start, end, appendix) \
  2184. ((STACK_END < STACK_START) ? \
  2185. ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
  2186. #endif
  2187. #define numberof(array) (int)(sizeof(array) / sizeof((array)[0]))
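/* Conservative marking of the machine context: registers are spilled into a
 * jmp_buf via setjmp() and scanned, together with the C stack, for values
 * that look like heap pointers. */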
  2188. static void
  2189. mark_current_machine_context(rb_objspace_t *objspace, rb_thread_t *th)
  2190. {
  2191. union {
  2192. rb_jmp_buf j;
  2193. VALUE v[sizeof(rb_jmp_buf) / sizeof(VALUE)];
  2194. } save_regs_gc_mark;
  2195. VALUE *stack_start, *stack_end;
  2196. FLUSH_REGISTER_WINDOWS;
  2197. /* This assumes that all registers are saved into the jmp_buf (and stack) */
  2198. rb_setjmp(save_regs_gc_mark.j);
  2199. SET_STACK_END;
  2200. GET_STACK_BOUNDS(stack_start, stack_end, 1);
  2201. mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v));
  2202. rb_gc_mark_locations(stack_start, stack_end);
  2203. #ifdef __ia64
  2204. rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end);
  2205. #endif
  2206. #if defined(__mc68000__)
  2207. mark_locations_array(objspace, (VALUE*)((char*)STACK_END + 2),
  2208. (STACK_START - STACK_END));
  2209. #endif
  2210. }
  2211. void
  2212. rb_gc_mark_machine_stack(rb_thread_t *th)
  2213. {
  2214. rb_objspace_t *objspace = &rb_objspace;
  2215. VALUE *stack_start, *stack_end;
  2216. GET_STACK_BOUNDS(stack_start, stack_end, 0);
  2217. rb_gc_mark_locations(stack_start, stack_end);
  2218. #ifdef __ia64
  2219. rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end);
  2220. #endif
  2221. }
  2222. void
  2223. rb_mark_tbl(st_table *tbl)
  2224. {
  2225. mark_tbl(&rb_objspace, tbl);
  2226. }
  2227. void
  2228. rb_gc_mark_maybe(VALUE obj)
  2229. {
  2230. if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
  2231. gc_mark(&rb_objspace, obj);
  2232. }
  2233. }
  2234. static int
  2235. gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr)
  2236. {
  2237. register uintptr_t *bits = GET_HEAP_BITMAP(ptr);
  2238. if (MARKED_IN_BITMAP(bits, ptr)) return 0;
  2239. MARK_IN_BITMAP(bits, ptr);
  2240. return 1;
  2241. }
  2242. static int
  2243. markable_object_p(rb_objspace_t *objspace, VALUE ptr)
  2244. {
  2245. register RVALUE *obj = RANY(ptr);
  2246. if (rb_special_const_p(ptr)) return 0; /* special const not marked */
  2247. if (obj->as.basic.flags == 0) return 0 ; /* free cell */
  2248. return 1;
  2249. }
  2250. int
  2251. rb_objspace_markable_object_p(VALUE obj)
  2252. {
2253. return markable_object_p(/* the objspace argument is currently unused */ 0, obj);
  2254. }
  2255. static void
  2256. gc_mark(rb_objspace_t *objspace, VALUE ptr)
  2257. {
  2258. if (!markable_object_p(objspace, ptr)) {
  2259. return;
  2260. }
  2261. if (LIKELY(objspace->mark_func_data == 0)) {
  2262. if (!gc_mark_ptr(objspace, ptr)) return; /* already marked */
  2263. push_mark_stack(&objspace->mark_stack, ptr);
  2264. }
  2265. else {
  2266. objspace->mark_func_data->mark_func(ptr, objspace->mark_func_data->data);
  2267. }
  2268. }
  2269. void
  2270. rb_gc_mark(VALUE ptr)
  2271. {
  2272. gc_mark(&rb_objspace, ptr);
  2273. }
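/* Mark the objects directly referenced by ptr. The "ptr = ...; goto again;"
 * tails mark the last child iteratively rather than recursively, keeping C
 * stack usage bounded for long reference chains. */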
  2274. static void
  2275. gc_mark_children(rb_objspace_t *objspace, VALUE ptr)
  2276. {
  2277. register RVALUE *obj = RANY(ptr);
  2278. goto marking; /* skip */
  2279. again:
  2280. if (LIKELY(objspace->mark_func_data == 0)) {
  2281. obj = RANY(ptr);
  2282. if (!markable_object_p(objspace, ptr)) return;
  2283. if (!gc_mark_ptr(objspace, ptr)) return; /* already marked */
  2284. }
  2285. else {
  2286. gc_mark(objspace, ptr);
  2287. return;
  2288. }
  2289. marking:
  2290. if (FL_TEST(obj, FL_EXIVAR)) {
  2291. rb_mark_generic_ivar(ptr);
  2292. }
  2293. switch (BUILTIN_TYPE(obj)) {
  2294. case T_NIL:
  2295. case T_FIXNUM:
  2296. rb_bug("rb_gc_mark() called for broken object");
  2297. break;
  2298. case T_NODE:
  2299. switch (nd_type(obj)) {
  2300. case NODE_IF: /* 1,2,3 */
  2301. case NODE_FOR:
  2302. case NODE_ITER:
  2303. case NODE_WHEN:
  2304. case NODE_MASGN:
  2305. case NODE_RESCUE:
  2306. case NODE_RESBODY:
  2307. case NODE_CLASS:
  2308. case NODE_BLOCK_PASS:
  2309. gc_mark(objspace, (VALUE)obj->as.node.u2.node);
  2310. /* fall through */
  2311. case NODE_BLOCK: /* 1,3 */
  2312. case NODE_ARRAY:
  2313. case NODE_DSTR:
  2314. case NODE_DXSTR:
  2315. case NODE_DREGX:
  2316. case NODE_DREGX_ONCE:
  2317. case NODE_ENSURE:
  2318. case NODE_CALL:
  2319. case NODE_DEFS:
  2320. case NODE_OP_ASGN1:
  2321. gc_mark(objspace, (VALUE)obj->as.node.u1.node);
  2322. /* fall through */
  2323. case NODE_SUPER: /* 3 */
  2324. case NODE_FCALL:
  2325. case NODE_DEFN:
  2326. case NODE_ARGS_AUX:
  2327. ptr = (VALUE)obj->as.node.u3.node;
  2328. goto again;
  2329. case NODE_WHILE: /* 1,2 */
  2330. case NODE_UNTIL:
  2331. case NODE_AND:
  2332. case NODE_OR:
  2333. case NODE_CASE:
  2334. case NODE_SCLASS:
  2335. case NODE_DOT2:
  2336. case NODE_DOT3:
  2337. case NODE_FLIP2:
  2338. case NODE_FLIP3:
  2339. case NODE_MATCH2:
  2340. case NODE_MATCH3:
  2341. case NODE_OP_ASGN_OR:
  2342. case NODE_OP_ASGN_AND:
  2343. case NODE_MODULE:
  2344. case NODE_ALIAS:
  2345. case NODE_VALIAS:
  2346. case NODE_ARGSCAT:
  2347. gc_mark(objspace, (VALUE)obj->as.node.u1.node);
  2348. /* fall through */
  2349. case NODE_GASGN: /* 2 */
  2350. case NODE_LASGN:
  2351. case NODE_DASGN:
  2352. case NODE_DASGN_CURR:
  2353. case NODE_IASGN:
  2354. case NODE_IASGN2:
  2355. case NODE_CVASGN:
  2356. case NODE_COLON3:
  2357. case NODE_OPT_N:
  2358. case NODE_EVSTR:
  2359. case NODE_UNDEF:
  2360. case NODE_POSTEXE:
  2361. ptr = (VALUE)obj->as.node.u2.node;
  2362. goto again;
  2363. case NODE_HASH: /* 1 */
  2364. case NODE_LIT:
  2365. case NODE_STR:
  2366. case NODE_XSTR:
  2367. case NODE_DEFINED:
  2368. case NODE_MATCH:
  2369. case NODE_RETURN:
  2370. case NODE_BREAK:
  2371. case NODE_NEXT:
  2372. case NODE_YIELD:
  2373. case NODE_COLON2:
  2374. case NODE_SPLAT:
  2375. case NODE_TO_ARY:
  2376. ptr = (VALUE)obj->as.node.u1.node;
  2377. goto again;
  2378. case NODE_SCOPE: /* 2,3 */
  2379. case NODE_CDECL:
  2380. case NODE_OPT_ARG:
  2381. gc_mark(objspace, (VALUE)obj->as.node.u3.node);
  2382. ptr = (VALUE)obj->as.node.u2.node;
  2383. goto again;
  2384. case NODE_ARGS: /* custom */
  2385. {
  2386. struct rb_args_info *args = obj->as.node.u3.args;
  2387. if (args) {
  2388. if (args->pre_init) gc_mark(objspace, (VALUE)args->pre_init);
  2389. if (args->post_init) gc_mark(objspace, (VALUE)args->post_init);
  2390. if (args->opt_args) gc_mark(objspace, (VALUE)args->opt_args);
  2391. if (args->kw_args) gc_mark(objspace, (VALUE)args->kw_args);
  2392. if (args->kw_rest_arg) gc_mark(objspace, (VALUE)args->kw_rest_arg);
  2393. }
  2394. }
  2395. ptr = (VALUE)obj->as.node.u2.node;
  2396. goto again;
  2397. case NODE_ZARRAY: /* - */
  2398. case NODE_ZSUPER:
  2399. case NODE_VCALL:
  2400. case NODE_GVAR:
  2401. case NODE_LVAR:
  2402. case NODE_DVAR:
  2403. case NODE_IVAR:
  2404. case NODE_CVAR:
  2405. case NODE_NTH_REF:
  2406. case NODE_BACK_REF:
  2407. case NODE_REDO:
  2408. case NODE_RETRY:
  2409. case NODE_SELF:
  2410. case NODE_NIL:
  2411. case NODE_TRUE:
  2412. case NODE_FALSE:
  2413. case NODE_ERRINFO:
  2414. case NODE_BLOCK_ARG:
  2415. break;
  2416. case NODE_ALLOCA:
  2417. mark_locations_array(objspace,
  2418. (VALUE*)obj->as.node.u1.value,
  2419. obj->as.node.u3.cnt);
  2420. gc_mark(objspace, (VALUE)obj->as.node.u2.node);
  2421. break;
  2422. case NODE_CREF:
  2423. gc_mark(objspace, obj->as.node.nd_refinements);
  2424. gc_mark(objspace, (VALUE)obj->as.node.u1.node);
  2425. ptr = (VALUE)obj->as.node.u3.node;
  2426. goto again;
  2427. default: /* unlisted NODE */
  2428. if (is_pointer_to_heap(objspace, obj->as.node.u1.node)) {
  2429. gc_mark(objspace, (VALUE)obj->as.node.u1.node);
  2430. }
  2431. if (is_pointer_to_heap(objspace, obj->as.node.u2.node)) {
  2432. gc_mark(objspace, (VALUE)obj->as.node.u2.node);
  2433. }
  2434. if (is_pointer_to_heap(objspace, obj->as.node.u3.node)) {
  2435. gc_mark(objspace, (VALUE)obj->as.node.u3.node);
  2436. }
  2437. }
  2438. return; /* no need to mark class. */
  2439. }
  2440. gc_mark(objspace, obj->as.basic.klass);
  2441. switch (BUILTIN_TYPE(obj)) {
  2442. case T_ICLASS:
  2443. case T_CLASS:
  2444. case T_MODULE:
  2445. mark_m_tbl(objspace, RCLASS_M_TBL(obj));
  2446. if (!RCLASS_EXT(obj)) break;
  2447. mark_tbl(objspace, RCLASS_IV_TBL(obj));
  2448. mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
  2449. ptr = RCLASS_SUPER(obj);
  2450. goto again;
  2451. case T_ARRAY:
  2452. if (FL_TEST(obj, ELTS_SHARED)) {
  2453. ptr = obj->as.array.as.heap.aux.shared;
  2454. goto again;
  2455. }
  2456. else {
  2457. long i, len = RARRAY_LEN(obj);
  2458. VALUE *ptr = RARRAY_PTR(obj);
  2459. for (i=0; i < len; i++) {
  2460. gc_mark(objspace, *ptr++);
  2461. }
  2462. }
  2463. break;
  2464. case T_HASH:
  2465. mark_hash(objspace, obj->as.hash.ntbl);
  2466. ptr = obj->as.hash.ifnone;
  2467. goto again;
  2468. case T_STRING:
  2469. #define STR_ASSOC FL_USER3 /* copied from string.c */
  2470. if (FL_TEST(obj, RSTRING_NOEMBED) && FL_ANY(obj, ELTS_SHARED|STR_ASSOC)) {
  2471. ptr = obj->as.string.as.heap.aux.shared;
  2472. goto again;
  2473. }
  2474. break;
  2475. case T_DATA:
  2476. if (RTYPEDDATA_P(obj)) {
  2477. RUBY_DATA_FUNC mark_func = obj->as.typeddata.type->function.dmark;
  2478. if (mark_func) (*mark_func)(DATA_PTR(obj));
  2479. }
  2480. else {
  2481. if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
  2482. }
  2483. break;
  2484. case T_OBJECT:
  2485. {
  2486. long i, len = ROBJECT_NUMIV(obj);
  2487. VALUE *ptr = ROBJECT_IVPTR(obj);
  2488. for (i = 0; i < len; i++) {
  2489. gc_mark(objspace, *ptr++);
  2490. }
  2491. }
  2492. break;
  2493. case T_FILE:
  2494. if (obj->as.file.fptr) {
  2495. gc_mark(objspace, obj->as.file.fptr->pathv);
  2496. gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing);
  2497. gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat);
  2498. gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts);
  2499. gc_mark(objspace, obj->as.file.fptr->encs.ecopts);
  2500. gc_mark(objspace, obj->as.file.fptr->write_lock);
  2501. }
  2502. break;
  2503. case T_REGEXP:
  2504. ptr = obj->as.regexp.src;
  2505. goto again;
  2506. case T_FLOAT:
  2507. case T_BIGNUM:
  2508. case T_ZOMBIE:
  2509. break;
  2510. case T_MATCH:
  2511. gc_mark(objspace, obj->as.match.regexp);
  2512. if (obj->as.match.str) {
  2513. ptr = obj->as.match.str;
  2514. goto again;
  2515. }
  2516. break;
  2517. case T_RATIONAL:
  2518. gc_mark(objspace, obj->as.rational.num);
  2519. ptr = obj->as.rational.den;
  2520. goto again;
  2521. case T_COMPLEX:
  2522. gc_mark(objspace, obj->as.complex.real);
  2523. ptr = obj->as.complex.imag;
  2524. goto again;
  2525. case T_STRUCT:
  2526. {
  2527. long len = RSTRUCT_LEN(obj);
  2528. VALUE *ptr = RSTRUCT_PTR(obj);
  2529. while (len--) {
  2530. gc_mark(objspace, *ptr++);
  2531. }
  2532. }
  2533. break;
  2534. default:
  2535. rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
  2536. BUILTIN_TYPE(obj), (void *)obj,
  2537. is_pointer_to_heap(objspace, obj) ? "corrupted object" : "non object");
  2538. }
  2539. }
  2540. static void
  2541. gc_mark_stacked_objects(rb_objspace_t *objspace)
  2542. {
  2543. mark_stack_t *mstack = &objspace->mark_stack;
  2544. VALUE obj = 0;
  2545. if (!mstack->index) return;
  2546. while (pop_mark_stack(mstack, &obj)) {
  2547. gc_mark_children(objspace, obj);
  2548. }
  2549. shrink_stack_chunk_cache(mstack);
  2550. }
  2551. static void
  2552. gc_marks(rb_objspace_t *objspace)
  2553. {
  2554. struct gc_list *list;
  2555. rb_thread_t *th = GET_THREAD();
  2556. struct mark_func_data_struct *prev_mark_func_data;
  2557. prev_mark_func_data = objspace->mark_func_data;
  2558. objspace->mark_func_data = 0;
  2559. gc_prof_mark_timer_start(objspace);
  2560. objspace->count++;
  2561. SET_STACK_END;
  2562. th->vm->self ? rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm);
  2563. mark_tbl(objspace, finalizer_table);
  2564. mark_current_machine_context(objspace, th);
  2565. rb_gc_mark_symbols();
  2566. rb_gc_mark_encodings();
  2567. /* mark protected global variables */
  2568. for (list = global_List; list; list = list->next) {
  2569. rb_gc_mark_maybe(*list->varptr);
  2570. }
  2571. rb_mark_end_proc();
  2572. rb_gc_mark_global_tbl();
  2573. mark_tbl(objspace, rb_class_tbl);
  2574. /* mark generic instance variables for special constants */
  2575. rb_mark_generic_ivar_tbl();
  2576. rb_gc_mark_parser();
  2577. rb_gc_mark_unlinked_live_method_entries(th->vm);
  2578. /* marking-loop */
  2579. gc_mark_stacked_objects(objspace);
  2580. gc_prof_mark_timer_stop(objspace);
  2581. objspace->mark_func_data = prev_mark_func_data;
  2582. }
  2583. /* GC */
  2584. void
  2585. rb_gc_force_recycle(VALUE p)
  2586. {
  2587. rb_objspace_t *objspace = &rb_objspace;
  2588. struct heaps_slot *slot;
  2589. if (MARKED_IN_BITMAP(GET_HEAP_BITMAP(p), p)) {
  2590. add_slot_local_freelist(objspace, (RVALUE *)p);
  2591. }
  2592. else {
  2593. objspace->total_freed_object_num++;
  2594. objspace->heap.free_num++;
  2595. slot = add_slot_local_freelist(objspace, (RVALUE *)p);
  2596. if (slot->free_next == NULL) {
  2597. link_free_heap_slot(objspace, slot);
  2598. }
  2599. }
  2600. }
  2601. void
  2602. rb_gc_register_mark_object(VALUE obj)
  2603. {
  2604. VALUE ary = GET_THREAD()->vm->mark_object_ary;
  2605. rb_ary_push(ary, obj);
  2606. }
  2607. void
  2608. rb_gc_register_address(VALUE *addr)
  2609. {
  2610. rb_objspace_t *objspace = &rb_objspace;
  2611. struct gc_list *tmp;
  2612. tmp = ALLOC(struct gc_list);
  2613. tmp->next = global_List;
  2614. tmp->varptr = addr;
  2615. global_List = tmp;
  2616. }
  2617. void
  2618. rb_gc_unregister_address(VALUE *addr)
  2619. {
  2620. rb_objspace_t *objspace = &rb_objspace;
  2621. struct gc_list *tmp = global_List;
  2622. if (tmp->varptr == addr) {
  2623. global_List = tmp->next;
  2624. xfree(tmp);
  2625. return;
  2626. }
  2627. while (tmp->next) {
  2628. if (tmp->next->varptr == addr) {
  2629. struct gc_list *t = tmp->next;
  2630. tmp->next = tmp->next->next;
  2631. xfree(t);
  2632. break;
  2633. }
  2634. tmp = tmp->next;
  2635. }
  2636. }
  2637. #define GC_NOTIFY 0
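/* Full GC cycle: finish any pending lazy sweep, mark from the roots, then
 * sweep the whole heap. */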
  2638. static int
  2639. garbage_collect(rb_objspace_t *objspace)
  2640. {
  2641. if (GC_NOTIFY) printf("start garbage_collect()\n");
  2642. if (!heaps) {
  2643. return FALSE;
  2644. }
  2645. if (!ready_to_gc(objspace)) {
  2646. return TRUE;
  2647. }
  2648. gc_prof_timer_start(objspace);
  2649. rest_sweep(objspace);
  2650. during_gc++;
  2651. gc_marks(objspace);
  2652. gc_prof_sweep_timer_start(objspace);
  2653. gc_sweep(objspace);
  2654. gc_prof_sweep_timer_stop(objspace);
  2655. gc_prof_timer_stop(objspace, Qtrue);
  2656. if (GC_NOTIFY) printf("end garbage_collect()\n");
  2657. return TRUE;
  2658. }
  2659. static void *
  2660. gc_with_gvl(void *ptr)
  2661. {
  2662. return (void *)(VALUE)garbage_collect((rb_objspace_t *)ptr);
  2663. }
  2664. static int
  2665. garbage_collect_with_gvl(rb_objspace_t *objspace)
  2666. {
  2667. if (dont_gc) return TRUE;
  2668. if (ruby_thread_has_gvl_p()) {
  2669. return garbage_collect(objspace);
  2670. }
  2671. else {
  2672. if (ruby_native_thread_p()) {
  2673. return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)objspace);
  2674. }
  2675. else {
  2676. /* no ruby thread */
  2677. fprintf(stderr, "[FATAL] failed to allocate memory\n");
  2678. exit(EXIT_FAILURE);
  2679. }
  2680. }
  2681. }
  2682. int
  2683. rb_garbage_collect(void)
  2684. {
  2685. return garbage_collect(&rb_objspace);
  2686. }
  2687. #undef Init_stack
  2688. void
  2689. Init_stack(volatile VALUE *addr)
  2690. {
  2691. ruby_init_stack(addr);
  2692. }
  2693. /*
  2694. * call-seq:
  2695. * GC.start -> nil
  2696. * gc.garbage_collect -> nil
  2697. * ObjectSpace.garbage_collect -> nil
  2698. *
  2699. * Initiates garbage collection, unless manually disabled.
  2700. *
  2701. */
  2702. VALUE
  2703. rb_gc_start(void)
  2704. {
  2705. rb_gc();
  2706. return Qnil;
  2707. }
  2708. void
  2709. rb_gc(void)
  2710. {
  2711. rb_objspace_t *objspace = &rb_objspace;
  2712. garbage_collect(objspace);
  2713. if (!finalizing) finalize_deferred(objspace);
  2714. free_unused_heaps(objspace);
  2715. }
  2716. int
  2717. rb_during_gc(void)
  2718. {
  2719. rb_objspace_t *objspace = &rb_objspace;
  2720. return during_gc;
  2721. }
  2722. /*
  2723. * call-seq:
  2724. * GC.count -> Integer
  2725. *
2726. * The number of times GC has occurred.
2727. *
2728. * Returns the number of times GC has occurred since the process started.
  2729. *
  2730. */
  2731. static VALUE
  2732. gc_count(VALUE self)
  2733. {
  2734. return UINT2NUM(rb_objspace.count);
  2735. }
  2736. /*
  2737. * call-seq:
  2738. * GC.stat -> Hash
  2739. *
  2740. * Returns a Hash containing information about the GC.
  2741. *
2742. * The hash includes internal GC statistics such as:
  2743. *
  2744. * {
  2745. * :count=>0,
  2746. * :heap_used=>12,
  2747. * :heap_length=>12,
  2748. * :heap_increment=>0,
  2749. * :heap_live_num=>7539,
  2750. * :heap_free_num=>88,
  2751. * :heap_final_num=>0,
  2752. * :total_allocated_object=>7630,
  2753. * :total_freed_object=>88
  2754. * }
  2755. *
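* If the optional argument, hash, is given, it is updated with these
* statistics and returned.
*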
2756. * The contents of the hash are implementation specific and may change in
2757. * the future.
  2758. *
  2759. * This method is only expected to work on C Ruby.
  2760. *
  2761. */
  2762. static VALUE
  2763. gc_stat(int argc, VALUE *argv, VALUE self)
  2764. {
  2765. rb_objspace_t *objspace = &rb_objspace;
  2766. VALUE hash;
  2767. static VALUE sym_count;
  2768. static VALUE sym_heap_used, sym_heap_length, sym_heap_increment;
  2769. static VALUE sym_heap_live_num, sym_heap_free_num, sym_heap_final_num;
  2770. static VALUE sym_total_allocated_object, sym_total_freed_object;
  2771. if (sym_count == 0) {
  2772. sym_count = ID2SYM(rb_intern_const("count"));
  2773. sym_heap_used = ID2SYM(rb_intern_const("heap_used"));
  2774. sym_heap_length = ID2SYM(rb_intern_const("heap_length"));
  2775. sym_heap_increment = ID2SYM(rb_intern_const("heap_increment"));
  2776. sym_heap_live_num = ID2SYM(rb_intern_const("heap_live_num"));
  2777. sym_heap_free_num = ID2SYM(rb_intern_const("heap_free_num"));
  2778. sym_heap_final_num = ID2SYM(rb_intern_const("heap_final_num"));
  2779. sym_total_allocated_object = ID2SYM(rb_intern_const("total_allocated_object"));
  2780. sym_total_freed_object = ID2SYM(rb_intern_const("total_freed_object"));
  2781. }
  2782. if (rb_scan_args(argc, argv, "01", &hash) == 1) {
  2783. if (!RB_TYPE_P(hash, T_HASH))
  2784. rb_raise(rb_eTypeError, "non-hash given");
  2785. }
  2786. if (hash == Qnil) {
  2787. hash = rb_hash_new();
  2788. }
  2789. rest_sweep(objspace);
  2790. rb_hash_aset(hash, sym_count, SIZET2NUM(objspace->count));
  2791. /* implementation dependent counters */
  2792. rb_hash_aset(hash, sym_heap_used, SIZET2NUM(objspace->heap.used));
  2793. rb_hash_aset(hash, sym_heap_length, SIZET2NUM(objspace->heap.length));
  2794. rb_hash_aset(hash, sym_heap_increment, SIZET2NUM(objspace->heap.increment));
  2795. rb_hash_aset(hash, sym_heap_live_num, SIZET2NUM(objspace_live_num(objspace)));
  2796. rb_hash_aset(hash, sym_heap_free_num, SIZET2NUM(objspace->heap.free_num));
  2797. rb_hash_aset(hash, sym_heap_final_num, SIZET2NUM(objspace->heap.final_num));
  2798. rb_hash_aset(hash, sym_total_allocated_object, SIZET2NUM(objspace->total_allocated_object_num));
  2799. rb_hash_aset(hash, sym_total_freed_object, SIZET2NUM(objspace->total_freed_object_num));
  2800. return hash;
  2801. }
  2802. /*
  2803. * call-seq:
  2804. * GC.stress -> true or false
  2805. *
2806. * Returns the current status of GC stress mode.
  2807. */
  2808. static VALUE
  2809. gc_stress_get(VALUE self)
  2810. {
  2811. rb_objspace_t *objspace = &rb_objspace;
  2812. return ruby_gc_stress ? Qtrue : Qfalse;
  2813. }
  2814. /*
  2815. * call-seq:
  2816. * GC.stress = bool -> bool
  2817. *
  2818. * Updates the GC stress mode.
  2819. *
  2820. * When stress mode is enabled, the GC is invoked at every GC opportunity:
  2821. * all memory and object allocations.
  2822. *
2823. * Enabling stress mode will degrade performance; it is intended only for debugging.
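*
* GC.stress = true #=> true
* GC.stress #=> true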
  2824. */
  2825. static VALUE
  2826. gc_stress_set(VALUE self, VALUE flag)
  2827. {
  2828. rb_objspace_t *objspace = &rb_objspace;
  2829. rb_secure(2);
  2830. ruby_gc_stress = RTEST(flag);
  2831. return flag;
  2832. }
  2833. /*
  2834. * call-seq:
  2835. * GC.enable -> true or false
  2836. *
  2837. * Enables garbage collection, returning +true+ if garbage
  2838. * collection was previously disabled.
  2839. *
  2840. * GC.disable #=> false
  2841. * GC.enable #=> true
  2842. * GC.enable #=> false
  2843. *
  2844. */
  2845. VALUE
  2846. rb_gc_enable(void)
  2847. {
  2848. rb_objspace_t *objspace = &rb_objspace;
  2849. int old = dont_gc;
  2850. dont_gc = FALSE;
  2851. return old ? Qtrue : Qfalse;
  2852. }
  2853. /*
  2854. * call-seq:
  2855. * GC.disable -> true or false
  2856. *
  2857. * Disables garbage collection, returning +true+ if garbage
  2858. * collection was already disabled.
  2859. *
  2860. * GC.disable #=> false
  2861. * GC.disable #=> true
  2862. *
  2863. */
  2864. VALUE
  2865. rb_gc_disable(void)
  2866. {
  2867. rb_objspace_t *objspace = &rb_objspace;
  2868. int old = dont_gc;
  2869. dont_gc = TRUE;
  2870. return old ? Qtrue : Qfalse;
  2871. }
  2872. void
  2873. rb_gc_set_params(void)
  2874. {
  2875. char *malloc_limit_ptr, *heap_min_slots_ptr, *free_min_ptr;
  2876. if (rb_safe_level() > 0) return;
  2877. malloc_limit_ptr = getenv("RUBY_GC_MALLOC_LIMIT");
  2878. if (malloc_limit_ptr != NULL) {
  2879. int malloc_limit_i = atoi(malloc_limit_ptr);
  2880. if (RTEST(ruby_verbose))
  2881. fprintf(stderr, "malloc_limit=%d (%d)\n",
  2882. malloc_limit_i, initial_malloc_limit);
  2883. if (malloc_limit_i > 0) {
  2884. initial_malloc_limit = malloc_limit_i;
  2885. }
  2886. }
  2887. heap_min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS");
  2888. if (heap_min_slots_ptr != NULL) {
  2889. int heap_min_slots_i = atoi(heap_min_slots_ptr);
  2890. if (RTEST(ruby_verbose))
  2891. fprintf(stderr, "heap_min_slots=%d (%d)\n",
  2892. heap_min_slots_i, initial_heap_min_slots);
  2893. if (heap_min_slots_i > 0) {
  2894. initial_heap_min_slots = heap_min_slots_i;
  2895. initial_expand_heap(&rb_objspace);
  2896. }
  2897. }
  2898. free_min_ptr = getenv("RUBY_FREE_MIN");
  2899. if (free_min_ptr != NULL) {
  2900. int free_min_i = atoi(free_min_ptr);
  2901. if (RTEST(ruby_verbose))
  2902. fprintf(stderr, "free_min=%d (%d)\n", free_min_i, initial_free_min);
  2903. if (free_min_i > 0) {
  2904. initial_free_min = free_min_i;
  2905. }
  2906. }
  2907. }
  2908. void
  2909. rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
  2910. {
  2911. rb_objspace_t *objspace = &rb_objspace;
  2912. if (markable_object_p(objspace, obj)) {
  2913. struct mark_func_data_struct mfd;
  2914. mfd.mark_func = func;
  2915. mfd.data = data;
  2916. objspace->mark_func_data = &mfd;
  2917. gc_mark_children(objspace, obj);
  2918. objspace->mark_func_data = 0;
  2919. }
  2920. }
  2921. /*
  2922. ------------------------ Extended allocator ------------------------
  2923. */
  2924. static void vm_xfree(rb_objspace_t *objspace, void *ptr);
  2925. static void *
  2926. negative_size_allocation_error_with_gvl(void *ptr)
  2927. {
  2928. rb_raise(rb_eNoMemError, "%s", (const char *)ptr);
  2929. return 0; /* should not be reached */
  2930. }
  2931. static void
  2932. negative_size_allocation_error(const char *msg)
  2933. {
  2934. if (ruby_thread_has_gvl_p()) {
  2935. rb_raise(rb_eNoMemError, "%s", msg);
  2936. }
  2937. else {
  2938. if (ruby_native_thread_p()) {
  2939. rb_thread_call_with_gvl(negative_size_allocation_error_with_gvl, (void *)msg);
  2940. }
  2941. else {
  2942. fprintf(stderr, "[FATAL] %s\n", msg);
  2943. exit(EXIT_FAILURE);
  2944. }
  2945. }
  2946. }
  2947. static void *
  2948. ruby_memerror_body(void *dummy)
  2949. {
  2950. rb_memerror();
  2951. return 0;
  2952. }
  2953. static void
  2954. ruby_memerror(void)
  2955. {
  2956. if (ruby_thread_has_gvl_p()) {
  2957. rb_memerror();
  2958. }
  2959. else {
  2960. if (ruby_native_thread_p()) {
  2961. rb_thread_call_with_gvl(ruby_memerror_body, 0);
  2962. }
  2963. else {
  2964. /* no ruby thread */
  2965. fprintf(stderr, "[FATAL] failed to allocate memory\n");
  2966. exit(EXIT_FAILURE);
  2967. }
  2968. }
  2969. }
  2970. void
  2971. rb_memerror(void)
  2972. {
  2973. rb_thread_t *th = GET_THREAD();
  2974. if (!nomem_error ||
  2975. (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) {
  2976. fprintf(stderr, "[FATAL] failed to allocate memory\n");
  2977. exit(EXIT_FAILURE);
  2978. }
  2979. if (rb_thread_raised_p(th, RAISED_NOMEMORY)) {
  2980. rb_thread_raised_clear(th);
  2981. GET_THREAD()->errinfo = nomem_error;
  2982. JUMP_TAG(TAG_RAISE);
  2983. }
  2984. rb_thread_raised_set(th, RAISED_NOMEMORY);
  2985. rb_exc_raise(nomem_error);
  2986. }
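/* Allocate size bytes aligned to alignment, using the platform's aligned
 * allocator when available. The portable fallback over-allocates and stores
 * the original malloc() pointer just before the aligned block so that
 * aligned_free() can recover it. */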
  2987. static void *
  2988. aligned_malloc(size_t alignment, size_t size)
  2989. {
  2990. void *res;
  2991. #if defined __MINGW32__
  2992. res = __mingw_aligned_malloc(size, alignment);
  2993. #elif defined _WIN32 && !defined __CYGWIN__
  2994. res = _aligned_malloc(size, alignment);
  2995. #elif defined(HAVE_POSIX_MEMALIGN)
  2996. if (posix_memalign(&res, alignment, size) == 0) {
  2997. return res;
  2998. }
  2999. else {
  3000. return NULL;
  3001. }
  3002. #elif defined(HAVE_MEMALIGN)
  3003. res = memalign(alignment, size);
  3004. #else
  3005. char* aligned;
  3006. res = malloc(alignment + size + sizeof(void*));
  3007. aligned = (char*)res + alignment + sizeof(void*);
  3008. aligned -= ((VALUE)aligned & (alignment - 1));
  3009. ((void**)aligned)[-1] = res;
  3010. res = (void*)aligned;
  3011. #endif
  3012. #if defined(_DEBUG) || defined(GC_DEBUG)
  3013. /* alignment must be a power of 2 */
3014. assert(((alignment - 1) & alignment) == 0);
  3015. assert(alignment % sizeof(void*) == 0);
  3016. #endif
  3017. return res;
  3018. }
  3019. static void
  3020. aligned_free(void *ptr)
  3021. {
  3022. #if defined __MINGW32__
  3023. __mingw_aligned_free(ptr);
  3024. #elif defined _WIN32 && !defined __CYGWIN__
  3025. _aligned_free(ptr);
  3026. #elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
  3027. free(ptr);
  3028. #else
  3029. free(((void**)ptr)[-1]);
  3030. #endif
  3031. }
  3032. static inline size_t
  3033. vm_malloc_prepare(rb_objspace_t *objspace, size_t size)
  3034. {
  3035. if ((ssize_t)size < 0) {
  3036. negative_size_allocation_error("negative allocation size (or too big)");
  3037. }
  3038. if (size == 0) size = 1;
  3039. #if CALC_EXACT_MALLOC_SIZE
  3040. size += sizeof(size_t);
  3041. #endif
  3042. if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
  3043. (malloc_increase+size) > malloc_limit) {
  3044. garbage_collect_with_gvl(objspace);
  3045. }
  3046. return size;
  3047. }
  3048. static inline void *
  3049. vm_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
  3050. {
  3051. ATOMIC_SIZE_ADD(malloc_increase, size);
  3052. #if CALC_EXACT_MALLOC_SIZE
  3053. ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, size);
  3054. ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
  3055. ((size_t *)mem)[0] = size;
  3056. mem = (size_t *)mem + 1;
  3057. #endif
  3058. return mem;
  3059. }
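/* Attempt an allocation; if it fails, run the GC once (acquiring the GVL if
 * necessary) and retry before reporting out-of-memory. */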
  3060. #define TRY_WITH_GC(alloc) do { \
  3061. if (!(alloc) && \
  3062. (!garbage_collect_with_gvl(objspace) || \
  3063. !(alloc))) { \
  3064. ruby_memerror(); \
  3065. } \
  3066. } while (0)
  3067. static void *
  3068. vm_xmalloc(rb_objspace_t *objspace, size_t size)
  3069. {
  3070. void *mem;
  3071. size = vm_malloc_prepare(objspace, size);
  3072. TRY_WITH_GC(mem = malloc(size));
  3073. return vm_malloc_fixup(objspace, mem, size);
  3074. }
  3075. static void *
  3076. vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size)
  3077. {
  3078. void *mem;
  3079. #if CALC_EXACT_MALLOC_SIZE
  3080. size_t oldsize;
  3081. #endif
  3082. if ((ssize_t)size < 0) {
  3083. negative_size_allocation_error("negative re-allocation size");
  3084. }
  3085. if (!ptr) return vm_xmalloc(objspace, size);
  3086. /*
  3087. * The behavior of realloc(ptr, 0) is implementation defined.
3088. * Therefore we don't use realloc(ptr, 0), for portability reasons.
  3089. * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
  3090. */
  3091. if (size == 0) {
  3092. vm_xfree(objspace, ptr);
  3093. return 0;
  3094. }
  3095. if (ruby_gc_stress && !ruby_disable_gc_stress)
  3096. garbage_collect_with_gvl(objspace);
  3097. #if CALC_EXACT_MALLOC_SIZE
  3098. size += sizeof(size_t);
  3099. ptr = (size_t *)ptr - 1;
  3100. oldsize = ((size_t *)ptr)[0];
  3101. #endif
  3102. mem = realloc(ptr, size);
  3103. if (!mem) {
  3104. if (garbage_collect_with_gvl(objspace)) {
  3105. mem = realloc(ptr, size);
  3106. }
  3107. if (!mem) {
  3108. ruby_memerror();
  3109. }
  3110. }
  3111. ATOMIC_SIZE_ADD(malloc_increase, size);
  3112. #if CALC_EXACT_MALLOC_SIZE
  3113. ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, size - oldsize);
  3114. ((size_t *)mem)[0] = size;
  3115. mem = (size_t *)mem + 1;
  3116. #endif
  3117. return mem;
  3118. }
  3119. static void
  3120. vm_xfree(rb_objspace_t *objspace, void *ptr)
  3121. {
  3122. #if CALC_EXACT_MALLOC_SIZE
  3123. size_t size;
  3124. ptr = ((size_t *)ptr) - 1;
  3125. size = ((size_t*)ptr)[0];
  3126. if (size) {
  3127. ATOMIC_SIZE_SUB(objspace->malloc_params.allocated_size, size);
  3128. ATOMIC_SIZE_DEC(objspace->malloc_params.allocations);
  3129. }
  3130. #endif
  3131. free(ptr);
  3132. }
  3133. void *
  3134. ruby_xmalloc(size_t size)
  3135. {
  3136. return vm_xmalloc(&rb_objspace, size);
  3137. }
  3138. static inline size_t
  3139. xmalloc2_size(size_t n, size_t size)
  3140. {
  3141. size_t len = size * n;
  3142. if (n != 0 && size != len / n) {
  3143. rb_raise(rb_eArgError, "malloc: possible integer overflow");
  3144. }
  3145. return len;
  3146. }
  3147. void *
  3148. ruby_xmalloc2(size_t n, size_t size)
  3149. {
  3150. return vm_xmalloc(&rb_objspace, xmalloc2_size(n, size));
  3151. }
  3152. static void *
  3153. vm_xcalloc(rb_objspace_t *objspace, size_t count, size_t elsize)
  3154. {
  3155. void *mem;
  3156. size_t size;
  3157. size = xmalloc2_size(count, elsize);
  3158. size = vm_malloc_prepare(objspace, size);
  3159. TRY_WITH_GC(mem = calloc(1, size));
  3160. return vm_malloc_fixup(objspace, mem, size);
  3161. }
  3162. void *
  3163. ruby_xcalloc(size_t n, size_t size)
  3164. {
  3165. return vm_xcalloc(&rb_objspace, n, size);
  3166. }
  3167. void *
  3168. ruby_xrealloc(void *ptr, size_t size)
  3169. {
  3170. return vm_xrealloc(&rb_objspace, ptr, size);
  3171. }
  3172. void *
  3173. ruby_xrealloc2(void *ptr, size_t n, size_t size)
  3174. {
  3175. size_t len = size * n;
  3176. if (n != 0 && size != len / n) {
  3177. rb_raise(rb_eArgError, "realloc: possible integer overflow");
  3178. }
  3179. return ruby_xrealloc(ptr, len);
  3180. }
  3181. void
  3182. ruby_xfree(void *x)
  3183. {
  3184. if (x)
  3185. vm_xfree(&rb_objspace, x);
  3186. }
3187. /* Mimics ruby_xmalloc, but does not require rb_objspace.
3188. * Returns a pointer suitable for ruby_xfree().
  3189. */
  3190. void *
  3191. ruby_mimmalloc(size_t size)
  3192. {
  3193. void *mem;
  3194. #if CALC_EXACT_MALLOC_SIZE
  3195. size += sizeof(size_t);
  3196. #endif
  3197. mem = malloc(size);
  3198. #if CALC_EXACT_MALLOC_SIZE
  3199. /* set 0 for consistency of allocated_size/allocations */
  3200. ((size_t *)mem)[0] = 0;
  3201. mem = (size_t *)mem + 1;
  3202. #endif
  3203. return mem;
  3204. }
  3205. #if CALC_EXACT_MALLOC_SIZE
  3206. /*
  3207. * call-seq:
  3208. * GC.malloc_allocated_size -> Integer
  3209. *
  3210. * Returns the size of memory allocated by malloc().
  3211. *
  3212. * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
  3213. */
  3214. static VALUE
  3215. gc_malloc_allocated_size(VALUE self)
  3216. {
  3217. return UINT2NUM(rb_objspace.malloc_params.allocated_size);
  3218. }
  3219. /*
  3220. * call-seq:
  3221. * GC.malloc_allocations -> Integer
  3222. *
  3223. * Returns the number of malloc() allocations.
  3224. *
  3225. * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
  3226. */
  3227. static VALUE
  3228. gc_malloc_allocations(VALUE self)
  3229. {
  3230. return UINT2NUM(rb_objspace.malloc_params.allocations);
  3231. }
  3232. #endif
  3233. /*
  3234. ------------------------------ WeakMap ------------------------------
  3235. */
  3236. struct weakmap {
  3237. st_table *obj2wmap; /* obj -> [ref,...] */
  3238. st_table *wmap2obj; /* ref -> obj */
  3239. VALUE final;
  3240. };
  3241. static int
  3242. wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
  3243. {
  3244. gc_mark_ptr((rb_objspace_t *)arg, (VALUE)val);
  3245. return ST_CONTINUE;
  3246. }
  3247. static void
  3248. wmap_mark(void *ptr)
  3249. {
  3250. struct weakmap *w = ptr;
  3251. st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
  3252. rb_gc_mark(w->final);
  3253. }
  3254. static int
  3255. wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
  3256. {
  3257. rb_ary_resize((VALUE)val, 0);
  3258. return ST_CONTINUE;
  3259. }
  3260. static void
  3261. wmap_free(void *ptr)
  3262. {
  3263. struct weakmap *w = ptr;
  3264. st_foreach(w->obj2wmap, wmap_free_map, 0);
  3265. st_free_table(w->obj2wmap);
  3266. st_free_table(w->wmap2obj);
  3267. }
  3268. size_t rb_ary_memsize(VALUE ary);
  3269. static int
  3270. wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
  3271. {
  3272. *(size_t *)arg += rb_ary_memsize((VALUE)val);
  3273. return ST_CONTINUE;
  3274. }
  3275. static size_t
  3276. wmap_memsize(const void *ptr)
  3277. {
  3278. size_t size;
  3279. const struct weakmap *w = ptr;
  3280. if (!w) return 0;
  3281. size = sizeof(*w);
  3282. size += st_memsize(w->obj2wmap);
  3283. size += st_memsize(w->wmap2obj);
  3284. st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
  3285. return size;
  3286. }
  3287. static const rb_data_type_t weakmap_type = {
  3288. "weakmap",
  3289. {
  3290. wmap_mark,
  3291. wmap_free,
  3292. wmap_memsize,
  3293. }
  3294. };
  3295. static VALUE
  3296. wmap_allocate(VALUE klass)
  3297. {
  3298. struct weakmap *w;
  3299. VALUE obj = TypedData_Make_Struct(klass, struct weakmap, &weakmap_type, w);
  3300. w->obj2wmap = st_init_numtable();
  3301. w->wmap2obj = st_init_numtable();
  3302. w->final = rb_obj_method(obj, ID2SYM(rb_intern("finalize")));
  3303. return obj;
  3304. }
  3305. static int
  3306. wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
  3307. {
  3308. VALUE wmap, ary;
  3309. if (!existing) return ST_STOP;
  3310. wmap = (VALUE)arg, ary = (VALUE)*value;
  3311. rb_ary_delete_same(ary, wmap);
  3312. if (!RARRAY_LEN(ary)) return ST_DELETE;
  3313. return ST_CONTINUE;
  3314. }
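/* Finalizer entry point for WeakMap: receives the object id of a collected
 * key or value and removes the corresponding entries from both tables. */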
  3315. static VALUE
  3316. wmap_finalize(VALUE self, VALUE objid)
  3317. {
  3318. st_data_t orig, wmap, data;
  3319. VALUE obj, rids;
  3320. long i;
  3321. struct weakmap *w;
  3322. TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
  3323. /* Get reference from object id. */
  3324. obj = obj_id_to_ref(objid);
  3325. /* obj is original referenced object and/or weak reference. */
  3326. orig = (st_data_t)obj;
  3327. if (st_delete(w->obj2wmap, &orig, &data)) {
  3328. rids = (VALUE)data;
  3329. for (i = 0; i < RARRAY_LEN(rids); ++i) {
  3330. wmap = (st_data_t)RARRAY_PTR(rids)[i];
  3331. st_delete(w->wmap2obj, &wmap, NULL);
  3332. }
  3333. }
  3334. wmap = (st_data_t)obj;
  3335. if (st_delete(w->wmap2obj, &wmap, &orig)) {
  3336. wmap = (st_data_t)obj;
  3337. st_update(w->obj2wmap, orig, wmap_final_func, wmap);
  3338. }
  3339. return self;
  3340. }
  3341. /* Creates a weak reference from the given key to the given value */
  3342. static VALUE
  3343. wmap_aset(VALUE self, VALUE wmap, VALUE orig)
  3344. {
  3345. st_data_t data;
  3346. VALUE rids;
  3347. struct weakmap *w;
  3348. TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
  3349. rb_define_final(orig, w->final);
  3350. rb_define_final(wmap, w->final);
  3351. if (st_lookup(w->obj2wmap, (st_data_t)orig, &data)) {
  3352. rids = (VALUE)data;
  3353. }
  3354. else {
  3355. rids = rb_ary_tmp_new(1);
  3356. st_insert(w->obj2wmap, (st_data_t)orig, (st_data_t)rids);
  3357. }
  3358. rb_ary_push(rids, wmap);
  3359. st_insert(w->wmap2obj, (st_data_t)wmap, (st_data_t)orig);
  3360. return nonspecial_obj_id(orig);
  3361. }
  3362. /* Retrieves a weakly referenced object with the given key */
  3363. static VALUE
  3364. wmap_aref(VALUE self, VALUE wmap)
  3365. {
  3366. st_data_t data;
  3367. VALUE obj;
  3368. struct weakmap *w;
  3369. rb_objspace_t *objspace = &rb_objspace;
  3370. TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
  3371. if (!st_lookup(w->wmap2obj, (st_data_t)wmap, &data)) return Qnil;
  3372. obj = (VALUE)data;
  3373. if (!is_id_value(objspace, obj)) return Qnil;
  3374. if (!is_live_object(objspace, obj)) return Qnil;
  3375. return obj;
  3376. }
  3377. /*
  3378. ------------------------------ GC profiler ------------------------------
  3379. */
  3380. static inline void gc_prof_set_heap_info(rb_objspace_t *, gc_profile_record *);
  3381. #define GC_PROFILE_RECORD_DEFAULT_SIZE 100
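/* Process CPU time in seconds, using clock_gettime(CLOCK_PROCESS_CPUTIME_ID),
 * getrusage() or GetProcessTimes() depending on the platform; returns 0.0 if
 * no timer is available. */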
  3382. static double
  3383. getrusage_time(void)
  3384. {
  3385. #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
  3386. struct timespec ts;
  3387. if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
  3388. return ts.tv_sec + ts.tv_nsec * 1e-9;
  3389. }
  3390. return 0.0;
  3391. #elif defined RUSAGE_SELF
  3392. struct rusage usage;
  3393. struct timeval time;
  3394. getrusage(RUSAGE_SELF, &usage);
  3395. time = usage.ru_utime;
  3396. return time.tv_sec + time.tv_usec * 1e-6;
  3397. #elif defined _WIN32
  3398. FILETIME creation_time, exit_time, kernel_time, user_time;
  3399. ULARGE_INTEGER ui;
  3400. LONG_LONG q;
  3401. double t;
  3402. if (GetProcessTimes(GetCurrentProcess(),
  3403. &creation_time, &exit_time, &kernel_time, &user_time) == 0)
  3404. {
  3405. return 0.0;
  3406. }
  3407. memcpy(&ui, &user_time, sizeof(FILETIME));
  3408. q = ui.QuadPart / 10L;
  3409. t = (DWORD)(q % 1000000L) * 1e-6;
  3410. q /= 1000000L;
  3411. #ifdef __GNUC__
  3412. t += q;
  3413. #else
  3414. t += (double)(DWORD)(q >> 16) * (1 << 16);
  3415. t += (DWORD)q & ~(~0 << 16);
  3416. #endif
  3417. return t;
  3418. #else
  3419. return 0.0;
  3420. #endif
  3421. }
  3422. static inline void
  3423. gc_prof_timer_start(rb_objspace_t *objspace)
  3424. {
  3425. if (objspace->profile.run) {
  3426. size_t count = objspace->profile.count;
  3427. if (!objspace->profile.record) {
  3428. objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
  3429. objspace->profile.record = malloc(sizeof(gc_profile_record) * objspace->profile.size);
  3430. }
  3431. if (count >= objspace->profile.size) {
  3432. objspace->profile.size += 1000;
  3433. objspace->profile.record = realloc(objspace->profile.record, sizeof(gc_profile_record) * objspace->profile.size);
  3434. }
  3435. if (!objspace->profile.record) {
  3436. rb_bug("gc_profile malloc or realloc miss");
  3437. }
  3438. MEMZERO(&objspace->profile.record[count], gc_profile_record, 1);
  3439. objspace->profile.record[count].gc_time = getrusage_time();
  3440. objspace->profile.record[objspace->profile.count].gc_invoke_time =
  3441. objspace->profile.record[count].gc_time - objspace->profile.invoke_time;
  3442. }
  3443. }
  3444. static inline void
  3445. gc_prof_timer_stop(rb_objspace_t *objspace, int marked)
  3446. {
  3447. if (objspace->profile.run) {
  3448. double gc_time = 0;
  3449. size_t count = objspace->profile.count;
  3450. gc_profile_record *record = &objspace->profile.record[count];
  3451. gc_time = getrusage_time() - record->gc_time;
  3452. if (gc_time < 0) gc_time = 0;
  3453. record->gc_time = gc_time;
  3454. record->is_marked = !!(marked);
  3455. gc_prof_set_heap_info(objspace, record);
  3456. objspace->profile.count++;
  3457. }
  3458. }
  3459. #if !GC_PROFILE_MORE_DETAIL
  3460. static inline void
  3461. gc_prof_mark_timer_start(rb_objspace_t *objspace)
  3462. {
  3463. if (RUBY_DTRACE_GC_MARK_BEGIN_ENABLED()) {
  3464. RUBY_DTRACE_GC_MARK_BEGIN();
  3465. }
  3466. }
  3467. static inline void
  3468. gc_prof_mark_timer_stop(rb_objspace_t *objspace)
  3469. {
  3470. if (RUBY_DTRACE_GC_MARK_END_ENABLED()) {
  3471. RUBY_DTRACE_GC_MARK_END();
  3472. }
  3473. }
  3474. static inline void
  3475. gc_prof_sweep_timer_start(rb_objspace_t *objspace)
  3476. {
  3477. if (RUBY_DTRACE_GC_SWEEP_BEGIN_ENABLED()) {
  3478. RUBY_DTRACE_GC_SWEEP_BEGIN();
  3479. }
  3480. }
  3481. static inline void
  3482. gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
  3483. {
  3484. if (RUBY_DTRACE_GC_SWEEP_END_ENABLED()) {
  3485. RUBY_DTRACE_GC_SWEEP_END();
  3486. }
  3487. }
  3488. static inline void
  3489. gc_prof_set_malloc_info(rb_objspace_t *objspace)
  3490. {
  3491. }
  3492. static inline void
  3493. gc_prof_set_heap_info(rb_objspace_t *objspace, gc_profile_record *record)
  3494. {
  3495. size_t live = objspace_live_num(objspace);
  3496. size_t total = heaps_used * HEAP_OBJ_LIMIT;
  3497. record->heap_total_objects = total;
  3498. record->heap_use_size = live * sizeof(RVALUE);
  3499. record->heap_total_size = total * sizeof(RVALUE);
  3500. }
  3501. #else
  3502. static inline void
  3503. gc_prof_mark_timer_start(rb_objspace_t *objspace)
  3504. {
  3505. if (RUBY_DTRACE_GC_MARK_BEGIN_ENABLED()) {
  3506. RUBY_DTRACE_GC_MARK_BEGIN();
  3507. }
  3508. if (objspace->profile.run) {
  3509. size_t count = objspace->profile.count;
  3510. objspace->profile.record[count].gc_mark_time = getrusage_time();
  3511. }
  3512. }
  3513. static inline void
  3514. gc_prof_mark_timer_stop(rb_objspace_t *objspace)
  3515. {
  3516. if (RUBY_DTRACE_GC_MARK_END_ENABLED()) {
  3517. RUBY_DTRACE_GC_MARK_END();
  3518. }
  3519. if (objspace->profile.run) {
  3520. double mark_time = 0;
  3521. size_t count = objspace->profile.count;
  3522. gc_profile_record *record = &objspace->profile.record[count];
  3523. mark_time = getrusage_time() - record->gc_mark_time;
  3524. if (mark_time < 0) mark_time = 0;
  3525. record->gc_mark_time = mark_time;
  3526. }
  3527. }
  3528. static inline void
  3529. gc_prof_sweep_timer_start(rb_objspace_t *objspace)
  3530. {
  3531. if (RUBY_DTRACE_GC_SWEEP_BEGIN_ENABLED()) {
  3532. RUBY_DTRACE_GC_SWEEP_BEGIN();
  3533. }
  3534. if (objspace->profile.run) {
  3535. size_t count = objspace->profile.count;
  3536. objspace->profile.record[count].gc_sweep_time = getrusage_time();
  3537. }
  3538. }
  3539. static inline void
  3540. gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
  3541. {
  3542. if (RUBY_DTRACE_GC_SWEEP_END_ENABLED()) {
  3543. RUBY_DTRACE_GC_SWEEP_END();
  3544. }
  3545. if (objspace->profile.run) {
  3546. double sweep_time = 0;
  3547. size_t count = objspace->profile.count;
  3548. gc_profile_record *record = &objspace->profile.record[count];
3549. sweep_time = getrusage_time() - record->gc_sweep_time;
3550. if (sweep_time < 0) sweep_time = 0;
  3551. record->gc_sweep_time = sweep_time;
  3552. }
  3553. }
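/*
 * Copies the allocator counters (malloc_increase and malloc_limit) into the
 * current profile record; they are exposed to Ruby as :ALLOCATE_INCREASE and
 * :ALLOCATE_LIMIT by GC::Profiler.raw_data.
 */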
  3554. static inline void
  3555. gc_prof_set_malloc_info(rb_objspace_t *objspace)
  3556. {
  3557. if (objspace->profile.run) {
  3558. gc_profile_record *record = &objspace->profile.record[objspace->profile.count];
  3559. if (record) {
  3560. record->allocate_increase = malloc_increase;
  3561. record->allocate_limit = malloc_limit;
  3562. }
  3563. }
  3564. }
  3565. static inline void
  3566. gc_prof_set_heap_info(rb_objspace_t *objspace, gc_profile_record *record)
  3567. {
  3568. size_t live = objspace->heap.live_num;
  3569. size_t total = heaps_used * HEAP_OBJ_LIMIT;
  3570. record->heap_use_slots = heaps_used;
  3571. record->heap_live_objects = live;
  3572. record->heap_free_objects = total - live;
  3573. record->heap_total_objects = total;
  3574. record->have_finalize = deferred_final_list ? Qtrue : Qfalse;
  3575. record->heap_use_size = live * sizeof(RVALUE);
  3576. record->heap_total_size = total * sizeof(RVALUE);
  3577. }
  3578. #endif /* !GC_PROFILE_MORE_DETAIL */
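/*
 * Illustrative Ruby-level flow for the profiler implemented below; the
 * result shown is made up, only the method names are real:
 *
 *    GC::Profiler.enable          # sets objspace->profile.run
 *    GC.start                     # each profiled run appends one gc_profile_record
 *    GC::Profiler.raw_data.size   #=> 1 (for example)
 *    puts GC::Profiler.result     # table produced by gc_profile_dump_on()
 *    GC::Profiler.disable
 */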
  3579. /*
  3580. * call-seq:
  3581. * GC::Profiler.clear -> nil
  3582. *
  3583. * Clears the GC profiler data.
  3584. *
  3585. */
  3586. static VALUE
  3587. gc_profile_clear(void)
  3588. {
  3589. rb_objspace_t *objspace = &rb_objspace;
  3590. if (GC_PROFILE_RECORD_DEFAULT_SIZE * 2 < objspace->profile.size) {
  3591. objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE * 2;
  3592. objspace->profile.record = realloc(objspace->profile.record, sizeof(gc_profile_record) * objspace->profile.size);
  3593. if (!objspace->profile.record) {
  3594. rb_memerror();
  3595. }
  3596. }
  3597. MEMZERO(objspace->profile.record, gc_profile_record, objspace->profile.size);
  3598. objspace->profile.count = 0;
  3599. return Qnil;
  3600. }
  3601. /*
  3602. * call-seq:
  3603. * GC::Profiler.raw_data -> [Hash, ...]
  3604. *
  3605. * Returns an Array of individual raw profile data Hashes ordered
  3606. * from earliest to latest by +:GC_INVOKE_TIME+.
  3607. *
  3608. * For example:
  3609. *
  3610. * [
  3611. * {
  3612. * :GC_TIME=>1.3000000000000858e-05,
  3613. * :GC_INVOKE_TIME=>0.010634999999999999,
  3614. * :HEAP_USE_SIZE=>289640,
  3615. * :HEAP_TOTAL_SIZE=>588960,
  3616. * :HEAP_TOTAL_OBJECTS=>14724,
  3617. * :GC_IS_MARKED=>false
  3618. * },
  3619. * # ...
  3620. * ]
  3621. *
  3622. * The keys mean:
  3623. *
  3624. * +:GC_TIME+::
  3625. * Time elapsed in seconds for this GC run
  3626. * +:GC_INVOKE_TIME+::
  3627. * Time elapsed in seconds from startup to when the GC was invoked
  3628. * +:HEAP_USE_SIZE+::
  3629. * Total bytes of heap used
  3630. * +:HEAP_TOTAL_SIZE+::
  3631. * Total size of heap in bytes
  3632. * +:HEAP_TOTAL_OBJECTS+::
  3633. * Total number of objects
  3634. * +:GC_IS_MARKED+::
3635. * +true+ if this GC run included a mark phase
  3636. *
  3637. * If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
  3638. * to the following hash keys:
  3639. *
  3640. * +:GC_MARK_TIME+::
  3641. * +:GC_SWEEP_TIME+::
  3642. * +:ALLOCATE_INCREASE+::
  3643. * +:ALLOCATE_LIMIT+::
  3644. * +:HEAP_USE_SLOTS+::
  3645. * +:HEAP_LIVE_OBJECTS+::
  3646. * +:HEAP_FREE_OBJECTS+::
  3647. * +:HAVE_FINALIZE+::
  3648. *
  3649. */
  3650. static VALUE
  3651. gc_profile_record_get(void)
  3652. {
  3653. VALUE prof;
  3654. VALUE gc_profile = rb_ary_new();
  3655. size_t i;
  3656. rb_objspace_t *objspace = (&rb_objspace);
  3657. if (!objspace->profile.run) {
  3658. return Qnil;
  3659. }
3660. for (i = 0; i < objspace->profile.count; i++) {
  3661. prof = rb_hash_new();
  3662. rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(objspace->profile.record[i].gc_time));
  3663. rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(objspace->profile.record[i].gc_invoke_time));
  3664. rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(objspace->profile.record[i].heap_use_size));
  3665. rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(objspace->profile.record[i].heap_total_size));
  3666. rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_total_objects));
  3667. rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), objspace->profile.record[i].is_marked);
  3668. #if GC_PROFILE_MORE_DETAIL
  3669. rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(objspace->profile.record[i].gc_mark_time));
  3670. rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(objspace->profile.record[i].gc_sweep_time));
  3671. rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(objspace->profile.record[i].allocate_increase));
  3672. rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(objspace->profile.record[i].allocate_limit));
  3673. rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SLOTS")), SIZET2NUM(objspace->profile.record[i].heap_use_slots));
  3674. rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_live_objects));
  3675. rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_free_objects));
  3676. rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), objspace->profile.record[i].have_finalize);
  3677. #endif
  3678. rb_ary_push(gc_profile, prof);
  3679. }
  3680. return gc_profile;
  3681. }
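/*
 * Formats the collected records as a text table, emitting each line through
 * the supplied +append+ callback (rb_str_buf_append for GC::Profiler.result,
 * rb_io_write for GC::Profiler.report).
 */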
  3682. static void
  3683. gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
  3684. {
  3685. rb_objspace_t *objspace = &rb_objspace;
  3686. size_t count = objspace->profile.count;
  3687. if (objspace->profile.run && count) {
  3688. int index = 1;
  3689. size_t i;
  3690. gc_profile_record r;
  3691. append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->count));
  3692. append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
  3693. for (i = 0; i < count; i++) {
  3694. r = objspace->profile.record[i];
  3695. #if !GC_PROFILE_MORE_DETAIL
  3696. if (r.is_marked) {
  3697. #endif
  3698. append(out, rb_sprintf("%5d %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
  3699. index++, r.gc_invoke_time, r.heap_use_size,
  3700. r.heap_total_size, r.heap_total_objects, r.gc_time*1000));
  3701. #if !GC_PROFILE_MORE_DETAIL
  3702. }
  3703. #endif
  3704. }
  3705. #if GC_PROFILE_MORE_DETAIL
  3706. append(out, rb_str_new_cstr("\n\n" \
  3707. "More detail.\n" \
  3708. "Index Allocate Increase Allocate Limit Use Slot Have Finalize Mark Time(ms) Sweep Time(ms)\n"));
  3709. index = 1;
  3710. for (i = 0; i < count; i++) {
  3711. r = objspace->profile.record[i];
  3712. append(out, rb_sprintf("%5d %17"PRIuSIZE" %17"PRIuSIZE" %9"PRIuSIZE" %14s %25.20f %25.20f\n",
  3713. index++, r.allocate_increase, r.allocate_limit,
  3714. r.heap_use_slots, (r.have_finalize ? "true" : "false"),
  3715. r.gc_mark_time*1000, r.gc_sweep_time*1000));
  3716. }
  3717. #endif
  3718. }
  3719. }
  3720. /*
  3721. * call-seq:
  3722. * GC::Profiler.result -> String
  3723. *
  3724. * Returns a profile data report such as:
  3725. *
  3726. * GC 1 invokes.
3727. * Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)
  3728. * 1 0.012 159240 212940 10647 0.00000000000001530000
  3729. */
  3730. static VALUE
  3731. gc_profile_result(void)
  3732. {
  3733. VALUE str = rb_str_buf_new(0);
  3734. gc_profile_dump_on(str, rb_str_buf_append);
  3735. return str;
  3736. }
  3737. /*
  3738. * call-seq:
  3739. * GC::Profiler.report
  3740. * GC::Profiler.report(io)
  3741. *
  3742. * Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
  3743. *
  3744. */
  3745. static VALUE
  3746. gc_profile_report(int argc, VALUE *argv, VALUE self)
  3747. {
  3748. VALUE out;
  3749. if (argc == 0) {
  3750. out = rb_stdout;
  3751. }
  3752. else {
  3753. rb_scan_args(argc, argv, "01", &out);
  3754. }
  3755. gc_profile_dump_on(out, rb_io_write);
  3756. return Qnil;
  3757. }
  3758. /*
  3759. * call-seq:
  3760. * GC::Profiler.total_time -> float
  3761. *
  3762. * The total time used for garbage collection in seconds
  3763. */
  3764. static VALUE
  3765. gc_profile_total_time(VALUE self)
  3766. {
  3767. double time = 0;
  3768. rb_objspace_t *objspace = &rb_objspace;
  3769. size_t i;
  3770. if (objspace->profile.run && objspace->profile.count) {
  3771. for (i = 0; i < objspace->profile.count; i++) {
  3772. time += objspace->profile.record[i].gc_time;
  3773. }
  3774. }
  3775. return DBL2NUM(time);
  3776. }
  3777. /*
  3778. * call-seq:
  3779. * GC::Profiler.enabled? -> true or false
  3780. *
  3781. * The current status of GC profile mode.
  3782. */
  3783. static VALUE
  3784. gc_profile_enable_get(VALUE self)
  3785. {
  3786. rb_objspace_t *objspace = &rb_objspace;
  3787. return objspace->profile.run ? Qtrue : Qfalse;
  3788. }
  3789. /*
  3790. * call-seq:
  3791. * GC::Profiler.enable -> nil
  3792. *
  3793. * Starts the GC profiler.
  3794. *
  3795. */
  3796. static VALUE
  3797. gc_profile_enable(void)
  3798. {
  3799. rb_objspace_t *objspace = &rb_objspace;
  3800. objspace->profile.run = TRUE;
  3801. return Qnil;
  3802. }
  3803. /*
  3804. * call-seq:
  3805. * GC::Profiler.disable -> nil
  3806. *
  3807. * Stops the GC profiler.
  3808. *
  3809. */
  3810. static VALUE
  3811. gc_profile_disable(void)
  3812. {
  3813. rb_objspace_t *objspace = &rb_objspace;
  3814. objspace->profile.run = FALSE;
  3815. return Qnil;
  3816. }
  3817. #ifdef GC_DEBUG
  3818. /*
  3819. ------------------------------ DEBUG ------------------------------
  3820. */
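/*
 * Dumps to stderr whether +obj+ points into the object heap, whether it is
 * marked in the heap bitmap, and (while lazy sweeping) whether its slot has
 * already been swept.
 */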
  3821. void
  3822. rb_gcdebug_print_obj_condition(VALUE obj)
  3823. {
  3824. rb_objspace_t *objspace = &rb_objspace;
  3825. if (is_pointer_to_heap(objspace, (void *)obj)) {
  3826. fprintf(stderr, "pointer to heap?: true\n");
  3827. }
  3828. else {
  3829. fprintf(stderr, "pointer to heap?: false\n");
  3830. return;
  3831. }
  3832. fprintf(stderr, "marked?: %s\n",
  3833. MARKED_IN_BITMAP(GET_HEAP_BITMAP(obj), obj) ? "true" : "false");
  3834. if (is_lazy_sweeping(objspace)) {
  3835. fprintf(stderr, "lazy sweeping?: true\n");
  3836. fprintf(stderr, "swept?: %s\n",
  3837. is_swept_object(objspace, obj) ? "done" : "not yet");
  3838. }
  3839. else {
  3840. fprintf(stderr, "lazy sweeping?: false\n");
  3841. }
  3842. }
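/*
 * rb_gcdebug_sentinel() attaches gcdebug_sential() as a finalizer so that a
 * warning is printed if the watched object is ever collected; useful when
 * hunting premature-collection bugs. (GC_DEBUG builds only.)
 */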
  3843. static VALUE
  3844. gcdebug_sential(VALUE obj, VALUE name)
  3845. {
  3846. fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
  3847. return Qnil;
  3848. }
  3849. void
  3850. rb_gcdebug_sentinel(VALUE obj, const char *name)
  3851. {
  3852. rb_define_final(obj, rb_proc_new(gcdebug_sential, (VALUE)name));
  3853. }
  3854. #endif /* GC_DEBUG */
  3855. /*
  3856. * Document-class: ObjectSpace
  3857. *
  3858. * The ObjectSpace module contains a number of routines
  3859. * that interact with the garbage collection facility and allow you to
  3860. * traverse all living objects with an iterator.
  3861. *
  3862. * ObjectSpace also provides support for object finalizers, procs that will be
  3863. * called when a specific object is about to be destroyed by garbage
  3864. * collection.
  3865. *
  3866. * include ObjectSpace
  3867. *
  3868. * a = "A"
  3869. * b = "B"
  3870. * c = "C"
  3871. *
  3872. * define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
  3873. * define_finalizer(a, proc {|id| puts "Finalizer two on #{id}" })
  3874. * define_finalizer(b, proc {|id| puts "Finalizer three on #{id}" })
  3875. *
  3876. * _produces:_
  3877. *
  3878. * Finalizer three on 537763470
  3879. * Finalizer one on 537763480
  3880. * Finalizer two on 537763480
  3881. *
  3882. */
  3883. /*
  3884. * Document-class: ObjectSpace::WeakMap
  3885. *
  3886. * An ObjectSpace::WeakMap object holds references to
3887. * any objects, but those objects can still be garbage collected.
  3888. *
3889. * This class is mostly used internally by WeakRef; please use
  3890. * +lib/weakref.rb+ for the public interface.
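 *
 *  A minimal illustrative example (the key and value are arbitrary):
 *
 *     map = ObjectSpace::WeakMap.new
 *     key = Object.new
 *     map[key] = "some value"
 *     map[key]              #=> "some value", until +key+ is garbage collected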
  3891. */
  3892. /* Document-class: GC::Profiler
  3893. *
  3894. * The GC profiler provides access to information on GC runs including time,
  3895. * length and object space size.
  3896. *
  3897. * Example:
  3898. *
  3899. * GC::Profiler.enable
  3900. *
  3901. * require 'rdoc/rdoc'
  3902. *
  3903. * GC::Profiler.report
  3904. *
  3905. * GC::Profiler.disable
  3906. *
  3907. * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
  3908. */
  3909. /*
  3910. * The GC module provides an interface to Ruby's mark and
  3911. * sweep garbage collection mechanism.
  3912. *
  3913. * Some of the underlying methods are also available via the ObjectSpace
  3914. * module.
  3915. *
  3916. * You may obtain information about the operation of the GC through
  3917. * GC::Profiler.
  3918. */
  3919. void
  3920. Init_GC(void)
  3921. {
  3922. VALUE rb_mObSpace;
  3923. VALUE rb_mProfiler;
  3924. rb_mGC = rb_define_module("GC");
  3925. rb_define_singleton_method(rb_mGC, "start", rb_gc_start, 0);
  3926. rb_define_singleton_method(rb_mGC, "enable", rb_gc_enable, 0);
  3927. rb_define_singleton_method(rb_mGC, "disable", rb_gc_disable, 0);
  3928. rb_define_singleton_method(rb_mGC, "stress", gc_stress_get, 0);
  3929. rb_define_singleton_method(rb_mGC, "stress=", gc_stress_set, 1);
  3930. rb_define_singleton_method(rb_mGC, "count", gc_count, 0);
  3931. rb_define_singleton_method(rb_mGC, "stat", gc_stat, -1);
  3932. rb_define_method(rb_mGC, "garbage_collect", rb_gc_start, 0);
  3933. rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
  3934. rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
  3935. rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
  3936. rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
  3937. rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
  3938. rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
  3939. rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
  3940. rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
  3941. rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
  3942. rb_mObSpace = rb_define_module("ObjectSpace");
  3943. rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1);
  3944. rb_define_module_function(rb_mObSpace, "garbage_collect", rb_gc_start, 0);
  3945. rb_define_module_function(rb_mObSpace, "define_finalizer", define_final, -1);
  3946. rb_define_module_function(rb_mObSpace, "undefine_finalizer", undefine_final, 1);
  3947. rb_define_module_function(rb_mObSpace, "_id2ref", id2ref, 1);
  3948. nomem_error = rb_exc_new3(rb_eNoMemError,
  3949. rb_obj_freeze(rb_str_new2("failed to allocate memory")));
  3950. OBJ_TAINT(nomem_error);
  3951. OBJ_FREEZE(nomem_error);
  3952. rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
  3953. rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
  3954. rb_define_module_function(rb_mObSpace, "count_objects", count_objects, -1);
  3955. {
  3956. VALUE rb_cWeakMap = rb_define_class_under(rb_mObSpace, "WeakMap", rb_cObject);
  3957. rb_define_alloc_func(rb_cWeakMap, wmap_allocate);
  3958. rb_define_method(rb_cWeakMap, "[]=", wmap_aset, 2);
  3959. rb_define_method(rb_cWeakMap, "[]", wmap_aref, 1);
  3960. rb_define_private_method(rb_cWeakMap, "finalize", wmap_finalize, 1);
  3961. }
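/* GC.malloc_allocated_size and GC.malloc_allocations are only available when
 * ruby was built with CALC_EXACT_MALLOC_SIZE. */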
  3962. #if CALC_EXACT_MALLOC_SIZE
  3963. rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
  3964. rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
  3965. #endif
  3966. }