
/gc.c

https://github.com/vuxuandung/ruby
C | 4537 lines | 3457 code | 567 blank | 513 comment | 513 complexity | MD5: 7fcc6382a1bf83076268a0cad70800a6
Possible License(s): GPL-2.0, BSD-3-Clause, AGPL-3.0, 0BSD

Large files are truncated; the full file is available in the repository linked above.

  1. /**********************************************************************
  2. gc.c -
  3. $Author$
  4. created at: Tue Oct 5 09:44:46 JST 1993
  5. Copyright (C) 1993-2007 Yukihiro Matsumoto
  6. Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
  7. Copyright (C) 2000 Information-technology Promotion Agency, Japan
  8. **********************************************************************/
  9. #include "ruby/ruby.h"
  10. #include "ruby/st.h"
  11. #include "ruby/re.h"
  12. #include "ruby/io.h"
  13. #include "ruby/thread.h"
  14. #include "ruby/util.h"
  15. #include "eval_intern.h"
  16. #include "vm_core.h"
  17. #include "internal.h"
  18. #include "gc.h"
  19. #include "constant.h"
  20. #include "ruby_atomic.h"
  21. #include "probes.h"
  22. #include <stdio.h>
  23. #include <setjmp.h>
  24. #include <sys/types.h>
  25. #include <assert.h>
  26. #ifdef HAVE_SYS_TIME_H
  27. #include <sys/time.h>
  28. #endif
  29. #ifdef HAVE_SYS_RESOURCE_H
  30. #include <sys/resource.h>
  31. #endif
  32. #if defined(__native_client__) && defined(NACL_NEWLIB)
  33. # include "nacl/resource.h"
  34. # undef HAVE_POSIX_MEMALIGN
  35. # undef HAVE_MEMALIGN
  36. #endif
  37. #if defined _WIN32 || defined __CYGWIN__
  38. #include <windows.h>
  39. #elif defined(HAVE_POSIX_MEMALIGN)
  40. #elif defined(HAVE_MEMALIGN)
  41. #include <malloc.h>
  42. #endif
  43. #ifdef HAVE_VALGRIND_MEMCHECK_H
  44. # include <valgrind/memcheck.h>
  45. # ifndef VALGRIND_MAKE_MEM_DEFINED
  46. # define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE((p), (n))
  47. # endif
  48. # ifndef VALGRIND_MAKE_MEM_UNDEFINED
  49. # define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE((p), (n))
  50. # endif
  51. #else
  52. # define VALGRIND_MAKE_MEM_DEFINED(p, n) 0
  53. # define VALGRIND_MAKE_MEM_UNDEFINED(p, n) 0
  54. #endif
  55. #define rb_setjmp(env) RUBY_SETJMP(env)
  56. #define rb_jmp_buf rb_jmpbuf_t
  57. #ifndef GC_MALLOC_LIMIT
  58. #define GC_MALLOC_LIMIT 8000000
  59. #endif
  60. #define HEAP_MIN_SLOTS 10000
  61. #define FREE_MIN 4096
  62. typedef struct {
  63. unsigned int initial_malloc_limit;
  64. unsigned int initial_heap_min_slots;
  65. unsigned int initial_free_min;
  66. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  67. int gc_stress;
  68. #endif
  69. } ruby_gc_params_t;
  70. static ruby_gc_params_t initial_params = {
  71. GC_MALLOC_LIMIT,
  72. HEAP_MIN_SLOTS,
  73. FREE_MIN,
  74. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  75. FALSE,
  76. #endif
  77. };
  78. #define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
  79. #ifndef GC_PROFILE_MORE_DETAIL
  80. #define GC_PROFILE_MORE_DETAIL 0
  81. #endif
  82. typedef struct gc_profile_record {
  83. double gc_time;
  84. double gc_invoke_time;
  85. size_t heap_total_objects;
  86. size_t heap_use_size;
  87. size_t heap_total_size;
  88. int is_marked;
  89. #if GC_PROFILE_MORE_DETAIL
  90. double gc_mark_time;
  91. double gc_sweep_time;
  92. size_t heap_use_slots;
  93. size_t heap_live_objects;
  94. size_t heap_free_objects;
  95. int have_finalize;
  96. size_t allocate_increase;
  97. size_t allocate_limit;
  98. #endif
  99. } gc_profile_record;
  100. #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
  101. #pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
  102. #endif
  103. typedef struct RVALUE {
  104. union {
  105. struct {
  106. VALUE flags; /* always 0 for freed obj */
  107. struct RVALUE *next;
  108. } free;
  109. struct RBasic basic;
  110. struct RObject object;
  111. struct RClass klass;
  112. struct RFloat flonum;
  113. struct RString string;
  114. struct RArray array;
  115. struct RRegexp regexp;
  116. struct RHash hash;
  117. struct RData data;
  118. struct RTypedData typeddata;
  119. struct RStruct rstruct;
  120. struct RBignum bignum;
  121. struct RFile file;
  122. struct RNode node;
  123. struct RMatch match;
  124. struct RRational rational;
  125. struct RComplex complex;
  126. } as;
  127. #ifdef GC_DEBUG
  128. const char *file;
  129. int line;
  130. #endif
  131. } RVALUE;
  132. #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
  133. #pragma pack(pop)
  134. #endif
  135. struct heaps_slot {
  136. struct heaps_header *header;
  137. uintptr_t *bits;
  138. RVALUE *freelist;
  139. struct heaps_slot *next;
  140. struct heaps_slot *prev;
  141. struct heaps_slot *free_next;
  142. };
  143. struct heaps_header {
  144. struct heaps_slot *base;
  145. uintptr_t *bits;
  146. RVALUE *start;
  147. RVALUE *end;
  148. size_t limit;
  149. };
  150. struct heaps_free_bitmap {
  151. struct heaps_free_bitmap *next;
  152. };
  153. struct gc_list {
  154. VALUE *varptr;
  155. struct gc_list *next;
  156. };
  157. #define STACK_CHUNK_SIZE 500
  158. typedef struct stack_chunk {
  159. VALUE data[STACK_CHUNK_SIZE];
  160. struct stack_chunk *next;
  161. } stack_chunk_t;
  162. typedef struct mark_stack {
  163. stack_chunk_t *chunk;
  164. stack_chunk_t *cache;
  165. size_t index;
  166. size_t limit;
  167. size_t cache_size;
  168. size_t unused_cache_size;
  169. } mark_stack_t;
  170. #ifndef CALC_EXACT_MALLOC_SIZE
  171. #define CALC_EXACT_MALLOC_SIZE 0
  172. #endif
  173. typedef struct rb_objspace {
  174. struct {
  175. size_t limit;
  176. size_t increase;
  177. #if CALC_EXACT_MALLOC_SIZE
  178. size_t allocated_size;
  179. size_t allocations;
  180. #endif
  181. } malloc_params;
  182. struct {
  183. size_t increment;
  184. struct heaps_slot *ptr;
  185. struct heaps_slot *sweep_slots;
  186. struct heaps_slot *free_slots;
  187. struct heaps_header **sorted;
  188. size_t length;
  189. size_t used;
  190. struct heaps_free_bitmap *free_bitmap;
  191. RVALUE *range[2];
  192. struct heaps_header *freed;
  193. size_t free_num;
  194. size_t free_min;
  195. size_t final_num;
  196. size_t do_heap_free;
  197. } heap;
  198. struct {
  199. int dont_gc;
  200. int dont_lazy_sweep;
  201. int during_gc;
  202. rb_atomic_t finalizing;
  203. } flags;
  204. struct {
  205. st_table *table;
  206. RVALUE *deferred;
  207. } final;
  208. mark_stack_t mark_stack;
  209. struct {
  210. int run;
  211. gc_profile_record *record;
  212. size_t count;
  213. size_t size;
  214. double invoke_time;
  215. } profile;
  216. struct gc_list *global_list;
  217. size_t count;
  218. size_t total_allocated_object_num;
  219. size_t total_freed_object_num;
  220. int gc_stress;
  221. struct mark_func_data_struct {
  222. void *data;
  223. void (*mark_func)(VALUE v, void *data);
  224. } *mark_func_data;
  225. } rb_objspace_t;
  226. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  227. #define rb_objspace (*GET_VM()->objspace)
  228. #define ruby_initial_gc_stress initial_params.gc_stress
  229. int *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
  230. #else
  231. static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT}};
  232. int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
  233. #endif
  234. #define malloc_limit objspace->malloc_params.limit
  235. #define malloc_increase objspace->malloc_params.increase
  236. #define heaps objspace->heap.ptr
  237. #define heaps_length objspace->heap.length
  238. #define heaps_used objspace->heap.used
  239. #define lomem objspace->heap.range[0]
  240. #define himem objspace->heap.range[1]
  241. #define heaps_inc objspace->heap.increment
  242. #define heaps_freed objspace->heap.freed
  243. #define dont_gc objspace->flags.dont_gc
  244. #define during_gc objspace->flags.during_gc
  245. #define finalizing objspace->flags.finalizing
  246. #define finalizer_table objspace->final.table
  247. #define deferred_final_list objspace->final.deferred
  248. #define global_List objspace->global_list
  249. #define ruby_gc_stress objspace->gc_stress
  250. #define initial_malloc_limit initial_params.initial_malloc_limit
  251. #define initial_heap_min_slots initial_params.initial_heap_min_slots
  252. #define initial_free_min initial_params.initial_free_min
  253. #define is_lazy_sweeping(objspace) ((objspace)->heap.sweep_slots != 0)
  254. #if SIZEOF_LONG == SIZEOF_VOIDP
  255. # define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
  256. # define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
  257. #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
  258. # define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
  259. # define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
  260. ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
  261. #else
  262. # error not supported
  263. #endif
  264. #define RANY(o) ((RVALUE*)(o))
  265. #define has_free_object (objspace->heap.free_slots && objspace->heap.free_slots->freelist)
  266. #define HEAP_HEADER(p) ((struct heaps_header *)(p))
  267. #define GET_HEAP_HEADER(x) (HEAP_HEADER((uintptr_t)(x) & ~(HEAP_ALIGN_MASK)))
  268. #define GET_HEAP_SLOT(x) (GET_HEAP_HEADER(x)->base)
  269. #define GET_HEAP_BITMAP(x) (GET_HEAP_HEADER(x)->bits)
  270. #define NUM_IN_SLOT(p) (((uintptr_t)(p) & HEAP_ALIGN_MASK)/sizeof(RVALUE))
  271. #define BITMAP_INDEX(p) (NUM_IN_SLOT(p) / (sizeof(uintptr_t) * CHAR_BIT))
  272. #define BITMAP_OFFSET(p) (NUM_IN_SLOT(p) & ((sizeof(uintptr_t) * CHAR_BIT)-1))
  273. #define MARKED_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] & ((uintptr_t)1 << BITMAP_OFFSET(p)))
  274. #ifndef HEAP_ALIGN_LOG
  275. /* default tiny heap size: 16KB */
  276. #define HEAP_ALIGN_LOG 14
  277. #endif
  278. #define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
  279. enum {
  280. HEAP_ALIGN = (1UL << HEAP_ALIGN_LOG),
  281. HEAP_ALIGN_MASK = (~(~0UL << HEAP_ALIGN_LOG)),
  282. REQUIRED_SIZE_BY_MALLOC = (sizeof(size_t) * 5),
  283. HEAP_SIZE = (HEAP_ALIGN - REQUIRED_SIZE_BY_MALLOC),
  284. HEAP_OBJ_LIMIT = (unsigned int)((HEAP_SIZE - sizeof(struct heaps_header))/sizeof(struct RVALUE)),
  285. HEAP_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_SIZE, sizeof(struct RVALUE)), sizeof(uintptr_t) * CHAR_BIT)
  286. };
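/*
 * An illustrative sketch, not from the original source: a worked example of
 * the bitmap addressing macros above, assuming a 64-bit build where
 * sizeof(RVALUE) == 40 and sizeof(uintptr_t) * CHAR_BIT == 64. Each heap
 * slot is HEAP_ALIGN (16KB) aligned, so masking the low HEAP_ALIGN_LOG bits
 * of an object pointer locates its heaps_header:
 *
 *     p                  = 0x7f1234569f40          (an RVALUE inside a slot)
 *     GET_HEAP_HEADER(p) = p & ~HEAP_ALIGN_MASK = 0x7f1234568000
 *     NUM_IN_SLOT(p)     = (p & 0x3fff) / 40 = 0x1f40 / 40 = 200
 *     BITMAP_INDEX(p)    = 200 / 64 = 3
 *     BITMAP_OFFSET(p)   = 200 % 64 = 8
 *
 * so MARKED_IN_BITMAP(bits, p) tests bit 8 of bits[3] for this object.
 */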
  287. int ruby_gc_debug_indent = 0;
  288. VALUE rb_mGC;
  289. extern st_table *rb_class_tbl;
  290. int ruby_disable_gc_stress = 0;
  291. static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
  292. static VALUE define_final0(VALUE obj, VALUE block);
  293. VALUE rb_define_final(VALUE obj, VALUE block);
  294. VALUE rb_undefine_final(VALUE obj);
  295. static void run_final(rb_objspace_t *objspace, VALUE obj);
  296. static void initial_expand_heap(rb_objspace_t *objspace);
  297. static void negative_size_allocation_error(const char *);
  298. static void *aligned_malloc(size_t, size_t);
  299. static void aligned_free(void *);
  300. static void init_mark_stack(mark_stack_t *stack);
  301. static VALUE lazy_sweep_enable(void);
  302. static int garbage_collect(rb_objspace_t *);
  303. static int gc_prepare_free_objects(rb_objspace_t *);
  304. static void mark_tbl(rb_objspace_t *, st_table *);
  305. static void rest_sweep(rb_objspace_t *);
  306. static void gc_mark_stacked_objects(rb_objspace_t *);
  307. static double getrusage_time(void);
  308. static inline void gc_prof_timer_start(rb_objspace_t *);
  309. static inline void gc_prof_timer_stop(rb_objspace_t *, int);
  310. static inline void gc_prof_mark_timer_start(rb_objspace_t *);
  311. static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
  312. static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
  313. static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
  314. static inline void gc_prof_set_malloc_info(rb_objspace_t *);
  315. /*
  316. --------------------------- ObjectSpace -----------------------------
  317. */
  318. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  319. rb_objspace_t *
  320. rb_objspace_alloc(void)
  321. {
  322. rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t));
  323. memset(objspace, 0, sizeof(*objspace));
  324. malloc_limit = initial_malloc_limit;
  325. ruby_gc_stress = ruby_initial_gc_stress;
  326. return objspace;
  327. }
  328. #endif
  329. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  330. static void free_stack_chunks(mark_stack_t *);
  331. void
  332. rb_objspace_free(rb_objspace_t *objspace)
  333. {
  334. rest_sweep(objspace);
  335. if (objspace->profile.record) {
  336. free(objspace->profile.record);
  337. objspace->profile.record = 0;
  338. }
  339. if (global_List) {
  340. struct gc_list *list, *next;
  341. for (list = global_List; list; list = next) {
  342. next = list->next;
  343. xfree(list);
  344. }
  345. }
  346. if (objspace->heap.free_bitmap) {
  347. struct heaps_free_bitmap *list, *next;
  348. for (list = objspace->heap.free_bitmap; list; list = next) {
  349. next = list->next;
  350. free(list);
  351. }
  352. }
  353. if (objspace->heap.sorted) {
  354. size_t i;
  355. for (i = 0; i < heaps_used; ++i) {
  356. free(objspace->heap.sorted[i]->bits);
  357. aligned_free(objspace->heap.sorted[i]);
  358. }
  359. free(objspace->heap.sorted);
  360. heaps_used = 0;
  361. heaps = 0;
  362. }
  363. free_stack_chunks(&objspace->mark_stack);
  364. free(objspace);
  365. }
  366. #endif
  367. void
  368. rb_global_variable(VALUE *var)
  369. {
  370. rb_gc_register_address(var);
  371. }
  372. static void
  373. allocate_sorted_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
  374. {
  375. struct heaps_header **p;
  376. struct heaps_free_bitmap *bits;
  377. size_t size, add, i;
  378. size = next_heaps_length*sizeof(struct heaps_header *);
  379. add = next_heaps_length - heaps_used;
  380. if (heaps_used > 0) {
  381. p = (struct heaps_header **)realloc(objspace->heap.sorted, size);
  382. if (p) objspace->heap.sorted = p;
  383. }
  384. else {
  385. p = objspace->heap.sorted = (struct heaps_header **)malloc(size);
  386. }
  387. if (p == 0) {
  388. during_gc = 0;
  389. rb_memerror();
  390. }
  391. for (i = 0; i < add; i++) {
  392. bits = (struct heaps_free_bitmap *)malloc(HEAP_BITMAP_LIMIT * sizeof(uintptr_t));
  393. if (bits == 0) {
  394. during_gc = 0;
  395. rb_memerror();
  396. return;
  397. }
  398. bits->next = objspace->heap.free_bitmap;
  399. objspace->heap.free_bitmap = bits;
  400. }
  401. }
  402. static void
  403. link_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
  404. {
  405. slot->free_next = objspace->heap.free_slots;
  406. objspace->heap.free_slots = slot;
  407. }
  408. static void
  409. unlink_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
  410. {
  411. objspace->heap.free_slots = slot->free_next;
  412. slot->free_next = NULL;
  413. }
  414. static void
  415. assign_heap_slot(rb_objspace_t *objspace)
  416. {
  417. RVALUE *p, *pend, *membase;
  418. struct heaps_slot *slot;
  419. size_t hi, lo, mid;
  420. size_t objs;
  421. objs = HEAP_OBJ_LIMIT;
  422. p = (RVALUE*)aligned_malloc(HEAP_ALIGN, HEAP_SIZE);
  423. if (p == 0) {
  424. during_gc = 0;
  425. rb_memerror();
  426. }
  427. slot = (struct heaps_slot *)malloc(sizeof(struct heaps_slot));
  428. if (slot == 0) {
  429. aligned_free(p);
  430. during_gc = 0;
  431. rb_memerror();
  432. }
  433. MEMZERO((void*)slot, struct heaps_slot, 1);
  434. slot->next = heaps;
  435. if (heaps) heaps->prev = slot;
  436. heaps = slot;
  437. membase = p;
  438. p = (RVALUE*)((VALUE)p + sizeof(struct heaps_header));
  439. if ((VALUE)p % sizeof(RVALUE) != 0) {
  440. p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
  441. objs = (HEAP_SIZE - (size_t)((VALUE)p - (VALUE)membase))/sizeof(RVALUE);
  442. }
  443. lo = 0;
  444. hi = heaps_used;
  445. while (lo < hi) {
  446. register RVALUE *mid_membase;
  447. mid = (lo + hi) / 2;
  448. mid_membase = (RVALUE *)objspace->heap.sorted[mid];
  449. if (mid_membase < membase) {
  450. lo = mid + 1;
  451. }
  452. else if (mid_membase > membase) {
  453. hi = mid;
  454. }
  455. else {
  456. rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid);
  457. }
  458. }
  459. if (hi < heaps_used) {
  460. MEMMOVE(&objspace->heap.sorted[hi+1], &objspace->heap.sorted[hi], struct heaps_header*, heaps_used - hi);
  461. }
  462. heaps->header = (struct heaps_header *)membase;
  463. objspace->heap.sorted[hi] = heaps->header;
  464. objspace->heap.sorted[hi]->start = p;
  465. objspace->heap.sorted[hi]->end = (p + objs);
  466. objspace->heap.sorted[hi]->base = heaps;
  467. objspace->heap.sorted[hi]->limit = objs;
  468. assert(objspace->heap.free_bitmap != NULL);
  469. heaps->bits = (uintptr_t *)objspace->heap.free_bitmap;
  470. objspace->heap.sorted[hi]->bits = (uintptr_t *)objspace->heap.free_bitmap;
  471. objspace->heap.free_bitmap = objspace->heap.free_bitmap->next;
  472. memset(heaps->bits, 0, HEAP_BITMAP_LIMIT * sizeof(uintptr_t));
  473. pend = p + objs;
  474. if (lomem == 0 || lomem > p) lomem = p;
  475. if (himem < pend) himem = pend;
  476. heaps_used++;
  477. while (p < pend) {
  478. p->as.free.flags = 0;
  479. p->as.free.next = heaps->freelist;
  480. heaps->freelist = p;
  481. p++;
  482. }
  483. link_free_heap_slot(objspace, heaps);
  484. }
  485. static void
  486. add_heap_slots(rb_objspace_t *objspace, size_t add)
  487. {
  488. size_t i;
  489. size_t next_heaps_length;
  490. next_heaps_length = heaps_used + add;
  491. if (next_heaps_length > heaps_length) {
  492. allocate_sorted_heaps(objspace, next_heaps_length);
  493. heaps_length = next_heaps_length;
  494. }
  495. for (i = 0; i < add; i++) {
  496. assign_heap_slot(objspace);
  497. }
  498. heaps_inc = 0;
  499. }
  500. static void
  501. init_heap(rb_objspace_t *objspace)
  502. {
  503. add_heap_slots(objspace, HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT);
  504. init_mark_stack(&objspace->mark_stack);
  505. #ifdef USE_SIGALTSTACK
  506. {
507. /* altstacks of other threads are allocated elsewhere */
  508. rb_thread_t *th = GET_THREAD();
  509. void *tmp = th->altstack;
  510. th->altstack = malloc(rb_sigaltstack_size());
  511. free(tmp); /* free previously allocated area */
  512. }
  513. #endif
  514. objspace->profile.invoke_time = getrusage_time();
  515. finalizer_table = st_init_numtable();
  516. }
  517. static void
  518. initial_expand_heap(rb_objspace_t *objspace)
  519. {
  520. size_t min_size = initial_heap_min_slots / HEAP_OBJ_LIMIT;
  521. if (min_size > heaps_used) {
  522. add_heap_slots(objspace, min_size - heaps_used);
  523. }
  524. }
  525. static void
  526. set_heaps_increment(rb_objspace_t *objspace)
  527. {
  528. size_t next_heaps_length = (size_t)(heaps_used * 1.8);
  529. if (next_heaps_length == heaps_used) {
  530. next_heaps_length++;
  531. }
  532. heaps_inc = next_heaps_length - heaps_used;
  533. if (next_heaps_length > heaps_length) {
  534. allocate_sorted_heaps(objspace, next_heaps_length);
  535. heaps_length = next_heaps_length;
  536. }
  537. }
  538. static int
  539. heaps_increment(rb_objspace_t *objspace)
  540. {
  541. if (heaps_inc > 0) {
  542. assign_heap_slot(objspace);
  543. heaps_inc--;
  544. return TRUE;
  545. }
  546. return FALSE;
  547. }
  548. static VALUE
  549. newobj(VALUE klass, VALUE flags)
  550. {
  551. rb_objspace_t *objspace = &rb_objspace;
  552. VALUE obj;
  553. if (UNLIKELY(during_gc)) {
  554. dont_gc = 1;
  555. during_gc = 0;
  556. rb_bug("object allocation during garbage collection phase");
  557. }
  558. if (UNLIKELY(ruby_gc_stress && !ruby_disable_gc_stress)) {
  559. if (!garbage_collect(objspace)) {
  560. during_gc = 0;
  561. rb_memerror();
  562. }
  563. }
  564. if (UNLIKELY(!has_free_object)) {
  565. if (!gc_prepare_free_objects(objspace)) {
  566. during_gc = 0;
  567. rb_memerror();
  568. }
  569. }
  570. obj = (VALUE)objspace->heap.free_slots->freelist;
  571. objspace->heap.free_slots->freelist = RANY(obj)->as.free.next;
  572. if (objspace->heap.free_slots->freelist == NULL) {
  573. unlink_free_heap_slot(objspace, objspace->heap.free_slots);
  574. }
  575. MEMZERO((void*)obj, RVALUE, 1);
  576. #ifdef GC_DEBUG
  577. RANY(obj)->file = rb_sourcefile();
  578. RANY(obj)->line = rb_sourceline();
  579. #endif
  580. objspace->total_allocated_object_num++;
  581. return obj;
  582. }
  583. VALUE
  584. rb_newobj(void)
  585. {
  586. return newobj(0, T_NONE);
  587. }
  588. VALUE
  589. rb_newobj_of(VALUE klass, VALUE flags)
  590. {
  591. VALUE obj;
  592. obj = newobj(klass, flags);
  593. OBJSETUP(obj, klass, flags);
  594. return obj;
  595. }
  596. NODE*
  597. rb_node_newnode(enum node_type type, VALUE a0, VALUE a1, VALUE a2)
  598. {
  599. NODE *n = (NODE*)rb_newobj();
  600. n->flags |= T_NODE;
  601. nd_set_type(n, type);
  602. n->u1.value = a0;
  603. n->u2.value = a1;
  604. n->u3.value = a2;
  605. return n;
  606. }
  607. VALUE
  608. rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
  609. {
  610. NEWOBJ(data, struct RData);
  611. if (klass) Check_Type(klass, T_CLASS);
  612. OBJSETUP(data, klass, T_DATA);
  613. data->data = datap;
  614. data->dfree = dfree;
  615. data->dmark = dmark;
  616. return (VALUE)data;
  617. }
  618. VALUE
  619. rb_data_typed_object_alloc(VALUE klass, void *datap, const rb_data_type_t *type)
  620. {
  621. NEWOBJ(data, struct RTypedData);
  622. if (klass) Check_Type(klass, T_CLASS);
  623. OBJSETUP(data, klass, T_DATA);
  624. data->data = datap;
  625. data->typed_flag = 1;
  626. data->type = type;
  627. return (VALUE)data;
  628. }
  629. size_t
  630. rb_objspace_data_type_memsize(VALUE obj)
  631. {
  632. if (RTYPEDDATA_P(obj) && RTYPEDDATA_TYPE(obj)->function.dsize) {
  633. return RTYPEDDATA_TYPE(obj)->function.dsize(RTYPEDDATA_DATA(obj));
  634. }
  635. else {
  636. return 0;
  637. }
  638. }
  639. const char *
  640. rb_objspace_data_type_name(VALUE obj)
  641. {
  642. if (RTYPEDDATA_P(obj)) {
  643. return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
  644. }
  645. else {
  646. return 0;
  647. }
  648. }
  649. static void gc_mark(rb_objspace_t *objspace, VALUE ptr);
  650. static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);
  651. static inline int
  652. is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
  653. {
  654. register RVALUE *p = RANY(ptr);
  655. register struct heaps_header *heap;
  656. register size_t hi, lo, mid;
  657. if (p < lomem || p > himem) return FALSE;
  658. if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
659. /* check if p looks like a pointer, using bsearch */
  660. lo = 0;
  661. hi = heaps_used;
  662. while (lo < hi) {
  663. mid = (lo + hi) / 2;
  664. heap = objspace->heap.sorted[mid];
  665. if (heap->start <= p) {
  666. if (p < heap->end)
  667. return TRUE;
  668. lo = mid + 1;
  669. }
  670. else {
  671. hi = mid;
  672. }
  673. }
  674. return FALSE;
  675. }
  676. static int
  677. free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data)
  678. {
  679. if (!me->mark) {
  680. rb_free_method_entry(me);
  681. }
  682. return ST_CONTINUE;
  683. }
  684. void
  685. rb_free_m_table(st_table *tbl)
  686. {
  687. st_foreach(tbl, free_method_entry_i, 0);
  688. st_free_table(tbl);
  689. }
  690. static int
  691. free_const_entry_i(ID key, rb_const_entry_t *ce, st_data_t data)
  692. {
  693. xfree(ce);
  694. return ST_CONTINUE;
  695. }
  696. void
  697. rb_free_const_table(st_table *tbl)
  698. {
  699. st_foreach(tbl, free_const_entry_i, 0);
  700. st_free_table(tbl);
  701. }
  702. static int obj_free(rb_objspace_t *, VALUE);
  703. static inline struct heaps_slot *
  704. add_slot_local_freelist(rb_objspace_t *objspace, RVALUE *p)
  705. {
  706. struct heaps_slot *slot;
  707. (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
  708. p->as.free.flags = 0;
  709. slot = GET_HEAP_SLOT(p);
  710. p->as.free.next = slot->freelist;
  711. slot->freelist = p;
  712. return slot;
  713. }
  714. static void
  715. unlink_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
  716. {
  717. if (slot->prev)
  718. slot->prev->next = slot->next;
  719. if (slot->next)
  720. slot->next->prev = slot->prev;
  721. if (heaps == slot)
  722. heaps = slot->next;
  723. if (objspace->heap.sweep_slots == slot)
  724. objspace->heap.sweep_slots = slot->next;
  725. slot->prev = NULL;
  726. slot->next = NULL;
  727. }
  728. static void
  729. free_unused_heaps(rb_objspace_t *objspace)
  730. {
  731. size_t i, j;
  732. struct heaps_header *last = 0;
  733. for (i = j = 1; j < heaps_used; i++) {
  734. if (objspace->heap.sorted[i]->limit == 0) {
  735. struct heaps_header* h = objspace->heap.sorted[i];
  736. ((struct heaps_free_bitmap *)(h->bits))->next =
  737. objspace->heap.free_bitmap;
  738. objspace->heap.free_bitmap = (struct heaps_free_bitmap *)h->bits;
  739. if (!last) {
  740. last = objspace->heap.sorted[i];
  741. }
  742. else {
  743. aligned_free(objspace->heap.sorted[i]);
  744. }
  745. heaps_used--;
  746. }
  747. else {
  748. if (i != j) {
  749. objspace->heap.sorted[j] = objspace->heap.sorted[i];
  750. }
  751. j++;
  752. }
  753. }
  754. if (last) {
  755. if (last < heaps_freed) {
  756. aligned_free(heaps_freed);
  757. heaps_freed = last;
  758. }
  759. else {
  760. aligned_free(last);
  761. }
  762. }
  763. }
  764. static inline void
  765. make_deferred(RVALUE *p)
  766. {
  767. p->as.basic.flags = (p->as.basic.flags & ~T_MASK) | T_ZOMBIE;
  768. }
  769. static inline void
  770. make_io_deferred(RVALUE *p)
  771. {
  772. rb_io_t *fptr = p->as.file.fptr;
  773. make_deferred(p);
  774. p->as.data.dfree = (void (*)(void*))rb_io_fptr_finalize;
  775. p->as.data.data = fptr;
  776. }
  777. static int
  778. obj_free(rb_objspace_t *objspace, VALUE obj)
  779. {
  780. switch (BUILTIN_TYPE(obj)) {
  781. case T_NIL:
  782. case T_FIXNUM:
  783. case T_TRUE:
  784. case T_FALSE:
  785. rb_bug("obj_free() called for broken object");
  786. break;
  787. }
  788. if (FL_TEST(obj, FL_EXIVAR)) {
  789. rb_free_generic_ivar((VALUE)obj);
  790. FL_UNSET(obj, FL_EXIVAR);
  791. }
  792. switch (BUILTIN_TYPE(obj)) {
  793. case T_OBJECT:
  794. if (!(RANY(obj)->as.basic.flags & ROBJECT_EMBED) &&
  795. RANY(obj)->as.object.as.heap.ivptr) {
  796. xfree(RANY(obj)->as.object.as.heap.ivptr);
  797. }
  798. break;
  799. case T_MODULE:
  800. case T_CLASS:
  801. rb_clear_cache_by_class((VALUE)obj);
  802. if (RCLASS_M_TBL(obj)) {
  803. rb_free_m_table(RCLASS_M_TBL(obj));
  804. }
  805. if (RCLASS_IV_TBL(obj)) {
  806. st_free_table(RCLASS_IV_TBL(obj));
  807. }
  808. if (RCLASS_CONST_TBL(obj)) {
  809. rb_free_const_table(RCLASS_CONST_TBL(obj));
  810. }
  811. if (RCLASS_IV_INDEX_TBL(obj)) {
  812. st_free_table(RCLASS_IV_INDEX_TBL(obj));
  813. }
  814. xfree(RANY(obj)->as.klass.ptr);
  815. break;
  816. case T_STRING:
  817. rb_str_free(obj);
  818. break;
  819. case T_ARRAY:
  820. rb_ary_free(obj);
  821. break;
  822. case T_HASH:
  823. if (RANY(obj)->as.hash.ntbl) {
  824. st_free_table(RANY(obj)->as.hash.ntbl);
  825. }
  826. break;
  827. case T_REGEXP:
  828. if (RANY(obj)->as.regexp.ptr) {
  829. onig_free(RANY(obj)->as.regexp.ptr);
  830. }
  831. break;
  832. case T_DATA:
  833. if (DATA_PTR(obj)) {
  834. if (RTYPEDDATA_P(obj)) {
  835. RDATA(obj)->dfree = RANY(obj)->as.typeddata.type->function.dfree;
  836. }
  837. if (RANY(obj)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
  838. xfree(DATA_PTR(obj));
  839. }
  840. else if (RANY(obj)->as.data.dfree) {
  841. make_deferred(RANY(obj));
  842. return 1;
  843. }
  844. }
  845. break;
  846. case T_MATCH:
  847. if (RANY(obj)->as.match.rmatch) {
  848. struct rmatch *rm = RANY(obj)->as.match.rmatch;
  849. onig_region_free(&rm->regs, 0);
  850. if (rm->char_offset)
  851. xfree(rm->char_offset);
  852. xfree(rm);
  853. }
  854. break;
  855. case T_FILE:
  856. if (RANY(obj)->as.file.fptr) {
  857. make_io_deferred(RANY(obj));
  858. return 1;
  859. }
  860. break;
  861. case T_RATIONAL:
  862. case T_COMPLEX:
  863. break;
  864. case T_ICLASS:
  865. /* iClass shares table with the module */
  866. xfree(RANY(obj)->as.klass.ptr);
  867. break;
  868. case T_FLOAT:
  869. break;
  870. case T_BIGNUM:
  871. if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) {
  872. xfree(RBIGNUM_DIGITS(obj));
  873. }
  874. break;
  875. case T_NODE:
  876. switch (nd_type(obj)) {
  877. case NODE_SCOPE:
  878. if (RANY(obj)->as.node.u1.tbl) {
  879. xfree(RANY(obj)->as.node.u1.tbl);
  880. }
  881. break;
  882. case NODE_ARGS:
  883. if (RANY(obj)->as.node.u3.args) {
  884. xfree(RANY(obj)->as.node.u3.args);
  885. }
  886. break;
  887. case NODE_ALLOCA:
  888. xfree(RANY(obj)->as.node.u1.node);
  889. break;
  890. }
  891. break; /* no need to free iv_tbl */
  892. case T_STRUCT:
  893. if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
  894. RANY(obj)->as.rstruct.as.heap.ptr) {
  895. xfree(RANY(obj)->as.rstruct.as.heap.ptr);
  896. }
  897. break;
  898. default:
  899. rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
  900. BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
  901. }
  902. return 0;
  903. }
  904. void
  905. Init_heap(void)
  906. {
  907. init_heap(&rb_objspace);
  908. }
  909. typedef int each_obj_callback(void *, void *, size_t, void *);
  910. struct each_obj_args {
  911. each_obj_callback *callback;
  912. void *data;
  913. };
  914. static VALUE
  915. objspace_each_objects(VALUE arg)
  916. {
  917. size_t i;
  918. RVALUE *membase = 0;
  919. RVALUE *pstart, *pend;
  920. rb_objspace_t *objspace = &rb_objspace;
  921. struct each_obj_args *args = (struct each_obj_args *)arg;
  922. volatile VALUE v;
  923. i = 0;
  924. while (i < heaps_used) {
  925. while (0 < i && (uintptr_t)membase < (uintptr_t)objspace->heap.sorted[i-1])
  926. i--;
  927. while (i < heaps_used && (uintptr_t)objspace->heap.sorted[i] <= (uintptr_t)membase)
  928. i++;
  929. if (heaps_used <= i)
  930. break;
  931. membase = (RVALUE *)objspace->heap.sorted[i];
  932. pstart = objspace->heap.sorted[i]->start;
  933. pend = pstart + objspace->heap.sorted[i]->limit;
  934. for (; pstart != pend; pstart++) {
  935. if (pstart->as.basic.flags) {
  936. v = (VALUE)pstart; /* acquire to save this object */
  937. break;
  938. }
  939. }
  940. if (pstart != pend) {
  941. if ((*args->callback)(pstart, pend, sizeof(RVALUE), args->data)) {
  942. break;
  943. }
  944. }
  945. }
  946. RB_GC_GUARD(v);
  947. return Qnil;
  948. }
949. /*
950. * rb_objspace_each_objects() is a special C API for walking through the
951. * Ruby object space. This API is difficult to use safely; to be frank,
952. * you should not use it unless you have read the source code of this
953. * function and understand what it does.
954. *
955. * 'callback' will be called several times (once per heap slot in the
956. * current implementation) with:
957. * vstart: a pointer to the first living object of the heap slot.
958. * vend: a pointer just past the valid heap slot area.
959. * stride: the distance to the next VALUE.
960. *
961. * If callback() returns non-zero, the iteration is stopped.
962. *
963. * This is a sample callback that iterates over live objects:
964. *
965. * int
966. * sample_callback(void *vstart, void *vend, int stride, void *data) {
967. *     VALUE v = (VALUE)vstart;
968. *     for (; v != (VALUE)vend; v += stride) {
969. *         if (RBASIC(v)->flags) { // liveness check
970. *             // do something with live object 'v'
971. *         }
972. *     }
973. *     return 0; // continue the iteration
974. * }
975. *
976. * Note: 'vstart' is not the top of the heap slot. It points to the first
977. * living object so that at least one object is held, avoiding GC issues.
978. * This means you cannot walk through every Ruby object slot, including
979. * freed object slots.
980. *
981. * Note: In this implementation 'stride' is the same as sizeof(RVALUE),
982. * but variable values may be passed as 'stride' for some reason, so use
983. * 'stride' instead of a constant value in the iteration.
984. */
  985. void
  986. rb_objspace_each_objects(each_obj_callback *callback, void *data)
  987. {
  988. struct each_obj_args args;
  989. rb_objspace_t *objspace = &rb_objspace;
  990. rest_sweep(objspace);
  991. objspace->flags.dont_lazy_sweep = TRUE;
  992. args.callback = callback;
  993. args.data = data;
  994. rb_ensure(objspace_each_objects, (VALUE)&args, lazy_sweep_enable, Qnil);
  995. }
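/*
 * A minimal usage sketch (hypothetical, not part of this file): counting
 * live objects with rb_objspace_each_objects(). The callback matches the
 * each_obj_callback typedef above; 'count_live' and 'n' are illustrative
 * names only.
 *
 *     static int
 *     count_live(void *vstart, void *vend, size_t stride, void *data)
 *     {
 *         size_t *n = (size_t *)data;
 *         VALUE v = (VALUE)vstart;
 *         for (; v != (VALUE)vend; v += stride) {
 *             if (RBASIC(v)->flags) (*n)++;   // count only live slots
 *         }
 *         return 0;                           // non-zero would stop the walk
 *     }
 *
 * and then, from a caller:
 *
 *     size_t n = 0;
 *     rb_objspace_each_objects(count_live, &n);
 */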
  996. struct os_each_struct {
  997. size_t num;
  998. VALUE of;
  999. };
  1000. static int
  1001. internal_object_p(VALUE obj)
  1002. {
  1003. RVALUE *p = (RVALUE *)obj;
  1004. if (p->as.basic.flags) {
  1005. switch (BUILTIN_TYPE(p)) {
  1006. case T_NONE:
  1007. case T_ICLASS:
  1008. case T_NODE:
  1009. case T_ZOMBIE:
  1010. break;
  1011. case T_CLASS:
  1012. if (FL_TEST(p, FL_SINGLETON))
  1013. break;
  1014. default:
  1015. if (!p->as.basic.klass) break;
  1016. return 0;
  1017. }
  1018. }
  1019. return 1;
  1020. }
  1021. int
  1022. rb_objspace_internal_object_p(VALUE obj)
  1023. {
  1024. return internal_object_p(obj);
  1025. }
  1026. static int
  1027. os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
  1028. {
  1029. struct os_each_struct *oes = (struct os_each_struct *)data;
  1030. RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend;
  1031. for (; p != pend; p++) {
  1032. volatile VALUE v = (VALUE)p;
  1033. if (!internal_object_p(v)) {
  1034. if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
  1035. rb_yield(v);
  1036. oes->num++;
  1037. }
  1038. }
  1039. }
  1040. return 0;
  1041. }
  1042. static VALUE
  1043. os_obj_of(VALUE of)
  1044. {
  1045. struct os_each_struct oes;
  1046. oes.num = 0;
  1047. oes.of = of;
  1048. rb_objspace_each_objects(os_obj_of_i, &oes);
  1049. return SIZET2NUM(oes.num);
  1050. }
  1051. /*
  1052. * call-seq:
  1053. * ObjectSpace.each_object([module]) {|obj| ... } -> fixnum
  1054. * ObjectSpace.each_object([module]) -> an_enumerator
  1055. *
  1056. * Calls the block once for each living, nonimmediate object in this
  1057. * Ruby process. If <i>module</i> is specified, calls the block
  1058. * for only those classes or modules that match (or are a subclass of)
  1059. * <i>module</i>. Returns the number of objects found. Immediate
1060. * objects (<code>Fixnum</code>s, <code>Symbol</code>s,
  1061. * <code>true</code>, <code>false</code>, and <code>nil</code>) are
  1062. * never returned. In the example below, <code>each_object</code>
  1063. * returns both the numbers we defined and several constants defined in
  1064. * the <code>Math</code> module.
  1065. *
  1066. * If no block is given, an enumerator is returned instead.
  1067. *
  1068. * a = 102.7
  1069. * b = 95 # Won't be returned
  1070. * c = 12345678987654321
  1071. * count = ObjectSpace.each_object(Numeric) {|x| p x }
  1072. * puts "Total count: #{count}"
  1073. *
  1074. * <em>produces:</em>
  1075. *
  1076. * 12345678987654321
  1077. * 102.7
  1078. * 2.71828182845905
  1079. * 3.14159265358979
  1080. * 2.22044604925031e-16
  1081. * 1.7976931348623157e+308
  1082. * 2.2250738585072e-308
  1083. * Total count: 7
  1084. *
  1085. */
  1086. static VALUE
  1087. os_each_obj(int argc, VALUE *argv, VALUE os)
  1088. {
  1089. VALUE of;
  1090. rb_secure(4);
  1091. if (argc == 0) {
  1092. of = 0;
  1093. }
  1094. else {
  1095. rb_scan_args(argc, argv, "01", &of);
  1096. }
  1097. RETURN_ENUMERATOR(os, 1, &of);
  1098. return os_obj_of(of);
  1099. }
  1100. /*
  1101. * call-seq:
  1102. * ObjectSpace.undefine_finalizer(obj)
  1103. *
  1104. * Removes all finalizers for <i>obj</i>.
  1105. *
  1106. */
  1107. static VALUE
  1108. undefine_final(VALUE os, VALUE obj)
  1109. {
  1110. return rb_undefine_final(obj);
  1111. }
  1112. VALUE
  1113. rb_undefine_final(VALUE obj)
  1114. {
  1115. rb_objspace_t *objspace = &rb_objspace;
  1116. st_data_t data = obj;
  1117. rb_check_frozen(obj);
  1118. st_delete(finalizer_table, &data, 0);
  1119. FL_UNSET(obj, FL_FINALIZE);
  1120. return obj;
  1121. }
  1122. /*
  1123. * call-seq:
  1124. * ObjectSpace.define_finalizer(obj, aProc=proc())
  1125. *
  1126. * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
1127. * is destroyed.
  1128. *
  1129. */
  1130. static VALUE
  1131. define_final(int argc, VALUE *argv, VALUE os)
  1132. {
  1133. VALUE obj, block;
  1134. rb_scan_args(argc, argv, "11", &obj, &block);
  1135. rb_check_frozen(obj);
  1136. if (argc == 1) {
  1137. block = rb_block_proc();
  1138. }
  1139. else if (!rb_respond_to(block, rb_intern("call"))) {
  1140. rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
  1141. rb_obj_classname(block));
  1142. }
  1143. return define_final0(obj, block);
  1144. }
  1145. static VALUE
  1146. define_final0(VALUE obj, VALUE block)
  1147. {
  1148. rb_objspace_t *objspace = &rb_objspace;
  1149. VALUE table;
  1150. st_data_t data;
  1151. if (!FL_ABLE(obj)) {
  1152. rb_raise(rb_eArgError, "cannot define finalizer for %s",
  1153. rb_obj_classname(obj));
  1154. }
  1155. RBASIC(obj)->flags |= FL_FINALIZE;
  1156. block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block);
  1157. OBJ_FREEZE(block);
  1158. if (st_lookup(finalizer_table, obj, &data)) {
  1159. table = (VALUE)data;
  1160. rb_ary_push(table, block);
  1161. }
  1162. else {
  1163. table = rb_ary_new3(1, block);
  1164. RBASIC(table)->klass = 0;
  1165. st_add_direct(finalizer_table, obj, table);
  1166. }
  1167. return block;
  1168. }
  1169. VALUE
  1170. rb_define_final(VALUE obj, VALUE block)
  1171. {
  1172. rb_check_frozen(obj);
  1173. if (!rb_respond_to(block, rb_intern("call"))) {
  1174. rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
  1175. rb_obj_classname(block));
  1176. }
  1177. return define_final0(obj, block);
  1178. }
  1179. void
  1180. rb_gc_copy_finalizer(VALUE dest, VALUE obj)
  1181. {
  1182. rb_objspace_t *objspace = &rb_objspace;
  1183. VALUE table;
  1184. st_data_t data;
  1185. if (!FL_TEST(obj, FL_FINALIZE)) return;
  1186. if (st_lookup(finalizer_table, obj, &data)) {
  1187. table = (VALUE)data;
  1188. st_insert(finalizer_table, dest, table);
  1189. }
  1190. FL_SET(dest, FL_FINALIZE);
  1191. }
  1192. static VALUE
  1193. run_single_final(VALUE arg)
  1194. {
  1195. VALUE *args = (VALUE *)arg;
  1196. rb_eval_cmd(args[0], args[1], (int)args[2]);
  1197. return Qnil;
  1198. }
  1199. static void
  1200. run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
  1201. {
  1202. long i;
  1203. int status;
  1204. VALUE args[3];
  1205. VALUE objid = nonspecial_obj_id(obj);
  1206. if (RARRAY_LEN(table) > 0) {
  1207. args[1] = rb_obj_freeze(rb_ary_new3(1, objid));
  1208. }
  1209. else {
  1210. args[1] = 0;
  1211. }
  1212. args[2] = (VALUE)rb_safe_level();
  1213. for (i=0; i<RARRAY_LEN(table); i++) {
  1214. VALUE final = RARRAY_PTR(table)[i];
  1215. args[0] = RARRAY_PTR(final)[1];
  1216. args[2] = FIX2INT(RARRAY_PTR(final)[0]);
  1217. status = 0;
  1218. rb_protect(run_single_final, (VALUE)args, &status);
  1219. if (status)
  1220. rb_set_errinfo(Qnil);
  1221. }
  1222. }
  1223. static void
  1224. run_final(rb_objspace_t *objspace, VALUE obj)
  1225. {
  1226. RUBY_DATA_FUNC free_func = 0;
  1227. st_data_t key, table;
  1228. objspace->heap.final_num--;
  1229. RBASIC(obj)->klass = 0;
  1230. if (RTYPEDDATA_P(obj)) {
  1231. free_func = RTYPEDDATA_TYPE(obj)->function.dfree;
  1232. }
  1233. else {
  1234. free_func = RDATA(obj)->dfree;
  1235. }
  1236. if (free_func) {
  1237. (*free_func)(DATA_PTR(obj));
  1238. }
  1239. key = (st_data_t)obj;
  1240. if (st_delete(finalizer_table, &key, &table)) {
  1241. run_finalizer(objspace, obj, (VALUE)table);
  1242. }
  1243. }
  1244. static void
  1245. finalize_list(rb_objspace_t *objspace, RVALUE *p)
  1246. {
  1247. while (p) {
  1248. RVALUE *tmp = p->as.free.next;
  1249. run_final(objspace, (VALUE)p);
  1250. if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */
  1251. add_slot_local_freelist(objspace, p);
  1252. if (!is_lazy_sweeping(objspace)) {
  1253. objspace->total_freed_object_num++;
  1254. objspace->heap.free_num++;
  1255. }
  1256. }
  1257. else {
  1258. struct heaps_slot *slot = (struct heaps_slot *)(VALUE)RDATA(p)->dmark;
  1259. slot->header->limit--;
  1260. }
  1261. p = tmp;
  1262. }
  1263. }
  1264. static void
  1265. finalize_deferred(rb_objspace_t *objspace)
  1266. {
  1267. RVALUE *p = deferred_final_list;
  1268. deferred_final_list = 0;
  1269. if (p) {
  1270. finalize_list(objspace, p);
  1271. }
  1272. }
  1273. void
  1274. rb_gc_finalize_deferred(void)
  1275. {
  1276. rb_objspace_t *objspace = &rb_objspace;
  1277. if (ATOMIC_EXCHANGE(finalizing, 1)) return;
  1278. finalize_deferred(objspace);
  1279. ATOMIC_SET(finalizing, 0);
  1280. }
  1281. struct force_finalize_list {
  1282. VALUE obj;
  1283. VALUE table;
  1284. struct force_finalize_list *next;
  1285. };
  1286. static int
  1287. force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
  1288. {
  1289. struct force_finalize_list **prev = (struct force_finalize_list **)arg;
  1290. struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
  1291. curr->obj = key;
  1292. curr->table = val;
  1293. curr->next = *prev;
  1294. *prev = curr;
  1295. return ST_CONTINUE;
  1296. }
  1297. void
  1298. rb_gc_call_finalizer_at_exit(void)
  1299. {
  1300. rb_objspace_call_finalizer(&rb_objspace);
  1301. }
  1302. static void
  1303. rb_objspace_call_finalizer(rb_objspace_t *objspace)
  1304. {
  1305. RVALUE *p, *pend;
  1306. RVALUE *final_list = 0;
  1307. size_t i;
  1308. rest_sweep(objspace);
  1309. if (ATOMIC_EXCHANGE(finalizing, 1)) return;
  1310. /* run finalizers */
  1311. finalize_deferred(objspace);
  1312. assert(deferred_final_list == 0);
  1313. /* force to run finalizer */
  1314. while (finalizer_table->num_entries) {
  1315. struct force_finalize_list *list = 0;
  1316. st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
  1317. while (list) {
  1318. struct force_finalize_list *curr = list;
  1319. st_data_t obj = (st_data_t)curr->obj;
  1320. run_finalizer(objspace, curr->obj, curr->table);
  1321. st_delete(finalizer_table, &obj, 0);
  1322. list = curr->next;
  1323. xfree(curr);
  1324. }
  1325. }
  1326. /* finalizers are part of garbage collection */
  1327. during_gc++;
  1328. /* run data object's finalizers */
  1329. for (i = 0; i < heaps_used; i++) {
  1330. p = objspace->heap.sorted[i]->start; pend = p + objspace->heap.sorted[i]->limit;
  1331. while (p < pend) {
  1332. if (BUILTIN_TYPE(p) == T_DATA &&
  1333. DATA_PTR(p) && RANY(p)->as.data.dfree &&
  1334. !rb_obj_is_thread((VALUE)p) && !rb_obj_is_mutex((VALUE)p) &&
  1335. !rb_obj_is_fiber((VALUE)p)) {
  1336. p->as.free.flags = 0;
  1337. if (RTYPEDDATA_P(p)) {
  1338. RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
  1339. }
  1340. if (RANY(p)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
  1341. xfree(DATA_PTR(p));
  1342. }
  1343. else if (RANY(p)->as.data.dfree) {
  1344. make_deferred(RANY(p));
  1345. RANY(p)->as.free.next = final_list;
  1346. final_list = p;
  1347. }
  1348. }
  1349. else if (BUILTIN_TYPE(p) == T_FILE) {
  1350. if (RANY(p)->as.file.fptr) {
  1351. make_io_deferred(RANY(p));
  1352. RANY(p)->as.free.next = final_list;
  1353. final_list = p;
  1354. }
  1355. }
  1356. p++;
  1357. }
  1358. }
  1359. during_gc = 0;
  1360. if (final_list) {
  1361. finalize_list(objspace, final_list);
  1362. }
  1363. st_free_table(finalizer_table);
  1364. finalizer_table = 0;
  1365. ATOMIC_SET(finalizing, 0);
  1366. }
  1367. static inline int
  1368. is_id_value(rb_objspace_t *objspace, VALUE ptr)
  1369. {
  1370. if (!is_pointer_to_heap(objspace, (void *)ptr)) return FALSE;
  1371. if (BUILTIN_TYPE(ptr) > T_FIXNUM) return FALSE;
  1372. if (BUILTIN_TYPE(ptr) == T_ICLASS) return FALSE;
  1373. return TRUE;
  1374. }
  1375. static inline int
  1376. is_swept_object(rb_objspace_t *objspace, VALUE ptr)
  1377. {
  1378. struct heaps_slot *slot = objspace->heap.sweep_slots;
  1379. while (slot) {
  1380. if ((VALUE)slot->header->start <= ptr && ptr < (VALUE)(slot->header->end))
  1381. return FALSE;
  1382. slot = slot->next;
  1383. }
  1384. return TRUE;
  1385. }
  1386. static inline int
  1387. is_dead_object(rb_objspace_t *objspace, VALUE ptr)
  1388. {
  1389. if (!is_lazy_sweeping(objspace) || MARKED_IN_BITMAP(GET_HEAP_BITMAP(ptr), ptr))
  1390. return FALSE;
  1391. if (!is_swept_object(objspace, ptr))
  1392. return TRUE;
  1393. return FALSE;
  1394. }
  1395. static inline int
  1396. is_live_object(rb_objspace_t *objspace, VALUE ptr)
  1397. {
  1398. if (BUILTIN_TYPE(ptr) == 0) return FALSE;
  1399. if (RBASIC(ptr)->klass == 0) return FALSE;
  1400. if (is_dead_object(objspace, ptr)) return FALSE;
  1401. return TRUE;
  1402. }
  1403. /*
  1404. * call-seq:
  1405. * ObjectSpace._id2ref(object_id) -> an_object
  1406. *
  1407. * Converts an object id to a reference to the object. May not be
  1408. * called on an object id passed as a parameter to a finalizer.
  1409. *
  1410. * s = "I am a string" #=> "I am a string"
  1411. * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
  1412. * r == s #=> true
  1413. *
  1414. */
  1415. static VALUE
  1416. id2ref(VALUE obj, VALUE objid)
  1417. {
  1418. #if SIZEOF_LONG == SIZEOF_VOIDP
  1419. #define NUM2PTR(x) NUM2ULONG(x)
  1420. #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
  1421. #define NUM2PTR(x) NUM2ULL(x)
  1422. #endif
  1423. rb_objspace_t *objspace = &rb_objspace;
  1424. VALUE ptr;
  1425. void *p0;
  1426. rb_secure(4);
  1427. ptr = NUM2PTR(objid);
  1428. p0 = (void *)ptr;
  1429. if (ptr == Qtrue) return Qtrue;
  1430. if (ptr == Qfalse) return Qfalse;
  1431. if (ptr == Qnil) return Qnil;
  1432. if (FIXNUM_P(ptr)) return (VALUE)ptr;
  1433. if (FLONUM_P(ptr)) return (VALUE)ptr;
  1434. ptr = obj_id_to_ref(objid);
  1435. if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
  1436. ID symid = ptr / sizeof(RVALUE);
  1437. if (rb_id2name(symid) == 0)
  1438. rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
  1439. return ID2SYM(symid);
  1440. }
  1441. if (!is_id_value(objspace, ptr)) {
  1442. rb_raise(rb_eRangeError, "%p is not id value", p0);
  1443. }
  1444. if (!is_live_object(objspace, ptr)) {
  1445. rb_raise(rb_eRangeError, "%p is recycled object", p0);
  1446. }
  1447. return (VALUE)ptr;
  1448. }
  1449. /*
  1450. * Document-method: __id__
  1451. * Document-method: object_id
  1452. *
  1453. * call-seq:
  1454. * obj.__id__ -> integer
  1455. * obj.object_id -> integer
  1456. *
  1457. * Returns an integer identifier for +obj+.
  1458. *
  1459. * The same number will be returned on all calls to +id+ for a given object,
  1460. * and no two active objects will share an id.
  1461. *
  1462. * Object#object_id is a different concept from the +:name+ notation, which
  1463. * returns the symbol id of +name+.
  1464. *
  1465. * Replaces the deprecated Object#id.
  1466. */
  1467. /*
  1468. * call-seq:
  1469. * obj.hash -> fixnum
  1470. *
  1471. * Generates a Fixnum hash value for this object.
  1472. *
  1473. * This function must have the property that <code>a.eql?(b)</code> implies
  1474. * <code>a.hash == b.hash</code>.
  1475. *
  1476. * The hash value is used by Hash class.
  1477. *
  1478. * Any hash value that exceeds the capacity of a Fixnum will be truncated
  1479. * before being used.
  1480. */
  1481. VALUE
  1482. rb_obj_id(VALUE obj)
  1483. {
  1484. /*
  1485. * 32-bit VALUE space
  1486. * MSB ------------------------ LSB
  1487. * false 00000000000000000000000000000000
  1488. * true 00000000000000000000000000000010
  1489. * nil 00000000000000000000000000000100
  1490. * undef 00000000000000000000000000000110
  1491. * symbol ssssssssssssssssssssssss00001110
  1492. * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE))
  1493. * fixnum fffffffffffffffffffffffffffffff1
  1494. *
  1495. * object_id space
  1496. * LSB
  1497. * false 00000000000000000000000000000000
  1498. * true 00000000000000000000000000000010
  1499. * nil 00000000000000000000000000000100
  1500. * undef 00000000000000000000000000000110
  1501. * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4)
  1502. * object oooooooooooooooooooooooooooooo0 o...o % A = 0
  1503. * fixnum fffffffffffffffffffffffffffffff1 bignum if required
  1504. *
  1505. * where A = sizeof(RVALUE)/4
  1506. *
  1507. * sizeof(RVALUE) is
  1508. * 20 if 32-bit, double is 4-byte aligned
  1509. * 24 if 32-bit, double is 8-byte aligned
  1510. * 40 if 64-bit
  1511. */
  1512. if (SYMBOL_P(obj)) {
  1513. return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
  1514. }
  1515. else if (FLONUM_P(obj)) {
  1516. #if SIZEOF_LONG == SIZEOF_VOIDP
  1517. return LONG2NUM((SIGNED_VALUE)obj);
  1518. #else
  1519. return LL2NUM((SIGNED_VALUE)obj);
  1520. #endif
  1521. }
  1522. else if (SPECIAL_CONST_P(obj)) {
  1523. return LONG2NUM((SIGNED_VALUE)obj);
  1524. }
  1525. return nonspecial_obj_id(obj);
  1526. }
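/*
 * A worked example of the encoding above (illustrative, not from the
 * original source), assuming the 32-bit layout in the table where
 * sizeof(RVALUE) == 20 and using the SIZEOF_LONG == SIZEOF_VOIDP branch of
 * obj_id_to_ref(): for a Symbol whose internal ID is 7,
 *
 *     rb_obj_id() returns (7 * 20 + (4 << 2)) | FIXNUM_FLAG
 *                       = (140 + 16) | 1 = 157   (the Fixnum 78 in Ruby),
 *
 * and id2ref() inverts it: stripping FIXNUM_FLAG gives 156, the test
 * 156 % sizeof(RVALUE) == 16 == (4 << 2) identifies a symbol id, and
 * 156 / sizeof(RVALUE) == 7 recovers the ID passed to ID2SYM().
 */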
  1527. static int
  1528. set_zero(st_data_t key, st_data_t val, st_data_t arg)
  1529. {
  1530. VALUE k = (VALUE)key;
  1531. VALUE hash = (VALUE)arg;
  1532. rb_hash_aset(hash, k, INT2FIX(0));
  1533. return ST_CONTINUE;
  1534. }
  1535. /*
  1536. * call-seq:
  1537. * ObjectSpace.count_objects([result_hash]) -> hash
  1538. *
  1539. * Counts objects for each type.
  1540. *
  1541. * It returns a hash, such as:
  1542. * {
  1543. * :TOTAL=>10000,
  1544. * :FREE=>3011,
  1545. * :T_OBJECT=>6,
  1546. * :T_CLASS=>404,
  1547. * # ...
  1548. * }
  1549. *
  1550. * The contents of the returned hash are implementation specific.
1551. * It may change in the future.
  1552. *
  1553. * If the optional argument +result_hash+ is given,
  1554. * it is overwritten and returned. This is intended to avoid probe effect.
  1555. *
  1556. * This method is only expected to work on C Ruby.
  1557. *
  1558. */
  1559. static VALUE
  1560. count_objects(int argc, VALUE *argv, VALUE os)
  1561. {
  1562. rb_objspace_t *objspace = &rb_objspace;
  1563. size_t counts[T_MASK+1];
  1564. size_t freed = 0;
  1565. size_t total = 0;
  1566. size_t i;
  1567. VALUE hash;
  1568. if (rb_scan_args(argc, argv, "01", &hash) == 1) {
  1569. if (!RB_TYPE_P(hash, T_HASH))
  1570. rb_raise(rb_eTypeError, "non-hash given");
  1571. }
  1572. for (i = 0; i <= T_MASK; i++) {
  1573. counts[i] = 0;
  1574. }
  1575. for (i = 0; i < heaps_used; i++) {
  1576. RVALUE *p, *pend;
  1577. p = objspace->heap.sorted[i]->start; pend = p + objspace->heap.sorted[i]->limit;
  1578. for (;p < pend; p++) {
  1579. if (p->as.basic.flags) {
  1580. counts[BUILTIN_TYPE(p)]++;
  1581. }
  1582. else {
  1583. freed++;
  1584. }
  1585. }
  1586. total += objspace->heap.sorted[i]->limit;
  1587. }
  1588. if (hash == Qnil) {
  1589. hash = rb_hash_new();
  1590. }
  1591. else if (!RHASH_EMPTY_P(hash)) {
  1592. st_foreach(RHASH_TBL(hash), set_zero, hash);
  1593. }
  1594. rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
  1595. rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
  1596. for (i = 0; i <= T_MASK; i++) {
  1597. VALUE type;
  1598. switch (i) {
  1599. #define COUNT_TYPE(t) case (t): type = ID2SYM(rb_intern(#t)); break;
  1600. COUNT_TYPE(T_NONE);
  1601. COUNT_TYPE(T_OBJECT);
  1602. COUNT_TYPE(T_CLASS);
  1603. COUNT_TYPE(T_MODULE);
  1604. COUNT_TYPE(T_FLOAT);
  1605. COUNT_TYPE(T_STRING);
  1606. COUNT_TYPE(T_REGEXP);
  1607. COUNT_TYPE(T_ARRAY);
  1608. COUNT_TYPE(T_HASH);
  1609. COUNT_TYPE(T_STRUCT);
  1610. COUNT_TYPE(T_BIGNUM);
  1611. COUNT_TYPE(T_FILE);
  1612. COUNT_TYPE(T_DATA);
  1613. COUNT_TYPE(T_MATCH);
  1614. COUNT_TYPE(T_COMPLEX);
  1615. COUNT_TYPE(T_RATIONAL);
  1616. COUNT_TYPE(T_NIL);
  1617. COUNT_TYPE(T_TRUE);
  1618. COUNT_TYPE(T_FALSE);
  1619. COUNT_TYPE(T_SYMBOL);
  1620. COUNT_TYPE(T_FIXNUM);
  1621. COUNT_TYPE(T_UNDEF);
  1622. COUNT_TYPE(T_NODE);
  1623. COUNT_TYPE(T_ICLASS);
  1624. COUNT_TYPE(T_ZOMBIE);
  1625. #undef COUNT_TYPE
  1626. default: type = INT2NUM(i); break;
  1627. }
  1628. if (counts[i])
  1629. rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
  1630. }
  1631. return hash;
  1632. }
  1633. /*
  1634. ------------------------ Garbage Collection ------------------------
  1635. */
  1636. /* Sweeping */
  1637. static VALUE
  1638. lazy_sweep_enable(void)
  1639. {
  1640. rb_objspace_t *objspace = &rb_objspace;
  1641. objspace->flags.dont_lazy_sweep = FALSE;
  1642. return Qnil;
  1643. }
  1644. static void
  1645. gc_clear_slot_bits(struct heaps_slot *slot)
  1646. {
  1647. memset(slot->bits, 0, HEAP_BITMAP_LIMIT * sizeof(uintptr_t));
  1648. }
  1649. static size_t
  1650. objspace_live_num(rb_objspace_t *objspace)
  1651. {
  1652. return objspace->total_allocated_object_num - objspace->total_freed_object_num;
  1653. }
  1654. static void
  1655. slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
  1656. {
  1657. size_t empty_num = 0, freed_num = 0, final_num = 0;
  1658. RVALUE *p, *pend;
  1659. RVALUE *final = deferred_final_list;
  1660. int deferred;
  1661. uintptr_t *bits;
  1662. p = sweep_slot->header->start; pend = p + sweep_slot->header->limit;
  1663. bits = GET_HEAP_BITMAP(p);
  1664. while (p < pend) {
  1665. if ((!(MARKED_IN_BITMAP(bits, p))) && BUILTIN_TYPE(p) != T_ZOMBIE) {
  1666. if (p->as.basic.flags) {
  1667. if ((deferred = obj_free(objspace, (VALUE)p)) ||
  1668. (FL_TEST(p, FL_FINALIZE))) {
  1669. if (!deferred) {
  1670. p->as.free.flags = T_ZOMBIE;
  1671. RDATA(p)->dfree = 0;
  1672. }
  1673. p->as.free.next = deferred_final_list;
  1674. deferred_final_list = p;
  1675. assert(BUILTIN_TYPE(p) == T_ZOMBIE);
  1676. final_num++;
  1677. }
  1678. else {
  1679. (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
  1680. p->as.free.flags = 0;
  1681. p->as.free.next = sweep_slot->freelist;
  1682. sweep_slot->freelist = p;
  1683. freed_num++;
  1684. }
  1685. }
  1686. else {
  1687. empty_num++;
  1688. }
  1689. }
  1690. p++;
  1691. }
  1692. gc_clear_slot_bits(sweep_slot);
  1693. if (final_num + freed_num + empty_num == sweep_slot->header->limit &&
  1694. objspace->heap.free_num > objspace->heap.do_heap_free) {
  1695. RVALUE *pp;
  1696. for (pp = deferred_final_list; pp != final; pp = pp->as.free.next) {
  1697. RDATA(pp)->dmark = (void (*)(void *))(VALUE)sweep_slot;
  1698. pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */
  1699. }
  1700. sweep_slot->header->limit = final_num;
  1701. unlink_heap_slot(objspace, sweep_slot);
  1702. }
  1703. else {
  1704. if (freed_num + empty_num > 0) {
  1705. link_free_heap_slot(objspace, sweep_slot);
  1706. }
  1707. else {
  1708. sweep_slot->free_next = NULL;
  1709. }
  1710. objspace->total_freed_object_num += freed_num;
  1711. objspace->heap.free_num += freed_num + empty_num;
  1712. }
  1713. objspace->heap.final_num += final_num;
  1714. if (deferred_final_list && !finalizing) {
  1715. rb_thread_t *th = GET_THREAD();
  1716. if (th) {
  1717. RUBY_VM_SET_FINALIZER_INTERRUPT(th);
  1718. }
  1719. }
  1720. }
  1721. static int
  1722. ready_to_gc(rb_objspace_t *objspace)
  1723. {
  1724. if (dont_gc || during_gc) {
  1725. if (!has_free_object) {
  1726. if (!heaps_increment(objspace)) {
  1727. set_heaps_increment(objspace);
  1728. heaps_increment(objspace);
  1729. }
  1730. }
  1731. return FALSE;
  1732. }
  1733. return TRUE;
  1734. }
  1735. static void
  1736. before_gc_sweep(rb_objspace_t *objspace)
  1737. {
  1738. objspace->heap.do_heap_free = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.65);
  1739. objspace->heap.free_min = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.2);
  1740. if (objspace->heap.free_min < initial_free_min) {
  1741. objspace->heap.do_heap_free = heaps_used * HEAP_OBJ_LIMIT;
  1742. objspace->heap.free_min = initial_free_min;
  1743. }
  1744. objspace->heap.sweep_slots = heaps;
  1745. objspace->heap.free_num = 0;
  1746. objspace->heap.free_slots = NULL;
  1747. /* sweep unlinked method entries */
  1748. if (GET_VM()->unlinked_method_entry_list) {
  1749. rb_sweep_method_entry(GET_VM());
  1750. }
  1751. }
  1752. static void
  1753. after_gc_sweep(rb_objspace_t *objspace)
  1754. {
  1755. size_t inc;
  1756. gc_prof_set_malloc_info(objspace);
  1757. if (objspace->heap.free_num < objspace->heap.free_min) {
  1758. set_heaps_increment(objspace);
  1759. heaps_increment(objspace);
  1760. }
  1761. inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
  1762. if (inc > malloc_limit) {
  1763. malloc_limit +=
  1764. (size_t)((inc - malloc_limit) * (double)objspace_live_num(objspace) / (heaps_used * HEAP_OBJ_LIMIT));
  1765. if (malloc_limit < initial_malloc_limit) malloc_limit = initial_malloc_limit;
  1766. }
  1767. free_unused_heaps(objspace);
  1768. }
  1769. static int
  1770. lazy_sweep(rb_objspace_t *objspace)
  1771. {
  1772. struct heaps_slot *next;
  1773. heaps_increment(objspace);
  1774. while (objspace->heap.sweep_slots) {
  1775. next = objspace->heap.sweep

[File truncated here; the remainder of gc.c is omitted. See the repository linked above for the full source.]