
/gc.c

https://github.com/thepelkus/ruby
C | 4421 lines | 3402 code | 557 blank | 462 comment | 498 complexity
Possible License(s): 0BSD, Unlicense, GPL-2.0, BSD-3-Clause, AGPL-3.0

Large files are truncated; only the first portion of gc.c is shown below.

  1. /**********************************************************************
  2. gc.c -
  3. $Author$
  4. created at: Tue Oct 5 09:44:46 JST 1993
  5. Copyright (C) 1993-2007 Yukihiro Matsumoto
  6. Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
  7. Copyright (C) 2000 Information-technology Promotion Agency, Japan
  8. **********************************************************************/
  9. #include "ruby/ruby.h"
  10. #include "ruby/st.h"
  11. #include "ruby/re.h"
  12. #include "ruby/io.h"
  13. #include "ruby/thread.h"
  14. #include "ruby/util.h"
  15. #include "eval_intern.h"
  16. #include "vm_core.h"
  17. #include "internal.h"
  18. #include "gc.h"
  19. #include "constant.h"
  20. #include "atomic.h"
  21. #include <stdio.h>
  22. #include <setjmp.h>
  23. #include <sys/types.h>
  24. #include <assert.h>
  25. #ifdef HAVE_SYS_TIME_H
  26. #include <sys/time.h>
  27. #endif
  28. #ifdef HAVE_SYS_RESOURCE_H
  29. #include <sys/resource.h>
  30. #endif
  31. #if defined(__native_client__) && defined(NACL_NEWLIB)
  32. # include "nacl/resource.h"
  33. # undef HAVE_POSIX_MEMALIGN
  34. # undef HAVE_MEMALIGN
  35. #endif
  36. #if defined _WIN32 || defined __CYGWIN__
  37. #include <windows.h>
  38. #elif defined(HAVE_POSIX_MEMALIGN)
  39. #elif defined(HAVE_MEMALIGN)
  40. #include <malloc.h>
  41. #endif
  42. #ifdef HAVE_VALGRIND_MEMCHECK_H
  43. # include <valgrind/memcheck.h>
  44. # ifndef VALGRIND_MAKE_MEM_DEFINED
  45. # define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE((p), (n))
  46. # endif
  47. # ifndef VALGRIND_MAKE_MEM_UNDEFINED
  48. # define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE((p), (n))
  49. # endif
  50. #else
  51. # define VALGRIND_MAKE_MEM_DEFINED(p, n) /* empty */
  52. # define VALGRIND_MAKE_MEM_UNDEFINED(p, n) /* empty */
  53. #endif
  54. #define rb_setjmp(env) RUBY_SETJMP(env)
  55. #define rb_jmp_buf rb_jmpbuf_t
  56. /* Make alloca work the best possible way. */
  57. #ifdef __GNUC__
  58. # ifndef atarist
  59. # ifndef alloca
  60. # define alloca __builtin_alloca
  61. # endif
  62. # endif /* atarist */
  63. #else
  64. # ifdef HAVE_ALLOCA_H
  65. # include <alloca.h>
  66. # else
  67. # ifdef _AIX
  68. #pragma alloca
  69. # else
  70. # ifndef alloca /* predefined by HP cc +Olibcalls */
  71. void *alloca ();
  72. # endif
  73. # endif /* AIX */
  74. # endif /* HAVE_ALLOCA_H */
  75. #endif /* __GNUC__ */
  76. #ifndef GC_MALLOC_LIMIT
  77. #define GC_MALLOC_LIMIT 8000000
  78. #endif
  79. #define HEAP_MIN_SLOTS 10000
  80. #define FREE_MIN 4096
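/*
 * These three constants are the collector's tunables: GC_MALLOC_LIMIT is the
 * number of bytes that may be malloc'ed between collections before a GC is
 * forced, HEAP_MIN_SLOTS is the initial number of object slots, and FREE_MIN
 * is the minimum number of free slots that must remain after a sweep before
 * the heap is grown. They seed initial_params below and, in this version of
 * CRuby, can usually be overridden at startup via the RUBY_GC_MALLOC_LIMIT,
 * RUBY_HEAP_MIN_SLOTS and RUBY_FREE_MIN environment variables.
 */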
  81. typedef struct {
  82. unsigned int initial_malloc_limit;
  83. unsigned int initial_heap_min_slots;
  84. unsigned int initial_free_min;
  85. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  86. int gc_stress;
  87. #endif
  88. } ruby_gc_params_t;
  89. static ruby_gc_params_t initial_params = {
  90. GC_MALLOC_LIMIT,
  91. HEAP_MIN_SLOTS,
  92. FREE_MIN,
  93. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  94. FALSE,
  95. #endif
  96. };
  97. #define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
  98. #ifndef GC_PROFILE_MORE_DETAIL
  99. #define GC_PROFILE_MORE_DETAIL 0
  100. #endif
  101. typedef struct gc_profile_record {
  102. double gc_time;
  103. double gc_mark_time;
  104. double gc_sweep_time;
  105. double gc_invoke_time;
  106. size_t heap_use_slots;
  107. size_t heap_live_objects;
  108. size_t heap_free_objects;
  109. size_t heap_total_objects;
  110. size_t heap_use_size;
  111. size_t heap_total_size;
  112. int have_finalize;
  113. int is_marked;
  114. size_t allocate_increase;
  115. size_t allocate_limit;
  116. } gc_profile_record;
  117. #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
  118. #pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
  119. #endif
  120. typedef struct RVALUE {
  121. union {
  122. struct {
  123. VALUE flags; /* always 0 for freed obj */
  124. struct RVALUE *next;
  125. } free;
  126. struct RBasic basic;
  127. struct RObject object;
  128. struct RClass klass;
  129. struct RFloat flonum;
  130. struct RString string;
  131. struct RArray array;
  132. struct RRegexp regexp;
  133. struct RHash hash;
  134. struct RData data;
  135. struct RTypedData typeddata;
  136. struct RStruct rstruct;
  137. struct RBignum bignum;
  138. struct RFile file;
  139. struct RNode node;
  140. struct RMatch match;
  141. struct RRational rational;
  142. struct RComplex complex;
  143. } as;
  144. #ifdef GC_DEBUG
  145. const char *file;
  146. int line;
  147. #endif
  148. } RVALUE;
  149. #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
  150. #pragma pack(pop)
  151. #endif
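/*
 * RVALUE is the fixed-size cell in which every heap-allocated Ruby object
 * lives: a union of all built-in object layouts plus the free-list link, so
 * every slot in a heap page has the same size, sizeof(RVALUE).
 */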
  152. struct heaps_slot {
  153. void *membase;
  154. RVALUE *slot;
  155. size_t limit;
  156. uintptr_t *bits;
  157. RVALUE *freelist;
  158. struct heaps_slot *next;
  159. struct heaps_slot *prev;
  160. struct heaps_slot *free_next;
  161. };
  162. struct heaps_header {
  163. struct heaps_slot *base;
  164. uintptr_t *bits;
  165. };
  166. struct sorted_heaps_slot {
  167. RVALUE *start;
  168. RVALUE *end;
  169. struct heaps_slot *slot;
  170. };
  171. struct heaps_free_bitmap {
  172. struct heaps_free_bitmap *next;
  173. };
  174. struct gc_list {
  175. VALUE *varptr;
  176. struct gc_list *next;
  177. };
  178. #define STACK_CHUNK_SIZE 500
  179. typedef struct stack_chunk {
  180. VALUE data[STACK_CHUNK_SIZE];
  181. struct stack_chunk *next;
  182. } stack_chunk_t;
  183. typedef struct mark_stack {
  184. stack_chunk_t *chunk;
  185. stack_chunk_t *cache;
  186. size_t index;
  187. size_t limit;
  188. size_t cache_size;
  189. size_t unused_cache_size;
  190. } mark_stack_t;
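/*
 * The mark stack is a linked list of fixed-size chunks (STACK_CHUNK_SIZE
 * VALUEs each). Marking pushes children onto it and gc_mark_stacked_objects()
 * drains it iteratively, which bounds the C-stack depth during marking.
 */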
  191. #ifndef CALC_EXACT_MALLOC_SIZE
  192. #define CALC_EXACT_MALLOC_SIZE 0
  193. #endif
  194. typedef struct rb_objspace {
  195. struct {
  196. size_t limit;
  197. size_t increase;
  198. #if CALC_EXACT_MALLOC_SIZE
  199. size_t allocated_size;
  200. size_t allocations;
  201. #endif
  202. } malloc_params;
  203. struct {
  204. size_t increment;
  205. struct heaps_slot *ptr;
  206. struct heaps_slot *sweep_slots;
  207. struct heaps_slot *free_slots;
  208. struct sorted_heaps_slot *sorted;
  209. size_t length;
  210. size_t used;
  211. struct heaps_free_bitmap *free_bitmap;
  212. RVALUE *range[2];
  213. RVALUE *freed;
  214. size_t live_num;
  215. size_t free_num;
  216. size_t free_min;
  217. size_t final_num;
  218. size_t do_heap_free;
  219. } heap;
  220. struct {
  221. int dont_gc;
  222. int dont_lazy_sweep;
  223. int during_gc;
  224. rb_atomic_t finalizing;
  225. } flags;
  226. struct {
  227. st_table *table;
  228. RVALUE *deferred;
  229. } final;
  230. mark_stack_t mark_stack;
  231. struct {
  232. int run;
  233. gc_profile_record *record;
  234. size_t count;
  235. size_t size;
  236. double invoke_time;
  237. } profile;
  238. struct gc_list *global_list;
  239. size_t count;
  240. int gc_stress;
  241. struct mark_func_data_struct {
  242. VALUE data;
  243. void (*mark_func)(struct rb_objspace *objspace, VALUE v);
  244. } *mark_func_data;
  245. } rb_objspace_t;
  246. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  247. #define rb_objspace (*GET_VM()->objspace)
  248. #define ruby_initial_gc_stress initial_params.gc_stress
  249. int *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
  250. #else
  251. static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT}};
  252. int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
  253. #endif
  254. #define malloc_limit objspace->malloc_params.limit
  255. #define malloc_increase objspace->malloc_params.increase
  256. #define heaps objspace->heap.ptr
  257. #define heaps_length objspace->heap.length
  258. #define heaps_used objspace->heap.used
  259. #define lomem objspace->heap.range[0]
  260. #define himem objspace->heap.range[1]
  261. #define heaps_inc objspace->heap.increment
  262. #define heaps_freed objspace->heap.freed
  263. #define dont_gc objspace->flags.dont_gc
  264. #define during_gc objspace->flags.during_gc
  265. #define finalizing objspace->flags.finalizing
  266. #define finalizer_table objspace->final.table
  267. #define deferred_final_list objspace->final.deferred
  268. #define global_List objspace->global_list
  269. #define ruby_gc_stress objspace->gc_stress
  270. #define initial_malloc_limit initial_params.initial_malloc_limit
  271. #define initial_heap_min_slots initial_params.initial_heap_min_slots
  272. #define initial_free_min initial_params.initial_free_min
  273. #define is_lazy_sweeping(objspace) ((objspace)->heap.sweep_slots != 0)
  274. #define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
  275. #define RANY(o) ((RVALUE*)(o))
  276. #define has_free_object (objspace->heap.free_slots && objspace->heap.free_slots->freelist)
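/*
 * The macros above let the rest of the file refer to objspace fields as if
 * they were globals (heaps, during_gc, malloc_limit, ...); they expand
 * against either the single static rb_objspace or the per-VM objspace,
 * depending on ENABLE_VM_OBJSPACE.
 */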
  277. #define HEAP_HEADER(p) ((struct heaps_header *)(p))
  278. #define GET_HEAP_HEADER(x) (HEAP_HEADER((uintptr_t)(x) & ~(HEAP_ALIGN_MASK)))
  279. #define GET_HEAP_SLOT(x) (GET_HEAP_HEADER(x)->base)
  280. #define GET_HEAP_BITMAP(x) (GET_HEAP_HEADER(x)->bits)
  281. #define NUM_IN_SLOT(p) (((uintptr_t)(p) & HEAP_ALIGN_MASK)/sizeof(RVALUE))
  282. #define BITMAP_INDEX(p) (NUM_IN_SLOT(p) / (sizeof(uintptr_t) * CHAR_BIT))
  283. #define BITMAP_OFFSET(p) (NUM_IN_SLOT(p) & ((sizeof(uintptr_t) * CHAR_BIT)-1))
  284. #define MARKED_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] & ((uintptr_t)1 << BITMAP_OFFSET(p)))
  285. #ifndef HEAP_ALIGN_LOG
  286. /* default tiny heap size: 16KB */
  287. #define HEAP_ALIGN_LOG 14
  288. #endif
  289. #define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
  290. enum {
  291. HEAP_ALIGN = (1UL << HEAP_ALIGN_LOG),
  292. HEAP_ALIGN_MASK = (~(~0UL << HEAP_ALIGN_LOG)),
  293. REQUIRED_SIZE_BY_MALLOC = (sizeof(size_t) * 5),
  294. HEAP_SIZE = (HEAP_ALIGN - REQUIRED_SIZE_BY_MALLOC),
  295. HEAP_OBJ_LIMIT = (unsigned int)((HEAP_SIZE - sizeof(struct heaps_header))/sizeof(struct RVALUE)),
  296. HEAP_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_SIZE, sizeof(struct RVALUE)), sizeof(uintptr_t) * CHAR_BIT)
  297. };
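/*
 * Heap pages are allocated by aligned_malloc() at HEAP_ALIGN (16KB) alignment,
 * so GET_HEAP_HEADER() can recover a page's heaps_header by masking the low
 * bits of any object pointer; the per-page mark bitmap (one bit per RVALUE,
 * reached via GET_HEAP_BITMAP/MARKED_IN_BITMAP) hangs off that header.
 * HEAP_SIZE is kept REQUIRED_SIZE_BY_MALLOC below HEAP_ALIGN to leave room
 * for the allocator's own bookkeeping within one aligned block.
 */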
  298. int ruby_gc_debug_indent = 0;
  299. VALUE rb_mGC;
  300. extern st_table *rb_class_tbl;
  301. int ruby_disable_gc_stress = 0;
  302. static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
  303. static VALUE define_final0(VALUE obj, VALUE block);
  304. VALUE rb_define_final(VALUE obj, VALUE block);
  305. VALUE rb_undefine_final(VALUE obj);
  306. static void run_final(rb_objspace_t *objspace, VALUE obj);
  307. static void initial_expand_heap(rb_objspace_t *objspace);
  308. static void negative_size_allocation_error(const char *);
  309. static void *aligned_malloc(size_t, size_t);
  310. static void aligned_free(void *);
  311. static void init_mark_stack(mark_stack_t *stack);
  312. static VALUE lazy_sweep_enable(void);
  313. static int garbage_collect(rb_objspace_t *);
  314. static int gc_lazy_sweep(rb_objspace_t *);
  315. static void mark_tbl(rb_objspace_t *, st_table *);
  316. static void rest_sweep(rb_objspace_t *);
  317. static void gc_mark_stacked_objects(rb_objspace_t *);
  318. static double getrusage_time(void);
  319. static inline void gc_prof_timer_start(rb_objspace_t *);
  320. static inline void gc_prof_timer_stop(rb_objspace_t *, int);
  321. static inline void gc_prof_mark_timer_start(rb_objspace_t *);
  322. static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
  323. static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
  324. static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
  325. static inline void gc_prof_set_malloc_info(rb_objspace_t *);
  326. static inline void gc_prof_inc_live_num(rb_objspace_t *);
  327. static inline void gc_prof_dec_live_num(rb_objspace_t *);
  328. /*
  329. --------------------------- ObjectSpace -----------------------------
  330. */
  331. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  332. rb_objspace_t *
  333. rb_objspace_alloc(void)
  334. {
  335. rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t));
  336. memset(objspace, 0, sizeof(*objspace));
  337. malloc_limit = initial_malloc_limit;
  338. ruby_gc_stress = ruby_initial_gc_stress;
  339. return objspace;
  340. }
  341. #endif
  342. #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
  343. static void free_stack_chunks(mark_stack_t *);
  344. void
  345. rb_objspace_free(rb_objspace_t *objspace)
  346. {
  347. rest_sweep(objspace);
  348. if (objspace->profile.record) {
  349. free(objspace->profile.record);
  350. objspace->profile.record = 0;
  351. }
  352. if (global_List) {
  353. struct gc_list *list, *next;
  354. for (list = global_List; list; list = next) {
  355. next = list->next;
  356. xfree(list);
  357. }
  358. }
  359. if (objspace->heap.free_bitmap) {
  360. struct heaps_free_bitmap *list, *next;
  361. for (list = objspace->heap.free_bitmap; list; list = next) {
  362. next = list->next;
  363. free(list);
  364. }
  365. }
  366. if (objspace->heap.sorted) {
  367. size_t i;
  368. for (i = 0; i < heaps_used; ++i) {
  369. free(objspace->heap.sorted[i].slot->bits);
  370. aligned_free(objspace->heap.sorted[i].slot->membase);
  371. free(objspace->heap.sorted[i].slot);
  372. }
  373. free(objspace->heap.sorted);
  374. heaps_used = 0;
  375. heaps = 0;
  376. }
  377. free_stack_chunks(&objspace->mark_stack);
  378. free(objspace);
  379. }
  380. #endif
  381. void
  382. rb_global_variable(VALUE *var)
  383. {
  384. rb_gc_register_address(var);
  385. }
  386. static void
  387. allocate_sorted_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
  388. {
  389. struct sorted_heaps_slot *p;
  390. struct heaps_free_bitmap *bits;
  391. size_t size, add, i;
  392. size = next_heaps_length*sizeof(struct sorted_heaps_slot);
  393. add = next_heaps_length - heaps_used;
  394. if (heaps_used > 0) {
  395. p = (struct sorted_heaps_slot *)realloc(objspace->heap.sorted, size);
  396. if (p) objspace->heap.sorted = p;
  397. }
  398. else {
  399. p = objspace->heap.sorted = (struct sorted_heaps_slot *)malloc(size);
  400. }
  401. if (p == 0) {
  402. during_gc = 0;
  403. rb_memerror();
  404. }
  405. for (i = 0; i < add; i++) {
  406. bits = (struct heaps_free_bitmap *)malloc(HEAP_BITMAP_LIMIT * sizeof(uintptr_t));
  407. if (bits == 0) {
  408. during_gc = 0;
  409. rb_memerror();
  410. return;
  411. }
  412. bits->next = objspace->heap.free_bitmap;
  413. objspace->heap.free_bitmap = bits;
  414. }
  415. }
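/*
 * objspace->heap.free_slots is a singly linked list (threaded through
 * free_next) of heap pages that still have cells on their freelist;
 * rb_newobj() always allocates from the page at its head.
 */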
  416. static void
  417. link_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
  418. {
  419. slot->free_next = objspace->heap.free_slots;
  420. objspace->heap.free_slots = slot;
  421. }
  422. static void
  423. unlink_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
  424. {
  425. objspace->heap.free_slots = slot->free_next;
  426. slot->free_next = NULL;
  427. }
  428. static void
  429. assign_heap_slot(rb_objspace_t *objspace)
  430. {
  431. RVALUE *p, *pend, *membase;
  432. struct heaps_slot *slot;
  433. size_t hi, lo, mid;
  434. size_t objs;
  435. objs = HEAP_OBJ_LIMIT;
  436. p = (RVALUE*)aligned_malloc(HEAP_ALIGN, HEAP_SIZE);
  437. if (p == 0) {
  438. during_gc = 0;
  439. rb_memerror();
  440. }
  441. slot = (struct heaps_slot *)malloc(sizeof(struct heaps_slot));
  442. if (slot == 0) {
  443. aligned_free(p);
  444. during_gc = 0;
  445. rb_memerror();
  446. }
  447. MEMZERO((void*)slot, struct heaps_slot, 1);
  448. slot->next = heaps;
  449. if (heaps) heaps->prev = slot;
  450. heaps = slot;
  451. membase = p;
  452. p = (RVALUE*)((VALUE)p + sizeof(struct heaps_header));
  453. if ((VALUE)p % sizeof(RVALUE) != 0) {
  454. p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
  455. objs = (HEAP_SIZE - (size_t)((VALUE)p - (VALUE)membase))/sizeof(RVALUE);
  456. }
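/* Binary-search objspace->heap.sorted (ordered by membase) for the new page's
 * insertion point; the same sorted array is what is_pointer_to_heap() searches
 * when checking candidate pointers. */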
  457. lo = 0;
  458. hi = heaps_used;
  459. while (lo < hi) {
  460. register RVALUE *mid_membase;
  461. mid = (lo + hi) / 2;
  462. mid_membase = objspace->heap.sorted[mid].slot->membase;
  463. if (mid_membase < membase) {
  464. lo = mid + 1;
  465. }
  466. else if (mid_membase > membase) {
  467. hi = mid;
  468. }
  469. else {
  470. rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid);
  471. }
  472. }
  473. if (hi < heaps_used) {
  474. MEMMOVE(&objspace->heap.sorted[hi+1], &objspace->heap.sorted[hi], struct sorted_heaps_slot, heaps_used - hi);
  475. }
  476. objspace->heap.sorted[hi].slot = slot;
  477. objspace->heap.sorted[hi].start = p;
  478. objspace->heap.sorted[hi].end = (p + objs);
  479. heaps->membase = membase;
  480. heaps->slot = p;
  481. heaps->limit = objs;
  482. assert(objspace->heap.free_bitmap != NULL);
  483. heaps->bits = (uintptr_t *)objspace->heap.free_bitmap;
  484. objspace->heap.free_bitmap = objspace->heap.free_bitmap->next;
  485. HEAP_HEADER(membase)->base = heaps;
  486. HEAP_HEADER(membase)->bits = heaps->bits;
  487. memset(heaps->bits, 0, HEAP_BITMAP_LIMIT * sizeof(uintptr_t));
  488. objspace->heap.free_num += objs;
  489. pend = p + objs;
  490. if (lomem == 0 || lomem > p) lomem = p;
  491. if (himem < pend) himem = pend;
  492. heaps_used++;
  493. while (p < pend) {
  494. p->as.free.flags = 0;
  495. p->as.free.next = heaps->freelist;
  496. heaps->freelist = p;
  497. p++;
  498. }
  499. link_free_heap_slot(objspace, heaps);
  500. }
  501. static void
  502. add_heap_slots(rb_objspace_t *objspace, size_t add)
  503. {
  504. size_t i;
  505. size_t next_heaps_length;
  506. next_heaps_length = heaps_used + add;
  507. if (next_heaps_length > heaps_length) {
  508. allocate_sorted_heaps(objspace, next_heaps_length);
  509. heaps_length = next_heaps_length;
  510. }
  511. for (i = 0; i < add; i++) {
  512. assign_heap_slot(objspace);
  513. }
  514. heaps_inc = 0;
  515. }
  516. static void
  517. init_heap(rb_objspace_t *objspace)
  518. {
  519. add_heap_slots(objspace, HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT);
  520. init_mark_stack(&objspace->mark_stack);
  521. #ifdef USE_SIGALTSTACK
  522. {
  523. /* altstacks of other threads are allocated elsewhere */
  524. rb_thread_t *th = GET_THREAD();
  525. void *tmp = th->altstack;
  526. th->altstack = malloc(ALT_STACK_SIZE);
  527. free(tmp); /* free previously allocated area */
  528. }
  529. #endif
  530. objspace->profile.invoke_time = getrusage_time();
  531. finalizer_table = st_init_numtable();
  532. }
  533. static void
  534. initial_expand_heap(rb_objspace_t *objspace)
  535. {
  536. size_t min_size = initial_heap_min_slots / HEAP_OBJ_LIMIT;
  537. if (min_size > heaps_used) {
  538. add_heap_slots(objspace, min_size - heaps_used);
  539. }
  540. }
  541. static void
  542. set_heaps_increment(rb_objspace_t *objspace)
  543. {
  544. size_t next_heaps_length = (size_t)(heaps_used * 1.8);
  545. if (next_heaps_length == heaps_used) {
  546. next_heaps_length++;
  547. }
  548. heaps_inc = next_heaps_length - heaps_used;
  549. if (next_heaps_length > heaps_length) {
  550. allocate_sorted_heaps(objspace, next_heaps_length);
  551. heaps_length = next_heaps_length;
  552. }
  553. }
  554. static int
  555. heaps_increment(rb_objspace_t *objspace)
  556. {
  557. if (heaps_inc > 0) {
  558. assign_heap_slot(objspace);
  559. heaps_inc--;
  560. return TRUE;
  561. }
  562. return FALSE;
  563. }
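/*
 * Allocation fast path: pop a cell from the freelist of the first page on
 * free_slots. If no free cell is available, lazily sweep one more page via
 * gc_lazy_sweep() -- which may in turn trigger a full GC or grow the heap --
 * and raise NoMemoryError if that still yields nothing.
 */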
  564. VALUE
  565. rb_newobj(void)
  566. {
  567. rb_objspace_t *objspace = &rb_objspace;
  568. VALUE obj;
  569. if (UNLIKELY(during_gc)) {
  570. dont_gc = 1;
  571. during_gc = 0;
  572. rb_bug("object allocation during garbage collection phase");
  573. }
  574. if (UNLIKELY(ruby_gc_stress && !ruby_disable_gc_stress)) {
  575. if (!garbage_collect(objspace)) {
  576. during_gc = 0;
  577. rb_memerror();
  578. }
  579. }
  580. if (UNLIKELY(!has_free_object)) {
  581. if (!gc_lazy_sweep(objspace)) {
  582. during_gc = 0;
  583. rb_memerror();
  584. }
  585. }
  586. obj = (VALUE)objspace->heap.free_slots->freelist;
  587. objspace->heap.free_slots->freelist = RANY(obj)->as.free.next;
  588. if (objspace->heap.free_slots->freelist == NULL) {
  589. unlink_free_heap_slot(objspace, objspace->heap.free_slots);
  590. }
  591. MEMZERO((void*)obj, RVALUE, 1);
  592. #ifdef GC_DEBUG
  593. RANY(obj)->file = rb_sourcefile();
  594. RANY(obj)->line = rb_sourceline();
  595. #endif
  596. gc_prof_inc_live_num(objspace);
  597. return obj;
  598. }
  599. NODE*
  600. rb_node_newnode(enum node_type type, VALUE a0, VALUE a1, VALUE a2)
  601. {
  602. NODE *n = (NODE*)rb_newobj();
  603. n->flags |= T_NODE;
  604. nd_set_type(n, type);
  605. n->u1.value = a0;
  606. n->u2.value = a1;
  607. n->u3.value = a2;
  608. return n;
  609. }
  610. VALUE
  611. rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
  612. {
  613. NEWOBJ(data, struct RData);
  614. if (klass) Check_Type(klass, T_CLASS);
  615. OBJSETUP(data, klass, T_DATA);
  616. data->data = datap;
  617. data->dfree = dfree;
  618. data->dmark = dmark;
  619. return (VALUE)data;
  620. }
  621. VALUE
  622. rb_data_typed_object_alloc(VALUE klass, void *datap, const rb_data_type_t *type)
  623. {
  624. NEWOBJ(data, struct RTypedData);
  625. if (klass) Check_Type(klass, T_CLASS);
  626. OBJSETUP(data, klass, T_DATA);
  627. data->data = datap;
  628. data->typed_flag = 1;
  629. data->type = type;
  630. return (VALUE)data;
  631. }
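/*
 * Typical C-extension usage of the wrappers above (a minimal sketch; the
 * my_thing type and its mark/free callbacks are hypothetical, and klass is
 * the wrapping Ruby class):
 *
 *   struct my_thing *p = ALLOC(struct my_thing);
 *   VALUE obj = rb_data_object_alloc(klass, p, my_thing_mark, my_thing_free);
 *
 * dmark is invoked with the data pointer while marking, dfree when the wrapper
 * is swept; passing RUBY_DEFAULT_FREE ((RUBY_DATA_FUNC)-1) makes the sweeper
 * simply xfree() the pointer, as obj_free() below shows.
 */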
  632. size_t
  633. rb_objspace_data_type_memsize(VALUE obj)
  634. {
  635. if (RTYPEDDATA_P(obj) && RTYPEDDATA_TYPE(obj)->function.dsize) {
  636. return RTYPEDDATA_TYPE(obj)->function.dsize(RTYPEDDATA_DATA(obj));
  637. }
  638. else {
  639. return 0;
  640. }
  641. }
  642. const char *
  643. rb_objspace_data_type_name(VALUE obj)
  644. {
  645. if (RTYPEDDATA_P(obj)) {
  646. return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
  647. }
  648. else {
  649. return 0;
  650. }
  651. }
  652. static void gc_mark(rb_objspace_t *objspace, VALUE ptr);
  653. static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);
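/*
 * is_pointer_to_heap() is the heart of conservative stack scanning: given an
 * arbitrary word found on the machine stack or in registers, it decides
 * whether that word could point to a live object slot (range check, RVALUE
 * alignment check, then a binary search of the sorted page array).
 */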
  654. static inline int
  655. is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
  656. {
  657. register RVALUE *p = RANY(ptr);
  658. register struct sorted_heaps_slot *heap;
  659. register size_t hi, lo, mid;
  660. if (p < lomem || p > himem) return FALSE;
  661. if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
  662. /* check whether p looks like a pointer into the heap, using binary search */
  663. lo = 0;
  664. hi = heaps_used;
  665. while (lo < hi) {
  666. mid = (lo + hi) / 2;
  667. heap = &objspace->heap.sorted[mid];
  668. if (heap->start <= p) {
  669. if (p < heap->end)
  670. return TRUE;
  671. lo = mid + 1;
  672. }
  673. else {
  674. hi = mid;
  675. }
  676. }
  677. return FALSE;
  678. }
  679. static int
  680. free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data)
  681. {
  682. if (!me->mark) {
  683. rb_free_method_entry(me);
  684. }
  685. return ST_CONTINUE;
  686. }
  687. void
  688. rb_free_m_table(st_table *tbl)
  689. {
  690. st_foreach(tbl, free_method_entry_i, 0);
  691. st_free_table(tbl);
  692. }
  693. static int
  694. free_const_entry_i(ID key, rb_const_entry_t *ce, st_data_t data)
  695. {
  696. xfree(ce);
  697. return ST_CONTINUE;
  698. }
  699. void
  700. rb_free_const_table(st_table *tbl)
  701. {
  702. st_foreach(tbl, free_const_entry_i, 0);
  703. st_free_table(tbl);
  704. }
  705. static int obj_free(rb_objspace_t *, VALUE);
  706. static inline struct heaps_slot *
  707. add_slot_local_freelist(rb_objspace_t *objspace, RVALUE *p)
  708. {
  709. struct heaps_slot *slot;
  710. VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
  711. p->as.free.flags = 0;
  712. slot = GET_HEAP_SLOT(p);
  713. p->as.free.next = slot->freelist;
  714. slot->freelist = p;
  715. return slot;
  716. }
  717. static void
  718. unlink_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
  719. {
  720. if (slot->prev)
  721. slot->prev->next = slot->next;
  722. if (slot->next)
  723. slot->next->prev = slot->prev;
  724. if (heaps == slot)
  725. heaps = slot->next;
  726. if (objspace->heap.sweep_slots == slot)
  727. objspace->heap.sweep_slots = slot->next;
  728. slot->prev = NULL;
  729. slot->next = NULL;
  730. }
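/*
 * free_unused_heaps() runs after sweeping: pages whose limit dropped to zero
 * are released, their bitmaps are returned to the free_bitmap pool, and the
 * sorted array is compacted. One freed page is cached in heaps_freed
 * (preferring the lowest address) instead of being returned immediately, so
 * repeated shrink/grow cycles do not thrash the allocator.
 */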
  731. static void
  732. free_unused_heaps(rb_objspace_t *objspace)
  733. {
  734. size_t i, j;
  735. RVALUE *last = 0;
  736. for (i = j = 1; j < heaps_used; i++) {
  737. if (objspace->heap.sorted[i].slot->limit == 0) {
  738. struct heaps_slot* h = objspace->heap.sorted[i].slot;
  739. ((struct heaps_free_bitmap *)(h->bits))->next =
  740. objspace->heap.free_bitmap;
  741. objspace->heap.free_bitmap = (struct heaps_free_bitmap *)h->bits;
  742. if (!last) {
  743. last = objspace->heap.sorted[i].slot->membase;
  744. }
  745. else {
  746. aligned_free(objspace->heap.sorted[i].slot->membase);
  747. }
  748. free(objspace->heap.sorted[i].slot);
  749. heaps_used--;
  750. }
  751. else {
  752. if (i != j) {
  753. objspace->heap.sorted[j] = objspace->heap.sorted[i];
  754. }
  755. j++;
  756. }
  757. }
  758. if (last) {
  759. if (last < heaps_freed) {
  760. aligned_free(heaps_freed);
  761. heaps_freed = last;
  762. }
  763. else {
  764. aligned_free(last);
  765. }
  766. }
  767. }
  768. static inline void
  769. make_deferred(RVALUE *p)
  770. {
  771. p->as.basic.flags = (p->as.basic.flags & ~T_MASK) | T_ZOMBIE;
  772. }
  773. static inline void
  774. make_io_deferred(RVALUE *p)
  775. {
  776. rb_io_t *fptr = p->as.file.fptr;
  777. make_deferred(p);
  778. p->as.data.dfree = (void (*)(void*))rb_io_fptr_finalize;
  779. p->as.data.data = fptr;
  780. }
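/*
 * Deferred finalization: objects that cannot be torn down safely in the middle
 * of a sweep (T_DATA with a dfree function, T_FILE with an open rb_io_t) are
 * converted to T_ZOMBIE by make_deferred()/make_io_deferred(); obj_free()
 * returns 1 for them, and the sweep phase then queues them on
 * deferred_final_list instead of the freelist, so finalizers run outside GC.
 */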
  781. static int
  782. obj_free(rb_objspace_t *objspace, VALUE obj)
  783. {
  784. switch (BUILTIN_TYPE(obj)) {
  785. case T_NIL:
  786. case T_FIXNUM:
  787. case T_TRUE:
  788. case T_FALSE:
  789. rb_bug("obj_free() called for broken object");
  790. break;
  791. }
  792. if (FL_TEST(obj, FL_EXIVAR)) {
  793. rb_free_generic_ivar((VALUE)obj);
  794. FL_UNSET(obj, FL_EXIVAR);
  795. }
  796. switch (BUILTIN_TYPE(obj)) {
  797. case T_OBJECT:
  798. if (!(RANY(obj)->as.basic.flags & ROBJECT_EMBED) &&
  799. RANY(obj)->as.object.as.heap.ivptr) {
  800. xfree(RANY(obj)->as.object.as.heap.ivptr);
  801. }
  802. break;
  803. case T_MODULE:
  804. case T_CLASS:
  805. rb_clear_cache_by_class((VALUE)obj);
  806. if (RCLASS_M_TBL(obj)) {
  807. rb_free_m_table(RCLASS_M_TBL(obj));
  808. }
  809. if (RCLASS_IV_TBL(obj)) {
  810. st_free_table(RCLASS_IV_TBL(obj));
  811. }
  812. if (RCLASS_CONST_TBL(obj)) {
  813. rb_free_const_table(RCLASS_CONST_TBL(obj));
  814. }
  815. if (RCLASS_IV_INDEX_TBL(obj)) {
  816. st_free_table(RCLASS_IV_INDEX_TBL(obj));
  817. }
  818. xfree(RANY(obj)->as.klass.ptr);
  819. break;
  820. case T_STRING:
  821. rb_str_free(obj);
  822. break;
  823. case T_ARRAY:
  824. rb_ary_free(obj);
  825. break;
  826. case T_HASH:
  827. if (RANY(obj)->as.hash.ntbl) {
  828. st_free_table(RANY(obj)->as.hash.ntbl);
  829. }
  830. break;
  831. case T_REGEXP:
  832. if (RANY(obj)->as.regexp.ptr) {
  833. onig_free(RANY(obj)->as.regexp.ptr);
  834. }
  835. break;
  836. case T_DATA:
  837. if (DATA_PTR(obj)) {
  838. if (RTYPEDDATA_P(obj)) {
  839. RDATA(obj)->dfree = RANY(obj)->as.typeddata.type->function.dfree;
  840. }
  841. if (RANY(obj)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
  842. xfree(DATA_PTR(obj));
  843. }
  844. else if (RANY(obj)->as.data.dfree) {
  845. make_deferred(RANY(obj));
  846. return 1;
  847. }
  848. }
  849. break;
  850. case T_MATCH:
  851. if (RANY(obj)->as.match.rmatch) {
  852. struct rmatch *rm = RANY(obj)->as.match.rmatch;
  853. onig_region_free(&rm->regs, 0);
  854. if (rm->char_offset)
  855. xfree(rm->char_offset);
  856. xfree(rm);
  857. }
  858. break;
  859. case T_FILE:
  860. if (RANY(obj)->as.file.fptr) {
  861. make_io_deferred(RANY(obj));
  862. return 1;
  863. }
  864. break;
  865. case T_RATIONAL:
  866. case T_COMPLEX:
  867. break;
  868. case T_ICLASS:
  869. /* iClass shares table with the module */
  870. xfree(RANY(obj)->as.klass.ptr);
  871. break;
  872. case T_FLOAT:
  873. break;
  874. case T_BIGNUM:
  875. if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) {
  876. xfree(RBIGNUM_DIGITS(obj));
  877. }
  878. break;
  879. case T_NODE:
  880. switch (nd_type(obj)) {
  881. case NODE_SCOPE:
  882. if (RANY(obj)->as.node.u1.tbl) {
  883. xfree(RANY(obj)->as.node.u1.tbl);
  884. }
  885. break;
  886. case NODE_ARGS:
  887. if (RANY(obj)->as.node.u3.args) {
  888. xfree(RANY(obj)->as.node.u3.args);
  889. }
  890. break;
  891. case NODE_ALLOCA:
  892. xfree(RANY(obj)->as.node.u1.node);
  893. break;
  894. }
  895. break; /* no need to free iv_tbl */
  896. case T_STRUCT:
  897. if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
  898. RANY(obj)->as.rstruct.as.heap.ptr) {
  899. xfree(RANY(obj)->as.rstruct.as.heap.ptr);
  900. }
  901. break;
  902. default:
  903. rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
  904. BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
  905. }
  906. return 0;
  907. }
  908. void
  909. Init_heap(void)
  910. {
  911. init_heap(&rb_objspace);
  912. }
  913. typedef int each_obj_callback(void *, void *, size_t, void *);
  914. struct each_obj_args {
  915. each_obj_callback *callback;
  916. void *data;
  917. };
  918. static VALUE
  919. objspace_each_objects(VALUE arg)
  920. {
  921. size_t i;
  922. RVALUE *membase = 0;
  923. RVALUE *pstart, *pend;
  924. rb_objspace_t *objspace = &rb_objspace;
  925. struct each_obj_args *args = (struct each_obj_args *)arg;
  926. volatile VALUE v;
  927. i = 0;
  928. while (i < heaps_used) {
  929. while (0 < i && (uintptr_t)membase < (uintptr_t)objspace->heap.sorted[i-1].slot->membase)
  930. i--;
  931. while (i < heaps_used && (uintptr_t)objspace->heap.sorted[i].slot->membase <= (uintptr_t)membase)
  932. i++;
  933. if (heaps_used <= i)
  934. break;
  935. membase = objspace->heap.sorted[i].slot->membase;
  936. pstart = objspace->heap.sorted[i].slot->slot;
  937. pend = pstart + objspace->heap.sorted[i].slot->limit;
  938. for (; pstart != pend; pstart++) {
  939. if (pstart->as.basic.flags) {
  940. v = (VALUE)pstart; /* acquire to save this object */
  941. break;
  942. }
  943. }
  944. if (pstart != pend) {
  945. if ((*args->callback)(pstart, pend, sizeof(RVALUE), args->data)) {
  946. break;
  947. }
  948. }
  949. }
  950. RB_GC_GUARD(v);
  951. return Qnil;
  952. }
  953. /*
  954. * rb_objspace_each_objects() is a special C API for walking through the
  955. * Ruby object space. It is difficult to use safely; frankly, you should
  956. * not use it unless you have read the source of this function and
  957. * understand exactly what it does.
  958. *
  959. * 'callback' will be called several times (once per heap slot in the
  960. * current implementation) with:
  961. * vstart: a pointer to the first living object of the heap_slot.
  962. * vend: a pointer one past the end of the valid heap_slot area.
  963. * stride: the distance to the next VALUE.
  964. *
  965. * If callback() returns non-zero, the iteration is stopped.
  966. *
  967. * This is a sample callback that iterates over live objects:
  968. *
  969. * int
  970. * sample_callback(void *vstart, void *vend, size_t stride, void *data) {
  971. * VALUE v = (VALUE)vstart;
  972. * for (; v != (VALUE)vend; v += stride) {
  973. * if (RBASIC(v)->flags) { // liveness check
  974. * // do something with live object 'v'
  975. * }
  976. * }
  977. * return 0; // continue the iteration
  978. * }
  979. *
  980. * Note: 'vstart' is not the top of the heap_slot. It points to the first
  981. * living object, so that at least one object is held and GC issues are
  982. * avoided. This means you cannot walk through every Ruby object slot,
  983. * including freed slots.
  984. *
  985. * Note: In this implementation, 'stride' equals sizeof(RVALUE). However,
  986. * a different value may be passed in the future, so use 'stride' rather
  987. * than a constant in the iteration.
  988. */
  989. void
  990. rb_objspace_each_objects(each_obj_callback *callback, void *data)
  991. {
  992. struct each_obj_args args;
  993. rb_objspace_t *objspace = &rb_objspace;
  994. rest_sweep(objspace);
  995. objspace->flags.dont_lazy_sweep = TRUE;
  996. args.callback = callback;
  997. args.data = data;
  998. rb_ensure(objspace_each_objects, (VALUE)&args, lazy_sweep_enable, Qnil);
  999. }
  1000. struct os_each_struct {
  1001. size_t num;
  1002. VALUE of;
  1003. };
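/*
 * internal_object_p() decides what ObjectSpace.each_object hides: free cells,
 * implementation-only types (T_NONE, T_ICLASS, T_NODE, T_ZOMBIE), singleton
 * classes, and hidden objects whose klass has been cleared.
 */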
  1004. static int
  1005. internal_object_p(VALUE obj)
  1006. {
  1007. RVALUE *p = (RVALUE *)obj;
  1008. if (p->as.basic.flags) {
  1009. switch (BUILTIN_TYPE(p)) {
  1010. case T_NONE:
  1011. case T_ICLASS:
  1012. case T_NODE:
  1013. case T_ZOMBIE:
  1014. break;
  1015. case T_CLASS:
  1016. if (FL_TEST(p, FL_SINGLETON))
  1017. break;
  1018. default:
  1019. if (!p->as.basic.klass) break;
  1020. return 0;
  1021. }
  1022. }
  1023. return 1;
  1024. }
  1025. static int
  1026. os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
  1027. {
  1028. struct os_each_struct *oes = (struct os_each_struct *)data;
  1029. RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend;
  1030. for (; p != pend; p++) {
  1031. volatile VALUE v = (VALUE)p;
  1032. if (!internal_object_p(v)) {
  1033. if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
  1034. rb_yield(v);
  1035. oes->num++;
  1036. }
  1037. }
  1038. }
  1039. return 0;
  1040. }
  1041. static VALUE
  1042. os_obj_of(VALUE of)
  1043. {
  1044. struct os_each_struct oes;
  1045. oes.num = 0;
  1046. oes.of = of;
  1047. rb_objspace_each_objects(os_obj_of_i, &oes);
  1048. return SIZET2NUM(oes.num);
  1049. }
  1050. /*
  1051. * call-seq:
  1052. * ObjectSpace.each_object([module]) {|obj| ... } -> fixnum
  1053. * ObjectSpace.each_object([module]) -> an_enumerator
  1054. *
  1055. * Calls the block once for each living, nonimmediate object in this
  1056. * Ruby process. If <i>module</i> is specified, calls the block
  1057. * for only those classes or modules that match (or are a subclass of)
  1058. * <i>module</i>. Returns the number of objects found. Immediate
  1059. * objects (<code>Fixnum</code>s, <code>Symbol</code>s,
  1060. * <code>true</code>, <code>false</code>, and <code>nil</code>) are
  1061. * never returned. In the example below, <code>each_object</code>
  1062. * returns both the numbers we defined and several constants defined in
  1063. * the <code>Math</code> module.
  1064. *
  1065. * If no block is given, an enumerator is returned instead.
  1066. *
  1067. * a = 102.7
  1068. * b = 95 # Won't be returned
  1069. * c = 12345678987654321
  1070. * count = ObjectSpace.each_object(Numeric) {|x| p x }
  1071. * puts "Total count: #{count}"
  1072. *
  1073. * <em>produces:</em>
  1074. *
  1075. * 12345678987654321
  1076. * 102.7
  1077. * 2.71828182845905
  1078. * 3.14159265358979
  1079. * 2.22044604925031e-16
  1080. * 1.7976931348623157e+308
  1081. * 2.2250738585072e-308
  1082. * Total count: 7
  1083. *
  1084. */
  1085. static VALUE
  1086. os_each_obj(int argc, VALUE *argv, VALUE os)
  1087. {
  1088. VALUE of;
  1089. rb_secure(4);
  1090. if (argc == 0) {
  1091. of = 0;
  1092. }
  1093. else {
  1094. rb_scan_args(argc, argv, "01", &of);
  1095. }
  1096. RETURN_ENUMERATOR(os, 1, &of);
  1097. return os_obj_of(of);
  1098. }
  1099. /*
  1100. * call-seq:
  1101. * ObjectSpace.undefine_finalizer(obj)
  1102. *
  1103. * Removes all finalizers for <i>obj</i>.
  1104. *
  1105. */
  1106. static VALUE
  1107. undefine_final(VALUE os, VALUE obj)
  1108. {
  1109. return rb_undefine_final(obj);
  1110. }
  1111. VALUE
  1112. rb_undefine_final(VALUE obj)
  1113. {
  1114. rb_objspace_t *objspace = &rb_objspace;
  1115. st_data_t data = obj;
  1116. rb_check_frozen(obj);
  1117. st_delete(finalizer_table, &data, 0);
  1118. FL_UNSET(obj, FL_FINALIZE);
  1119. return obj;
  1120. }
  1121. /*
  1122. * call-seq:
  1123. * ObjectSpace.define_finalizer(obj, aProc=proc())
  1124. *
  1125. * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
  1126. * is destroyed.
  1127. *
  1128. */
  1129. static VALUE
  1130. define_final(int argc, VALUE *argv, VALUE os)
  1131. {
  1132. VALUE obj, block;
  1133. rb_scan_args(argc, argv, "11", &obj, &block);
  1134. rb_check_frozen(obj);
  1135. if (argc == 1) {
  1136. block = rb_block_proc();
  1137. }
  1138. else if (!rb_respond_to(block, rb_intern("call"))) {
  1139. rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
  1140. rb_obj_classname(block));
  1141. }
  1142. return define_final0(obj, block);
  1143. }
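/*
 * Ruby-level usage sketch (the resource class and variable names below are
 * illustrative only):
 *
 *   handle = SomeResource.open(path)
 *   ObjectSpace.define_finalizer(handle, proc { |id| warn "resource #{id} leaked" })
 *   ...
 *   ObjectSpace.undefine_finalizer(handle)  # cancel after explicit cleanup
 *
 * The finalizer proc must not capture the object itself, or the object can
 * never become garbage.
 */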
  1144. static VALUE
  1145. define_final0(VALUE obj, VALUE block)
  1146. {
  1147. rb_objspace_t *objspace = &rb_objspace;
  1148. VALUE table;
  1149. st_data_t data;
  1150. if (!FL_ABLE(obj)) {
  1151. rb_raise(rb_eArgError, "cannot define finalizer for %s",
  1152. rb_obj_classname(obj));
  1153. }
  1154. RBASIC(obj)->flags |= FL_FINALIZE;
  1155. block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block);
  1156. OBJ_FREEZE(block);
  1157. if (st_lookup(finalizer_table, obj, &data)) {
  1158. table = (VALUE)data;
  1159. rb_ary_push(table, block);
  1160. }
  1161. else {
  1162. table = rb_ary_new3(1, block);
  1163. RBASIC(table)->klass = 0;
  1164. st_add_direct(finalizer_table, obj, table);
  1165. }
  1166. return block;
  1167. }
  1168. VALUE
  1169. rb_define_final(VALUE obj, VALUE block)
  1170. {
  1171. rb_check_frozen(obj);
  1172. if (!rb_respond_to(block, rb_intern("call"))) {
  1173. rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
  1174. rb_obj_classname(block));
  1175. }
  1176. return define_final0(obj, block);
  1177. }
  1178. void
  1179. rb_gc_copy_finalizer(VALUE dest, VALUE obj)
  1180. {
  1181. rb_objspace_t *objspace = &rb_objspace;
  1182. VALUE table;
  1183. st_data_t data;
  1184. if (!FL_TEST(obj, FL_FINALIZE)) return;
  1185. if (st_lookup(finalizer_table, obj, &data)) {
  1186. table = (VALUE)data;
  1187. st_insert(finalizer_table, dest, table);
  1188. }
  1189. FL_SET(dest, FL_FINALIZE);
  1190. }
  1191. static VALUE
  1192. run_single_final(VALUE arg)
  1193. {
  1194. VALUE *args = (VALUE *)arg;
  1195. rb_eval_cmd(args[0], args[1], (int)args[2]);
  1196. return Qnil;
  1197. }
  1198. static void
  1199. run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
  1200. {
  1201. long i;
  1202. int status;
  1203. VALUE args[3];
  1204. VALUE objid = nonspecial_obj_id(obj);
  1205. if (RARRAY_LEN(table) > 0) {
  1206. args[1] = rb_obj_freeze(rb_ary_new3(1, objid));
  1207. }
  1208. else {
  1209. args[1] = 0;
  1210. }
  1211. args[2] = (VALUE)rb_safe_level();
  1212. for (i=0; i<RARRAY_LEN(table); i++) {
  1213. VALUE final = RARRAY_PTR(table)[i];
  1214. args[0] = RARRAY_PTR(final)[1];
  1215. args[2] = FIX2INT(RARRAY_PTR(final)[0]);
  1216. status = 0;
  1217. rb_protect(run_single_final, (VALUE)args, &status);
  1218. if (status)
  1219. rb_set_errinfo(Qnil);
  1220. }
  1221. }
  1222. static void
  1223. run_final(rb_objspace_t *objspace, VALUE obj)
  1224. {
  1225. RUBY_DATA_FUNC free_func = 0;
  1226. st_data_t key, table;
  1227. objspace->heap.final_num--;
  1228. RBASIC(obj)->klass = 0;
  1229. if (RTYPEDDATA_P(obj)) {
  1230. free_func = RTYPEDDATA_TYPE(obj)->function.dfree;
  1231. }
  1232. else {
  1233. free_func = RDATA(obj)->dfree;
  1234. }
  1235. if (free_func) {
  1236. (*free_func)(DATA_PTR(obj));
  1237. }
  1238. key = (st_data_t)obj;
  1239. if (st_delete(finalizer_table, &key, &table)) {
  1240. run_finalizer(objspace, obj, (VALUE)table);
  1241. }
  1242. }
  1243. static void
  1244. finalize_list(rb_objspace_t *objspace, RVALUE *p)
  1245. {
  1246. while (p) {
  1247. RVALUE *tmp = p->as.free.next;
  1248. run_final(objspace, (VALUE)p);
  1249. if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */
  1250. add_slot_local_freelist(objspace, p);
  1251. if (!is_lazy_sweeping(objspace)) {
  1252. gc_prof_dec_live_num(objspace);
  1253. }
  1254. }
  1255. else {
  1256. struct heaps_slot *slot = (struct heaps_slot *)(VALUE)RDATA(p)->dmark;
  1257. slot->limit--;
  1258. }
  1259. p = tmp;
  1260. }
  1261. }
  1262. static void
  1263. finalize_deferred(rb_objspace_t *objspace)
  1264. {
  1265. RVALUE *p = deferred_final_list;
  1266. deferred_final_list = 0;
  1267. if (p) {
  1268. finalize_list(objspace, p);
  1269. }
  1270. }
  1271. void
  1272. rb_gc_finalize_deferred(void)
  1273. {
  1274. rb_objspace_t *objspace = &rb_objspace;
  1275. if (ATOMIC_EXCHANGE(finalizing, 1)) return;
  1276. finalize_deferred(objspace);
  1277. ATOMIC_SET(finalizing, 0);
  1278. }
  1279. static int
  1280. chain_finalized_object(st_data_t key, st_data_t val, st_data_t arg)
  1281. {
  1282. RVALUE *p = (RVALUE *)key, **final_list = (RVALUE **)arg;
  1283. if ((p->as.basic.flags & FL_FINALIZE) == FL_FINALIZE &&
  1284. !MARKED_IN_BITMAP(GET_HEAP_BITMAP(p), p)) {
  1285. if (BUILTIN_TYPE(p) != T_ZOMBIE) {
  1286. p->as.free.flags = T_ZOMBIE;
  1287. RDATA(p)->dfree = 0;
  1288. }
  1289. p->as.free.next = *final_list;
  1290. *final_list = p;
  1291. }
  1292. return ST_CONTINUE;
  1293. }
  1294. struct force_finalize_list {
  1295. VALUE obj;
  1296. VALUE table;
  1297. struct force_finalize_list *next;
  1298. };
  1299. static int
  1300. force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
  1301. {
  1302. struct force_finalize_list **prev = (struct force_finalize_list **)arg;
  1303. struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
  1304. curr->obj = key;
  1305. curr->table = val;
  1306. curr->next = *prev;
  1307. *prev = curr;
  1308. return ST_CONTINUE;
  1309. }
  1310. void
  1311. rb_gc_call_finalizer_at_exit(void)
  1312. {
  1313. rb_objspace_call_finalizer(&rb_objspace);
  1314. }
  1315. static void
  1316. rb_objspace_call_finalizer(rb_objspace_t *objspace)
  1317. {
  1318. RVALUE *p, *pend;
  1319. RVALUE *final_list = 0;
  1320. size_t i;
  1321. rest_sweep(objspace);
  1322. if (ATOMIC_EXCHANGE(finalizing, 1)) return;
  1323. /* run finalizers */
  1324. do {
  1325. finalize_deferred(objspace);
  1326. /* mark reachable objects from finalizers */
  1327. /* they might not be referenced from anywhere else at this point */
  1328. mark_tbl(objspace, finalizer_table);
  1329. gc_mark_stacked_objects(objspace);
  1330. st_foreach(finalizer_table, chain_finalized_object,
  1331. (st_data_t)&deferred_final_list);
  1332. } while (deferred_final_list);
  1333. /* force to run finalizer */
  1334. while (finalizer_table->num_entries) {
  1335. struct force_finalize_list *list = 0;
  1336. st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
  1337. while (list) {
  1338. struct force_finalize_list *curr = list;
  1339. st_data_t obj = (st_data_t)curr->obj;
  1340. run_finalizer(objspace, curr->obj, curr->table);
  1341. st_delete(finalizer_table, &obj, 0);
  1342. list = curr->next;
  1343. xfree(curr);
  1344. }
  1345. }
  1346. /* finalizers are part of garbage collection */
  1347. during_gc++;
  1348. /* run data object's finalizers */
  1349. for (i = 0; i < heaps_used; i++) {
  1350. p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end;
  1351. while (p < pend) {
  1352. if (BUILTIN_TYPE(p) == T_DATA &&
  1353. DATA_PTR(p) && RANY(p)->as.data.dfree &&
  1354. !rb_obj_is_thread((VALUE)p) && !rb_obj_is_mutex((VALUE)p) &&
  1355. !rb_obj_is_fiber((VALUE)p)) {
  1356. p->as.free.flags = 0;
  1357. if (RTYPEDDATA_P(p)) {
  1358. RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
  1359. }
  1360. if (RANY(p)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
  1361. xfree(DATA_PTR(p));
  1362. }
  1363. else if (RANY(p)->as.data.dfree) {
  1364. make_deferred(RANY(p));
  1365. RANY(p)->as.free.next = final_list;
  1366. final_list = p;
  1367. }
  1368. }
  1369. else if (BUILTIN_TYPE(p) == T_FILE) {
  1370. if (RANY(p)->as.file.fptr) {
  1371. make_io_deferred(RANY(p));
  1372. RANY(p)->as.free.next = final_list;
  1373. final_list = p;
  1374. }
  1375. }
  1376. p++;
  1377. }
  1378. }
  1379. during_gc = 0;
  1380. if (final_list) {
  1381. finalize_list(objspace, final_list);
  1382. }
  1383. st_free_table(finalizer_table);
  1384. finalizer_table = 0;
  1385. ATOMIC_SET(finalizing, 0);
  1386. }
  1387. static inline int
  1388. is_id_value(rb_objspace_t *objspace, VALUE ptr)
  1389. {
  1390. if (!is_pointer_to_heap(objspace, (void *)ptr)) return FALSE;
  1391. if (BUILTIN_TYPE(ptr) > T_FIXNUM) return FALSE;
  1392. if (BUILTIN_TYPE(ptr) == T_ICLASS) return FALSE;
  1393. return TRUE;
  1394. }
  1395. static inline int
  1396. is_dead_object(rb_objspace_t *objspace, VALUE ptr)
  1397. {
  1398. struct heaps_slot *slot = objspace->heap.sweep_slots;
  1399. if (!is_lazy_sweeping(objspace) || MARKED_IN_BITMAP(GET_HEAP_BITMAP(ptr), ptr))
  1400. return FALSE;
  1401. while (slot) {
  1402. if ((VALUE)slot->slot <= ptr && ptr < (VALUE)(slot->slot + slot->limit))
  1403. return TRUE;
  1404. slot = slot->next;
  1405. }
  1406. return FALSE;
  1407. }
  1408. static inline int
  1409. is_live_object(rb_objspace_t *objspace, VALUE ptr)
  1410. {
  1411. if (BUILTIN_TYPE(ptr) == 0) return FALSE;
  1412. if (RBASIC(ptr)->klass == 0) return FALSE;
  1413. if (is_dead_object(objspace, ptr)) return FALSE;
  1414. return TRUE;
  1415. }
  1416. /*
  1417. * call-seq:
  1418. * ObjectSpace._id2ref(object_id) -> an_object
  1419. *
  1420. * Converts an object id to a reference to the object. May not be
  1421. * called on an object id passed as a parameter to a finalizer.
  1422. *
  1423. * s = "I am a string" #=> "I am a string"
  1424. * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
  1425. * r == s #=> true
  1426. *
  1427. */
  1428. static VALUE
  1429. id2ref(VALUE obj, VALUE objid)
  1430. {
  1431. #if SIZEOF_LONG == SIZEOF_VOIDP
  1432. #define NUM2PTR(x) NUM2ULONG(x)
  1433. #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
  1434. #define NUM2PTR(x) NUM2ULL(x)
  1435. #endif
  1436. rb_objspace_t *objspace = &rb_objspace;
  1437. VALUE ptr;
  1438. void *p0;
  1439. rb_secure(4);
  1440. ptr = NUM2PTR(objid);
  1441. p0 = (void *)ptr;
  1442. if (ptr == Qtrue) return Qtrue;
  1443. if (ptr == Qfalse) return Qfalse;
  1444. if (ptr == Qnil) return Qnil;
  1445. if (FIXNUM_P(ptr)) return (VALUE)ptr;
  1446. if (FLONUM_P(ptr)) return (VALUE)ptr;
  1447. ptr = objid ^ FIXNUM_FLAG; /* unset FIXNUM_FLAG */
  1448. if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
  1449. ID symid = ptr / sizeof(RVALUE);
  1450. if (rb_id2name(symid) == 0)
  1451. rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
  1452. return ID2SYM(symid);
  1453. }
  1454. if (!is_id_value(objspace, ptr)) {
  1455. rb_raise(rb_eRangeError, "%p is not id value", p0);
  1456. }
  1457. if (!is_live_object(objspace, ptr)) {
  1458. rb_raise(rb_eRangeError, "%p is recycled object", p0);
  1459. }
  1460. return (VALUE)ptr;
  1461. }
  1462. /*
  1463. * Document-method: __id__
  1464. * Document-method: object_id
  1465. *
  1466. * call-seq:
  1467. * obj.__id__ -> fixnum
  1468. * obj.object_id -> fixnum
  1469. *
  1470. * Returns an integer identifier for <i>obj</i>. The same number will
  1471. * be returned on all calls to <code>id</code> for a given object, and
  1472. * no two active objects will share an id.
  1473. * <code>Object#object_id</code> is a different concept from the
  1474. * <code>:name</code> notation, which returns the symbol id of
  1475. * <code>name</code>. Replaces the deprecated <code>Object#id</code>.
  1476. */
  1477. /*
  1478. * call-seq:
  1479. * obj.hash -> fixnum
  1480. *
  1481. * Generates a <code>Fixnum</code> hash value for this object. This
  1482. * function must have the property that <code>a.eql?(b)</code> implies
  1483. * <code>a.hash == b.hash</code>. The hash value is used by class
  1484. * <code>Hash</code>. Any hash value that exceeds the capacity of a
  1485. * <code>Fixnum</code> will be truncated before being used.
  1486. */
  1487. VALUE
  1488. rb_obj_id(VALUE obj)
  1489. {
  1490. /*
  1491. * 32-bit VALUE space
  1492. * MSB ------------------------ LSB
  1493. * false 00000000000000000000000000000000
  1494. * true 00000000000000000000000000000010
  1495. * nil 00000000000000000000000000000100
  1496. * undef 00000000000000000000000000000110
  1497. * symbol ssssssssssssssssssssssss00001110
  1498. * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE))
  1499. * fixnum fffffffffffffffffffffffffffffff1
  1500. *
  1501. * object_id space
  1502. * LSB
  1503. * false 00000000000000000000000000000000
  1504. * true 00000000000000000000000000000010
  1505. * nil 00000000000000000000000000000100
  1506. * undef 00000000000000000000000000000110
  1507. * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4)
  1508. * object oooooooooooooooooooooooooooooo0 o...o % A = 0
  1509. * fixnum fffffffffffffffffffffffffffffff1 bignum if required
  1510. *
  1511. * where A = sizeof(RVALUE)/4
  1512. *
  1513. * sizeof(RVALUE) is
  1514. * 20 if 32-bit, double is 4-byte aligned
  1515. * 24 if 32-bit, double is 8-byte aligned
  1516. * 40 if 64-bit
  1517. */
  1518. if (SYMBOL_P(obj)) {
  1519. return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
  1520. }
  1521. else if (FLONUM_P(obj)) {
  1522. #if SIZEOF_LONG == SIZEOF_VOIDP
  1523. return LONG2NUM((SIGNED_VALUE)obj);
  1524. #else
  1525. return LL2NUM((SIGNED_VALUE)obj);
  1526. #endif
  1527. }
  1528. else if (SPECIAL_CONST_P(obj)) {
  1529. return LONG2NUM((SIGNED_VALUE)obj);
  1530. }
  1531. return nonspecial_obj_id(obj);
  1532. }
  1533. static int
  1534. set_zero(st_data_t key, st_data_t val, st_data_t arg)
  1535. {
  1536. VALUE k = (VALUE)key;
  1537. VALUE hash = (VALUE)arg;
  1538. rb_hash_aset(hash, k, INT2FIX(0));
  1539. return ST_CONTINUE;
  1540. }
  1541. /*
  1542. * call-seq:
  1543. * ObjectSpace.count_objects([result_hash]) -> hash
  1544. *
  1545. * Counts objects for each type.
  1546. *
  1547. * It returns a hash as:
  1548. * {:TOTAL=>10000, :FREE=>3011, :T_OBJECT=>6, :T_CLASS=>404, ...}
  1549. *
  1550. * If the optional argument, result_hash, is given,
  1551. * it is overwritten and returned.
  1552. * This is intended to avoid the probe effect.
  1553. *
  1554. * The contents of the returned hash are implementation defined
  1555. * and may change in the future.
  1556. *
  1557. * This method is only expected to work on C Ruby.
  1558. *
  1559. */
  1560. static VALUE
  1561. count_objects(int argc, VALUE *argv, VALUE os)
  1562. {
  1563. rb_objspace_t *objspace = &rb_objspace;
  1564. size_t counts[T_MASK+1];
  1565. size_t freed = 0;
  1566. size_t total = 0;
  1567. size_t i;
  1568. VALUE hash;
  1569. if (rb_scan_args(argc, argv, "01", &hash) == 1) {
  1570. if (!RB_TYPE_P(hash, T_HASH))
  1571. rb_raise(rb_eTypeError, "non-hash given");
  1572. }
  1573. for (i = 0; i <= T_MASK; i++) {
  1574. counts[i] = 0;
  1575. }
  1576. for (i = 0; i < heaps_used; i++) {
  1577. RVALUE *p, *pend;
  1578. p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end;
  1579. for (;p < pend; p++) {
  1580. if (p->as.basic.flags) {
  1581. counts[BUILTIN_TYPE(p)]++;
  1582. }
  1583. else {
  1584. freed++;
  1585. }
  1586. }
  1587. total += objspace->heap.sorted[i].slot->limit;
  1588. }
  1589. if (hash == Qnil) {
  1590. hash = rb_hash_new();
  1591. }
  1592. else if (!RHASH_EMPTY_P(hash)) {
  1593. st_foreach(RHASH_TBL(hash), set_zero, hash);
  1594. }
  1595. rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
  1596. rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
  1597. for (i = 0; i <= T_MASK; i++) {
  1598. VALUE type;
  1599. switch (i) {
  1600. #define COUNT_TYPE(t) case (t): type = ID2SYM(rb_intern(#t)); break;
  1601. COUNT_TYPE(T_NONE);
  1602. COUNT_TYPE(T_OBJECT);
  1603. COUNT_TYPE(T_CLASS);
  1604. COUNT_TYPE(T_MODULE);
  1605. COUNT_TYPE(T_FLOAT);
  1606. COUNT_TYPE(T_STRING);
  1607. COUNT_TYPE(T_REGEXP);
  1608. COUNT_TYPE(T_ARRAY);
  1609. COUNT_TYPE(T_HASH);
  1610. COUNT_TYPE(T_STRUCT);
  1611. COUNT_TYPE(T_BIGNUM);
  1612. COUNT_TYPE(T_FILE);
  1613. COUNT_TYPE(T_DATA);
  1614. COUNT_TYPE(T_MATCH);
  1615. COUNT_TYPE(T_COMPLEX);
  1616. COUNT_TYPE(T_RATIONAL);
  1617. COUNT_TYPE(T_NIL);
  1618. COUNT_TYPE(T_TRUE);
  1619. COUNT_TYPE(T_FALSE);
  1620. COUNT_TYPE(T_SYMBOL);
  1621. COUNT_TYPE(T_FIXNUM);
  1622. COUNT_TYPE(T_UNDEF);
  1623. COUNT_TYPE(T_NODE);
  1624. COUNT_TYPE(T_ICLASS);
  1625. COUNT_TYPE(T_ZOMBIE);
  1626. #undef COUNT_TYPE
  1627. default: type = INT2NUM(i); break;
  1628. }
  1629. if (counts[i])
  1630. rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
  1631. }
  1632. return hash;
  1633. }
  1634. /*
  1635. ------------------------ Garbage Collection ------------------------
  1636. */
  1637. /* Sweeping */
  1638. static VALUE
  1639. lazy_sweep_enable(void)
  1640. {
  1641. rb_objspace_t *objspace = &rb_objspace;
  1642. objspace->flags.dont_lazy_sweep = FALSE;
  1643. return Qnil;
  1644. }
  1645. static void
  1646. gc_clear_slot_bits(struct heaps_slot *slot)
  1647. {
  1648. memset(GET_HEAP_BITMAP(slot->slot), 0,
  1649. HEAP_BITMAP_LIMIT * sizeof(uintptr_t));
  1650. }
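/*
 * slot_sweep() sweeps one heap page: every cell not marked in the page's
 * bitmap is either returned to the page's freelist or, if it needs a dfree or
 * user finalizer, turned into a zombie for deferred finalization. If the whole
 * page ends up empty and enough free cells exist elsewhere
 * (free_num > do_heap_free), the page is unlinked so free_unused_heaps() can
 * release it.
 */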
  1651. static void
  1652. slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
  1653. {
  1654. size_t free_num = 0, final_num = 0;
  1655. RVALUE *p, *pend;
  1656. RVALUE *final = deferred_final_list;
  1657. int deferred;
  1658. uintptr_t *bits;
  1659. p = sweep_slot->slot; pend = p + sweep_slot->limit;
  1660. bits = GET_HEAP_BITMAP(p);
  1661. while (p < pend) {
  1662. if ((!(MARKED_IN_BITMAP(bits, p))) && BUILTIN_TYPE(p) != T_ZOMBIE) {
  1663. if (p->as.basic.flags) {
  1664. if ((deferred = obj_free(objspace, (VALUE)p)) ||
  1665. (FL_TEST(p, FL_FINALIZE))) {
  1666. if (!deferred) {
  1667. p->as.free.flags = T_ZOMBIE;
  1668. RDATA(p)->dfree = 0;
  1669. }
  1670. p->as.free.next = deferred_final_list;
  1671. deferred_final_list = p;
  1672. assert(BUILTIN_TYPE(p) == T_ZOMBIE);
  1673. final_num++;
  1674. }
  1675. else {
  1676. VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
  1677. p->as.free.flags = 0;
  1678. p->as.free.next = sweep_slot->freelist;
  1679. sweep_slot->freelist = p;
  1680. free_num++;
  1681. }
  1682. }
  1683. else {
  1684. free_num++;
  1685. }
  1686. }
  1687. p++;
  1688. }
  1689. gc_clear_slot_bits(sweep_slot);
  1690. if (final_num + free_num == sweep_slot->limit &&
  1691. objspace->heap.free_num > objspace->heap.do_heap_free) {
  1692. RVALUE *pp;
  1693. for (pp = deferred_final_list; pp != final; pp = pp->as.free.next) {
  1694. RDATA(pp)->dmark = (void (*)(void *))(VALUE)sweep_slot;
  1695. pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */
  1696. }
  1697. sweep_slot->limit = final_num;
  1698. unlink_heap_slot(objspace, sweep_slot);
  1699. }
  1700. else {
  1701. if (free_num > 0) {
  1702. link_free_heap_slot(objspace, sweep_slot);
  1703. }
  1704. else {
  1705. sweep_slot->free_next = NULL;
  1706. }
  1707. objspace->heap.free_num += free_num;
  1708. }
  1709. objspace->heap.final_num += final_num;
  1710. if (deferred_final_list && !finalizing) {
  1711. rb_thread_t *th = GET_THREAD();
  1712. if (th) {
  1713. RUBY_VM_SET_FINALIZER_INTERRUPT(th);
  1714. }
  1715. }
  1716. }
  1717. static int
  1718. ready_to_gc(rb_objspace_t *objspace)
  1719. {
  1720. if (dont_gc || during_gc) {
  1721. if (!has_free_object) {
  1722. if (!heaps_increment(objspace)) {
  1723. set_heaps_increment(objspace);
  1724. heaps_increment(objspace);
  1725. }
  1726. }
  1727. return FALSE;
  1728. }
  1729. return TRUE;
  1730. }
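/*
 * before_gc_sweep() sets the sweep thresholds for this cycle: do_heap_free
 * (65% of total capacity) is the free-cell count above which empty pages are
 * handed back, and free_min (20% of capacity, but at least initial_free_min)
 * is the number of free cells that must remain after sweeping; otherwise
 * after_gc_sweep() grows the heap.
 */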
  1731. static void
  1732. before_gc_sweep(rb_objspace_t *objspace)
  1733. {
  1734. objspace->heap.do_heap_free = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.65);
  1735. objspace->heap.free_min = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.2);
  1736. if (objspace->heap.free_min < initial_free_min) {
  1737. objspace->heap.do_heap_free = heaps_used * HEAP_OBJ_LIMIT;
  1738. objspace->heap.free_min = initial_free_min;
  1739. }
  1740. objspace->heap.sweep_slots = heaps;
  1741. objspace->heap.free_num = 0;
  1742. objspace->heap.free_slots = NULL;
  1743. /* sweep unlinked method entries */
  1744. if (GET_VM()->unlinked_method_entry_list) {
  1745. rb_sweep_method_entry(GET_VM());
  1746. }
  1747. }
  1748. static void
  1749. after_gc_sweep(rb_objspace_t *objspace)
  1750. {
  1751. size_t inc;
  1752. gc_prof_set_malloc_info(objspace);
  1753. if (objspace->heap.free_num < objspace->heap.free_min) {
  1754. set_heaps_increment(objspace);
  1755. heaps_increment(objspace);
  1756. }
  1757. inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
  1758. if (inc > malloc_limit) {
  1759. malloc_limit += (size_t)((inc - malloc_limit) * (double)objspace->heap.live_num / (heaps_used * HEAP_OBJ_LIMIT));

Large files are truncated; the remainder of gc.c is not shown here.