
/ext/gc_bmp/gc_bmp.c

https://github.com/wanabe/ruby
C | 2770 lines | 2077 code | 328 blank | 365 comment | 352 complexity | MD5: 23063b91df185ea6a7aa4b504a122e24
Possible License(s): LGPL-2.1, AGPL-3.0, 0BSD, Unlicense, GPL-2.0, BSD-3-Clause


  1. /**********************************************************************
  2. gc_bmp.c -
  3. $Author$
  4. created at: Tue Oct 5 09:44:46 JST 1993
  5. Copyright (C) 1993-2007 Yukihiro Matsumoto
  6. Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
  7. Copyright (C) 2000 Information-technology Promotion Agency, Japan
  8. **********************************************************************/
  9. #include "ruby.h"
  10. #include "ruby/re.h"
  11. #include "ruby/io.h"
  12. #include <stdio.h>
  13. #include <setjmp.h>
  14. #include <sys/types.h>
  15. #ifndef FALSE
  16. # define FALSE 0
  17. #elif FALSE
  18. # error FALSE must be false
  19. #endif
  20. #ifndef TRUE
  21. # define TRUE 1
  22. #elif !TRUE
  23. # error TRUE must be true
  24. #endif
  25. #ifdef HAVE_SYS_TIME_H
  26. #include <sys/time.h>
  27. #endif
  28. #ifdef HAVE_SYS_RESOURCE_H
  29. #include <sys/resource.h>
  30. #endif
  31. #if defined _WIN32 || defined __CYGWIN__
  32. #include <windows.h>
  33. #endif
  34. #ifdef HAVE_VALGRIND_MEMCHECK_H
  35. # include <valgrind/memcheck.h>
  36. # ifndef VALGRIND_MAKE_MEM_DEFINED
  37. # define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE(p, n)
  38. # endif
  39. # ifndef VALGRIND_MAKE_MEM_UNDEFINED
  40. # define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE(p, n)
  41. # endif
  42. #else
  43. # define VALGRIND_MAKE_MEM_DEFINED(p, n) /* empty */
  44. # define VALGRIND_MAKE_MEM_UNDEFINED(p, n) /* empty */
  45. #endif
  46. int rb_io_fptr_finalize(struct rb_io_t*);
  47. #define rb_setjmp(env) RUBY_SETJMP(env)
  48. #define rb_jmp_buf rb_jmpbuf_t
  49. /* Make alloca work the best possible way. */
  50. #ifdef __GNUC__
  51. # ifndef atarist
  52. # ifndef alloca
  53. # define alloca __builtin_alloca
  54. # endif
  55. # endif /* atarist */
  56. #else
  57. # ifdef HAVE_ALLOCA_H
  58. # include <alloca.h>
  59. # else
  60. # ifdef _AIX
  61. #pragma alloca
  62. # else
  63. # ifndef alloca /* predefined by HP cc +Olibcalls */
  64. void *alloca ();
  65. # endif
  66. # endif /* AIX */
  67. # endif /* HAVE_ALLOCA_H */
  68. #endif /* __GNUC__ */
  69. #ifndef GC_MALLOC_LIMIT
  70. #define GC_MALLOC_LIMIT 8000000
  71. #endif
  72. #define MARK_STACK_MAX 1024
  73. /* for GC profile */
  74. #define GC_PROFILE_MORE_DETAIL 1
  75. typedef struct gc_profile_record {
  76. double gc_time;
  77. double gc_mark_time;
  78. double gc_sweep_time;
  79. double gc_invoke_time;
  80. size_t heap_use_slots;
  81. size_t heap_live_objects;
  82. size_t heap_free_objects;
  83. size_t heap_total_objects;
  84. size_t heap_use_size;
  85. size_t heap_total_size;
  86. int have_finalize;
  87. size_t allocate_increase;
  88. size_t allocate_limit;
  89. } gc_profile_record;
  90. static double
  91. getrusage_time(void)
  92. {
  93. #ifdef RUSAGE_SELF
  94. struct rusage usage;
  95. struct timeval time;
  96. getrusage(RUSAGE_SELF, &usage);
  97. time = usage.ru_utime;
  98. return time.tv_sec + time.tv_usec * 1e-6;
  99. #elif defined _WIN32
  100. FILETIME creation_time, exit_time, kernel_time, user_time;
  101. ULARGE_INTEGER ui;
  102. LONG_LONG q;
  103. double t;
  104. if (GetProcessTimes(GetCurrentProcess(),
  105. &creation_time, &exit_time, &kernel_time, &user_time) == 0)
  106. {
  107. return 0.0;
  108. }
  109. memcpy(&ui, &user_time, sizeof(FILETIME));
  110. q = ui.QuadPart / 10L;
  111. t = (DWORD)(q % 1000000L) * 1e-6;
  112. q /= 1000000L;
  113. #ifdef __GNUC__
  114. t += q;
  115. #else
  116. t += (double)(DWORD)(q >> 16) * (1 << 16);
  117. t += (DWORD)q & ~(~0 << 16);
  118. #endif
  119. return t;
  120. #else
  121. return 0.0;
  122. #endif
  123. }
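/*
 * Annotation: getrusage_time() returns the process's user-mode CPU time in
 * seconds (ru_utime via getrusage() on POSIX, GetProcessTimes() on Windows,
 * 0.0 where neither is available).  The GC_PROF_* macros below sample it
 * around the mark and sweep phases to fill in gc_profile_record.
 */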
  124. #define GC_PROF_TIMER_START do {\
  125. if (objspace->profile.run) {\
  126. if (!objspace->profile.record) {\
  127. objspace->profile.size = 1000;\
  128. objspace->profile.record = malloc(sizeof(gc_profile_record) * objspace->profile.size);\
  129. }\
  130. if (count >= objspace->profile.size) {\
  131. objspace->profile.size += 1000;\
  132. objspace->profile.record = realloc(objspace->profile.record, sizeof(gc_profile_record) * objspace->profile.size);\
  133. }\
  134. if (!objspace->profile.record) {\
  135. rb_bug("gc_profile malloc or realloc miss");\
  136. }\
  137. MEMZERO(&objspace->profile.record[count], gc_profile_record, 1);\
  138. gc_time = getrusage_time();\
  139. objspace->profile.record[count].gc_invoke_time = gc_time - objspace->profile.invoke_time;\
  140. }\
  141. } while(0)
  142. #define GC_PROF_TIMER_STOP do {\
  143. if (objspace->profile.run) {\
  144. gc_time = getrusage_time() - gc_time;\
  145. if (gc_time < 0) gc_time = 0;\
  146. objspace->profile.record[count].gc_time = gc_time;\
  147. objspace->profile.count++;\
  148. }\
  149. } while(0)
  150. #if GC_PROFILE_MORE_DETAIL
  151. #define INIT_GC_PROF_PARAMS double gc_time = 0, mark_time = 0, sweep_time = 0;\
  152. size_t count = objspace->profile.count
  153. #define GC_PROF_MARK_TIMER_START do {\
  154. if (objspace->profile.run) {\
  155. mark_time = getrusage_time();\
  156. }\
  157. } while(0)
  158. #define GC_PROF_MARK_TIMER_STOP do {\
  159. if (objspace->profile.run) {\
  160. mark_time = getrusage_time() - mark_time;\
  161. if (mark_time < 0) mark_time = 0;\
  162. objspace->profile.record[count].gc_mark_time = mark_time;\
  163. }\
  164. } while(0)
  165. #define GC_PROF_SWEEP_TIMER_START do {\
  166. if (objspace->profile.run) {\
  167. sweep_time = getrusage_time();\
  168. }\
  169. } while(0)
  170. #define GC_PROF_SWEEP_TIMER_STOP do {\
  171. if (objspace->profile.run) {\
  172. sweep_time = getrusage_time() - sweep_time;\
  173. if (sweep_time < 0) sweep_time = 0;\
  174. objspace->profile.record[count].gc_sweep_time = sweep_time;\
  175. }\
  176. } while(0)
  177. #define GC_PROF_SET_MALLOC_INFO do {\
  178. if (objspace->profile.run) {\
  179. gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\
  180. record->allocate_increase = malloc_increase;\
  181. record->allocate_limit = malloc_limit; \
  182. }\
  183. } while(0)
  184. #define GC_PROF_SET_HEAP_INFO do {\
  185. if (objspace->profile.run) {\
  186. gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\
  187. record->heap_use_slots = heaps_used;\
  188. record->heap_live_objects = live;\
  189. record->heap_free_objects = freed; \
  190. record->heap_total_objects = heaps_used * HEAP_OBJ_LIMIT;\
  191. record->have_finalize = final_list ? Qtrue : Qfalse;\
  192. record->heap_use_size = live * sizeof(RVALUE); \
  193. record->heap_total_size = heaps_used * (HEAP_OBJ_LIMIT * sizeof(RVALUE));\
  194. }\
  195. } while(0)
  196. #else
  197. #define INIT_GC_PROF_PARAMS double gc_time = 0;\
  198. size_t count = objspace->profile.count
  199. #define GC_PROF_MARK_TIMER_START
  200. #define GC_PROF_MARK_TIMER_STOP
  201. #define GC_PROF_SWEEP_TIMER_START
  202. #define GC_PROF_SWEEP_TIMER_STOP
  203. #define GC_PROF_SET_MALLOC_INFO
  204. #define GC_PROF_SET_HEAP_INFO do {\
  205. if (objspace->profile.run) {\
  206. gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\
  207. record->heap_total_objects = heaps_used * HEAP_OBJ_LIMIT;\
  208. record->heap_use_size = live * sizeof(RVALUE); \
  209. record->heap_total_size = heaps_used * HEAP_SIZE;\
  210. }\
  211. } while(0)
  212. #endif
  213. #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
  214. #pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
  215. #endif
  216. typedef struct RVALUE {
  217. union {
  218. struct {
  219. VALUE flags; /* always 0 for freed obj */
  220. struct RVALUE *next;
  221. } free;
  222. struct {
  223. VALUE flags;
  224. struct RVALUE *next;
  225. int *map;
  226. VALUE slot;
  227. int limit;
  228. } bitmap;
  229. struct RBasic basic;
  230. struct RObject object;
  231. struct RClass klass;
  232. struct RFloat flonum;
  233. struct RString string;
  234. struct RArray array;
  235. struct RRegexp regexp;
  236. struct RHash hash;
  237. struct RData data;
  238. struct RTypedData typeddata;
  239. struct RStruct rstruct;
  240. struct RBignum bignum;
  241. struct RFile file;
  242. struct RMatch match;
  243. struct RRational rational;
  244. struct RComplex complex;
  245. } as;
  246. #ifdef GC_DEBUG
  247. const char *file;
  248. int line;
  249. #endif
  250. } RVALUE;
  251. #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
  252. #pragma pack(pop)
  253. #endif
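/*
 * Annotation: unlike the stock GC, which keeps a mark bit in each object's
 * flags (FL_MARK), this collector stores mark bits in a per-slot bitmap.
 * One RVALUE cell inside every heap slot is repurposed as that bitmap: the
 * "bitmap" arm of the union above holds a pointer to the int array of mark
 * bits ("map"), the address of the slot's first object ("slot"), and the
 * number of objects covered ("limit").
 */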
  254. struct heaps_slot {
  255. void *membase;
  256. RVALUE *slot;
  257. size_t limit;
  258. RVALUE *bitmap;
  259. };
  260. #define HEAP_MIN_SLOTS 10000
  261. #define FREE_MIN 4096
  262. struct gc_list {
  263. VALUE *varptr;
  264. struct gc_list *next;
  265. };
  266. #define CALC_EXACT_MALLOC_SIZE 0
  267. typedef struct rb_objspace {
  268. struct {
  269. size_t limit;
  270. size_t increase;
  271. #if CALC_EXACT_MALLOC_SIZE
  272. size_t allocated_size;
  273. size_t allocations;
  274. #endif
  275. } malloc_params;
  276. struct {
  277. size_t increment;
  278. struct heaps_slot *ptr;
  279. size_t length;
  280. size_t used;
  281. RVALUE *freelist;
  282. RVALUE *range[2];
  283. RVALUE *freed;
  284. } heap;
  285. struct {
  286. int dont_gc;
  287. int during_gc;
  288. } flags;
  289. struct {
  290. st_table *table;
  291. RVALUE *deferred;
  292. } final;
  293. struct {
  294. VALUE buffer[MARK_STACK_MAX];
  295. VALUE *ptr;
  296. int overflow;
  297. } markstack;
  298. struct {
  299. int run;
  300. gc_profile_record *record;
  301. size_t count;
  302. size_t size;
  303. double invoke_time;
  304. } profile;
  305. struct gc_list *global_list;
  306. unsigned int count;
  307. int gc_stress;
  308. struct {
  309. RVALUE *freed_bitmap;
  310. } ext_heap;
  311. } rb_objspace_t;
  312. #define malloc_limit objspace->malloc_params.limit
  313. #define malloc_increase objspace->malloc_params.increase
  314. #define heap_slots objspace->heap.slots
  315. #define heaps objspace->heap.ptr
  316. #define heaps_length objspace->heap.length
  317. #define heaps_used objspace->heap.used
  318. #define freelist objspace->heap.freelist
  319. #define lomem objspace->heap.range[0]
  320. #define himem objspace->heap.range[1]
  321. #define heaps_inc objspace->heap.increment
  322. #define heaps_freed objspace->heap.freed
  323. #define dont_gc objspace->flags.dont_gc
  324. #define during_gc objspace->flags.during_gc
  325. #define finalizer_table objspace->final.table
  326. #define deferred_final_list objspace->final.deferred
  327. #define mark_stack objspace->markstack.buffer
  328. #define mark_stack_ptr objspace->markstack.ptr
  329. #define mark_stack_overflow objspace->markstack.overflow
  330. #define global_List objspace->global_list
  331. #define ruby_gc_stress objspace->gc_stress
  332. #define need_call_final (finalizer_table && finalizer_table->num_entries)
  333. static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
  334. #include "ruby/gc_ext.h"
  335. static rb_gc_inner_t *gc_inner;
  336. /* TODO: find a more suitable and safer representation */
  337. #define T_BITMAP (T_FIXNUM + 1)
  338. #define FL_ALIGNOFF FL_MARK
  339. static rb_objspace_t *
  340. rb_objspace_alloc_tmp(void)
  341. {
  342. rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t));
  343. memset(objspace, 0, sizeof(*objspace));
  344. malloc_limit = GC_MALLOC_LIMIT;
  345. return objspace;
  346. }
  347. static void
  348. rb_objspace_free_tmp(rb_objspace_t *objspace)
  349. {
  350. rb_objspace_call_finalizer(objspace);
  351. if (objspace->profile.record) {
  352. free(objspace->profile.record);
  353. objspace->profile.record = 0;
  354. }
  355. if (global_List) {
  356. struct gc_list *list, *next;
  357. for (list = global_List; list; list = next) {
  358. next = list->next;
  359. free(list);
  360. }
  361. }
  362. if (heaps) {
  363. size_t i;
  364. for (i = 0; i < heaps_used; ++i) {
  365. free(heaps[i].membase);
  366. }
  367. free(heaps);
  368. heaps_used = 0;
  369. heaps = 0;
  370. }
  371. free(objspace);
  372. }
  373. /* tiny heap size */
  374. /* 32KB */
  375. /*#define HEAP_SIZE 0x8000 */
  376. /* 128KB */
  377. /*#define HEAP_SIZE 0x20000 */
  378. /* 64KB */
  379. /*#define HEAP_SIZE 0x10000 */
  380. /* 16KB */
  381. #define BITMAP_ALIGN 0x4000
  382. /* 8KB */
  383. /*#define HEAP_SIZE 0x2000 */
  384. /* 4KB */
  385. /*#define HEAP_SIZE 0x1000 */
  386. /* 2KB */
  387. /*#define HEAP_SIZE 0x800 */
  388. #define HEAP_SIZE ((BITMAP_ALIGN / sizeof(struct RVALUE) + 2) * sizeof(RVALUE))
  389. #define BITMAP_MASK (0xFFFFFFFF - BITMAP_ALIGN + 1)
  390. #define HEAP_OBJ_LIMIT (HEAP_SIZE / sizeof(struct RVALUE) - 1)
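/*
 * Annotation: heap geometry.  BITMAP_ALIGN is 0x4000 (16 KiB).  HEAP_SIZE
 * appears to be sized so that every malloc()ed slot contains a 16 KiB
 * boundary; the RVALUE cell at the boundary nearest the end of the slot is
 * reserved as the slot's mark bitmap, and the other HEAP_OBJ_LIMIT cells
 * hold objects.  Illustrative arithmetic, assuming sizeof(RVALUE) == 20
 * (a 32-bit build, cf. the pack(1) pragma above):
 *
 *   BITMAP_ALIGN / sizeof(RVALUE) = 16384 / 20 = 819
 *   HEAP_SIZE       = (819 + 2) * 20 = 16420 bytes per slot
 *   HEAP_OBJ_LIMIT  = 16420 / 20 - 1 = 820 objects per slot
 *   ints per bitmap = 820 / 32 + 1   = 26  (cf. make_bitmap and the debug
 *                                           loops below)
 *
 * BITMAP_MASK (0xFFFFC000) equals ~(BITMAP_ALIGN - 1) for 32-bit VALUEs,
 * so "p & BITMAP_MASK" rounds an address down to a 16 KiB boundary.
 */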
  391. extern VALUE rb_cMutex;
  392. extern st_table *rb_class_tbl;
  393. int ruby_disable_gc_stress = 0;
  394. static void run_final(rb_objspace_t *objspace, VALUE obj);
  395. static int garbage_collect(rb_objspace_t *objspace);
  396. /*
  397. * call-seq:
  398. * GC.stress => true or false
  399. *
  400. * returns current status of GC stress mode.
  401. */
  402. static VALUE
  403. gc_stress_get(VALUE self)
  404. {
  405. rb_objspace_t *objspace = gc_inner->get_objspace();
  406. return ruby_gc_stress ? Qtrue : Qfalse;
  407. }
  408. /*
  409. * call-seq:
  410. * GC.stress = bool => bool
  411. *
  412. * updates GC stress mode.
  413. *
  414. * When GC.stress = true, GC is invoked at every GC opportunity:
  415. * every memory and object allocation.
  416. *
  417. * Since it makes Ruby very slow, it is only for debugging.
  418. */
  419. static VALUE
  420. gc_stress_set(VALUE self, VALUE flag)
  421. {
  422. rb_objspace_t *objspace = gc_inner->get_objspace();
  423. rb_secure(2);
  424. ruby_gc_stress = RTEST(flag);
  425. return flag;
  426. }
  427. /*
  428. * call-seq:
  429. * GC::Profiler.enable? => true or false
  430. *
  431. * returns current status of GC profile mode.
  432. */
  433. static VALUE
  434. gc_profile_enable_get(VALUE self)
  435. {
  436. rb_objspace_t *objspace = gc_inner->get_objspace();
  437. return objspace->profile.run ? Qtrue : Qfalse;
  438. }
  439. /*
  440. * call-seq:
  441. * GC::Profiler.enable => nil
  442. *
  443. * updates GC profile mode:
  444. * starts the profiler for GC.
  445. *
  446. */
  447. static VALUE
  448. gc_profile_enable(void)
  449. {
  450. rb_objspace_t *objspace = gc_inner->get_objspace();
  451. objspace->profile.run = TRUE;
  452. return Qnil;
  453. }
  454. /*
  455. * call-seq:
  456. * GC::Profiler.disable => nil
  457. *
  458. * updates GC profile mode:
  459. * stops the profiler for GC.
  460. *
  461. */
  462. static VALUE
  463. gc_profile_disable(void)
  464. {
  465. rb_objspace_t *objspace = gc_inner->get_objspace();
  466. objspace->profile.run = FALSE;
  467. return Qnil;
  468. }
  469. /*
  470. * call-seq:
  471. * GC::Profiler.clear => nil
  472. *
  473. * clears previously collected profile data.
  474. *
  475. */
  476. static VALUE
  477. gc_profile_clear(void)
  478. {
  479. rb_objspace_t *objspace = gc_inner->get_objspace();
  480. MEMZERO(objspace->profile.record, gc_profile_record, objspace->profile.size);
  481. objspace->profile.count = 0;
  482. return Qnil;
  483. }
  484. static void vm_xfree(rb_objspace_t *objspace, void *ptr);
  485. static void *
  486. vm_xmalloc(rb_objspace_t *objspace, size_t size)
  487. {
  488. void *mem;
  489. if ((ssize_t)size < 0) {
  490. gc_inner->negative_size_allocation_error("negative allocation size (or too big)");
  491. }
  492. if (size == 0) size = 1;
  493. #if CALC_EXACT_MALLOC_SIZE
  494. size += sizeof(size_t);
  495. #endif
  496. if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
  497. (malloc_increase+size) > malloc_limit) {
  498. gc_inner->garbage_collect_with_gvl(objspace);
  499. }
  500. mem = malloc(size);
  501. if (!mem) {
  502. if (gc_inner->garbage_collect_with_gvl(objspace)) {
  503. mem = malloc(size);
  504. }
  505. if (!mem) {
  506. gc_inner->ruby_memerror();
  507. }
  508. }
  509. malloc_increase += size;
  510. #if CALC_EXACT_MALLOC_SIZE
  511. objspace->malloc_params.allocated_size += size;
  512. objspace->malloc_params.allocations++;
  513. ((size_t *)mem)[0] = size;
  514. mem = (size_t *)mem + 1;
  515. #endif
  516. return mem;
  517. }
  518. static void *
  519. vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size)
  520. {
  521. void *mem;
  522. if ((ssize_t)size < 0) {
  523. gc_inner->negative_size_allocation_error("negative re-allocation size");
  524. }
  525. if (!ptr) return vm_xmalloc(objspace, size);
  526. if (size == 0) {
  527. vm_xfree(objspace, ptr);
  528. return 0;
  529. }
  530. if (ruby_gc_stress && !ruby_disable_gc_stress)
  531. gc_inner->garbage_collect_with_gvl(objspace);
  532. #if CALC_EXACT_MALLOC_SIZE
  533. size += sizeof(size_t);
  534. objspace->malloc_params.allocated_size -= size;
  535. ptr = (size_t *)ptr - 1;
  536. #endif
  537. mem = realloc(ptr, size);
  538. if (!mem) {
  539. if (gc_inner->garbage_collect_with_gvl(objspace)) {
  540. mem = realloc(ptr, size);
  541. }
  542. if (!mem) {
  543. gc_inner->ruby_memerror();
  544. }
  545. }
  546. malloc_increase += size;
  547. #if CALC_EXACT_MALLOC_SIZE
  548. objspace->malloc_params.allocated_size += size;
  549. ((size_t *)mem)[0] = size;
  550. mem = (size_t *)mem + 1;
  551. #endif
  552. return mem;
  553. }
  554. static void
  555. vm_xfree(rb_objspace_t *objspace, void *ptr)
  556. {
  557. #if CALC_EXACT_MALLOC_SIZE
  558. size_t size;
  559. ptr = ((size_t *)ptr) - 1;
  560. size = ((size_t*)ptr)[0];
  561. objspace->malloc_params.allocated_size -= size;
  562. objspace->malloc_params.allocations--;
  563. #endif
  564. free(ptr);
  565. }
  566. static void *
  567. ruby_xmalloc_tmp(size_t size)
  568. {
  569. return vm_xmalloc(gc_inner->get_objspace(), size);
  570. }
  571. static void *
  572. ruby_xmalloc2_tmp(size_t n, size_t size)
  573. {
  574. size_t len = size * n;
  575. if (n != 0 && size != len / n) {
  576. rb_raise(rb_eArgError, "malloc: possible integer overflow");
  577. }
  578. return vm_xmalloc(gc_inner->get_objspace(), len);
  579. }
  580. static void *
  581. ruby_xcalloc_tmp(size_t n, size_t size)
  582. {
  583. void *mem = ruby_xmalloc2(n, size);
  584. memset(mem, 0, n * size);
  585. return mem;
  586. }
  587. static void *
  588. ruby_xrealloc_tmp(void *ptr, size_t size)
  589. {
  590. return vm_xrealloc(gc_inner->get_objspace(), ptr, size);
  591. }
  592. static void *
  593. ruby_xrealloc2_tmp(void *ptr, size_t n, size_t size)
  594. {
  595. size_t len = size * n;
  596. if (n != 0 && size != len / n) {
  597. rb_raise(rb_eArgError, "realloc: possible integer overflow");
  598. }
  599. return ruby_xrealloc(ptr, len);
  600. }
  601. static void
  602. ruby_xfree_tmp(void *x)
  603. {
  604. if (x)
  605. vm_xfree(gc_inner->get_objspace(), x);
  606. }
  607. /*
  608. * call-seq:
  609. * GC.enable => true or false
  610. *
  611. * Enables garbage collection, returning <code>true</code> if garbage
  612. * collection was previously disabled.
  613. *
  614. * GC.disable #=> false
  615. * GC.enable #=> true
  616. * GC.enable #=> false
  617. *
  618. */
  619. static VALUE
  620. rb_gc_enable_tmp(void)
  621. {
  622. rb_objspace_t *objspace = gc_inner->get_objspace();
  623. int old = dont_gc;
  624. dont_gc = FALSE;
  625. return old ? Qtrue : Qfalse;
  626. }
  627. /*
  628. * call-seq:
  629. * GC.disable => true or false
  630. *
  631. * Disables garbage collection, returning <code>true</code> if garbage
  632. * collection was already disabled.
  633. *
  634. * GC.disable #=> false
  635. * GC.disable #=> true
  636. *
  637. */
  638. static VALUE
  639. rb_gc_disable_tmp(void)
  640. {
  641. rb_objspace_t *objspace = gc_inner->get_objspace();
  642. int old = dont_gc;
  643. dont_gc = TRUE;
  644. return old ? Qtrue : Qfalse;
  645. }
  646. extern VALUE rb_mGC;
  647. static void
  648. rb_gc_register_address_tmp(VALUE *addr)
  649. {
  650. rb_objspace_t *objspace = gc_inner->get_objspace();
  651. struct gc_list *tmp;
  652. tmp = ALLOC(struct gc_list);
  653. tmp->next = global_List;
  654. tmp->varptr = addr;
  655. global_List = tmp;
  656. }
  657. static void
  658. rb_gc_unregister_address_tmp(VALUE *addr)
  659. {
  660. rb_objspace_t *objspace = gc_inner->get_objspace();
  661. struct gc_list *tmp = global_List;
  662. if (tmp->varptr == addr) {
  663. global_List = tmp->next;
  664. xfree(tmp);
  665. return;
  666. }
  667. while (tmp->next) {
  668. if (tmp->next->varptr == addr) {
  669. struct gc_list *t = tmp->next;
  670. tmp->next = tmp->next->next;
  671. xfree(t);
  672. break;
  673. }
  674. tmp = tmp->next;
  675. }
  676. }
  677. static void
  678. allocate_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
  679. {
  680. struct heaps_slot *p;
  681. size_t size;
  682. size = next_heaps_length*sizeof(struct heaps_slot);
  683. if (heaps_used > 0) {
  684. p = (struct heaps_slot *)realloc(heaps, size);
  685. if (p) heaps = p;
  686. }
  687. else {
  688. p = heaps = (struct heaps_slot *)malloc(size);
  689. }
  690. if (p == 0) {
  691. during_gc = 0;
  692. rb_memerror();
  693. }
  694. heaps_length = next_heaps_length;
  695. }
  696. #define FIND_BITMAP(res, p) do {\
  697. if (((RVALUE *)p)->as.free.flags & FL_ALIGNOFF) {\
  698. res = (RVALUE *)((((VALUE)p & BITMAP_MASK) + BITMAP_ALIGN) / sizeof(RVALUE) * sizeof(RVALUE)); \
  699. }\
  700. else {\
  701. res = (RVALUE *)(((VALUE)p & BITMAP_MASK) / sizeof(RVALUE) * sizeof(RVALUE));\
  702. }\
  703. } while(0)
  704. #define NUM_IN_SLOT(p, slot) (((VALUE)p - (VALUE)slot)/sizeof(RVALUE))
  705. #define BITMAP_INDEX(bmap, p) (NUM_IN_SLOT(p, bmap->as.bitmap.slot) / (sizeof(int) * 8))
  706. /* #define BITMAP_INDEX(bmap, p) (NUM_IN_SLOT(p, bmap->as.bitmap.slot) >> 5) */
  707. #define BITMAP_OFFSET(bmap, p) (NUM_IN_SLOT(p, bmap->as.bitmap.slot) & ((sizeof(int) * 8)-1))
  708. #define MARKED_IN_BITMAP(bmap, p) (bmap->as.bitmap.map[BITMAP_INDEX(bmap, p)] & 1 << BITMAP_OFFSET(bmap, p))
  709. #define MARK_IN_BITMAP(bmap, p) (bmap->as.bitmap.map[BITMAP_INDEX(bmap, p)] |= 1 << BITMAP_OFFSET(bmap, p))
  710. #define CLEAR_IN_BITMAP(bmap, p) (bmap->as.bitmap.map[BITMAP_INDEX(bmap, p)] &= ~(1 << BITMAP_OFFSET(bmap, p)))
  711. #define MARKED_IN_BITMAP_DIRECT(map, index, offset) (map[index] & 1 << offset)
  712. #define MARK_IN_BITMAP_DIRECT(map, index, offset) (map[index] |= 1 << offset)
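/*
 * Annotation: bitmap addressing.  Given an object pointer p, FIND_BITMAP
 * locates the slot's bitmap cell without any table lookup: "p & BITMAP_MASK"
 * rounds p down to the nearest 16 KiB boundary, and FL_ALIGNOFF in p's flags
 * records whether p lies on the low-address side of the slot's bitmap (in
 * which case one BITMAP_ALIGN is added to reach the boundary inside the
 * slot).  The result is rounded down to a multiple of sizeof(RVALUE) to land
 * on the bitmap cell itself.  NUM_IN_SLOT gives p's index within its slot;
 * BITMAP_INDEX / BITMAP_OFFSET split that index into an int index and a bit
 * position (32 bits per int).  For example, the 70th object of a slot has
 * index 69, so its mark bit is bit 5 of map[2] (69 / 32 == 2, 69 % 32 == 5).
 */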
  713. /* for debug */
  714. void
  715. bitmap_p(RVALUE *p)
  716. {
  717. RVALUE *bmap;
  718. int index, offset, marked;
  719. FIND_BITMAP(bmap, p);
  720. index = BITMAP_INDEX(bmap, p);
  721. offset = BITMAP_OFFSET(bmap, p);
  722. marked = MARKED_IN_BITMAP(bmap, p);
  723. printf("bitmap : ((RVALUE *)%p)\n", bmap);
  724. printf("map_index : %d | offset : %d\n", index, offset);
  725. printf("is mark ? %s\n", marked? "true" : "false");
  726. }
  727. VALUE
  728. find_bitmap(RVALUE *p) {
  729. RVALUE *res;
  730. FIND_BITMAP(res, p);
  731. return (VALUE)res;
  732. }
  733. void
  734. dump_bitmap(RVALUE *bmap) {
  735. int i;
  736. for (i = 0; i < 26; i++) {
  737. printf("dump %p map %d : %d %s\n", bmap, i, bmap->as.bitmap.map[i], bmap->as.bitmap.map[i]? "remain" : "clean");
  738. }
  739. }
  740. void
  741. bitmap2obj(RVALUE *bmap, int index, int offset)
  742. {
  743. printf("(RVALUE *)%p\n", (RVALUE *)(bmap->as.bitmap.slot + (index * sizeof(int) * 8 + offset) * sizeof(RVALUE)));
  744. }
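/*
 * Annotation: make_bitmap() prepares a freshly allocated heap slot.  It
 * locates the RVALUE cell sitting on the slot's 16 KiB boundary, sets
 * FL_ALIGNOFF on the cells on the low-address side of that boundary (so
 * FIND_BITMAP knows which way to round), allocates the int array of mark
 * bits, tags the chosen cell with the pseudo-type T_BITMAP so allocation and
 * sweep skip it, and records it in the heaps_slot structure.
 */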
  745. static void
  746. make_bitmap(struct heaps_slot *slot)
  747. {
  748. RVALUE *p, *pend, *bitmap, *last, *border;
  749. int *map = 0;
  750. int size;
  751. p = slot->slot;
  752. pend = p + slot->limit;
  753. last = pend - 1;
  754. RBASIC(last)->flags = 0;
  755. FIND_BITMAP(bitmap, last);
  756. if (bitmap < p || pend <= bitmap) {
  757. rb_bug("not include in heap slot: result bitmap(%p), find (%p), p (%p), pend(%p)", bitmap, last, p, pend);
  758. }
  759. border = bitmap;
  760. if (!((VALUE)border % BITMAP_ALIGN)) {
  761. border--;
  762. }
  763. while (p < pend) {
  764. if (p <= border) {
  765. RBASIC(p)->flags = FL_ALIGNOFF;
  766. }
  767. else {
  768. RBASIC(p)->flags = 0;
  769. }
  770. p++;
  771. }
  772. size = sizeof(int) * (HEAP_OBJ_LIMIT / (sizeof(int) * 8)+1);
  773. map = (int *)malloc(size);
  774. if (map == 0) {
  775. rb_memerror();
  776. }
  777. MEMZERO(map, int, (size/sizeof(int)));
  778. bitmap->as.bitmap.flags |= T_BITMAP;
  779. bitmap->as.bitmap.map = map;
  780. bitmap->as.bitmap.slot = (VALUE)slot->slot;
  781. bitmap->as.bitmap.limit = slot->limit;
  782. slot->bitmap = bitmap;
  783. }
  784. void
  785. test_bitmap(RVALUE *p, RVALUE *pend)
  786. {
  787. RVALUE *first, *bmap = 0, *bmap_tmp;
  788. int i;
  789. first = p;
  790. FIND_BITMAP(bmap_tmp, p);
  791. while (p < pend) {
  792. if (MARKED_IN_BITMAP(bmap, p)) printf("already marking! %p\n", p);
  793. if (bmap_tmp != p) {
  794. FIND_BITMAP(bmap, p);
  795. if (bmap_tmp != bmap) printf("difference bmap %p : %p\n", bmap_tmp, bmap);
  796. MARK_IN_BITMAP(bmap, p);
  797. }
  798. else {
  799. MARK_IN_BITMAP(bmap, p);
  800. }
  801. if (!MARKED_IN_BITMAP(bmap, p)) printf("not marking! %p\n", p);
  802. p++;
  803. }
  804. for (i =0; i < 26; i++) {
  805. printf("bitmap[%d] : %x\n", i, bmap->as.bitmap.map[i]);
  806. }
  807. p = first;
  808. while (p < pend) {
  809. if (bmap_tmp != p) {
  810. FIND_BITMAP(bmap, p);
  811. CLEAR_IN_BITMAP(bmap, p);
  812. }
  813. else {
  814. CLEAR_IN_BITMAP(bmap, p);
  815. }
  816. if (MARKED_IN_BITMAP(bmap, p)) printf("not clear! %p\n", p);
  817. p++;
  818. }
  819. for (i =0; i < 26; i++) {
  820. printf("bitmap[%d] : %x\n", i, bmap->as.bitmap.map[i]);
  821. }
  822. }
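/*
 * Annotation: assign_heap_slot() adds one heap slot.  It malloc()s HEAP_SIZE
 * bytes, aligns the first object to a multiple of sizeof(RVALUE), inserts the
 * slot into the heaps[] array (kept sorted by membase, position found by
 * binary search), updates the lomem/himem range used by is_pointer_to_heap(),
 * builds the slot's bitmap via make_bitmap(), and threads every cell except
 * the bitmap cell onto the freelist.
 */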
  823. static void
  824. assign_heap_slot(rb_objspace_t *objspace)
  825. {
  826. RVALUE *p, *pend, *membase;
  827. size_t hi, lo, mid;
  828. size_t objs;
  829. objs = HEAP_OBJ_LIMIT;
  830. p = (RVALUE*)malloc(HEAP_SIZE);
  831. if (p == 0) {
  832. during_gc = 0;
  833. rb_memerror();
  834. }
  835. membase = p;
  836. if ((VALUE)p % sizeof(RVALUE) != 0) {
  837. p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
  838. }
  839. lo = 0;
  840. hi = heaps_used;
  841. while (lo < hi) {
  842. register RVALUE *mid_membase;
  843. mid = (lo + hi) / 2;
  844. mid_membase = heaps[mid].membase;
  845. if (mid_membase < membase) {
  846. lo = mid + 1;
  847. }
  848. else if (mid_membase > membase) {
  849. hi = mid;
  850. }
  851. else {
  852. rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid);
  853. }
  854. }
  855. if (hi < heaps_used) {
  856. MEMMOVE(&heaps[hi+1], &heaps[hi], struct heaps_slot, heaps_used - hi);
  857. }
  858. heaps[hi].membase = membase;
  859. heaps[hi].slot = p;
  860. heaps[hi].limit = objs;
  861. pend = p + objs;
  862. if (lomem == 0 || lomem > p) lomem = p;
  863. if (himem < pend) himem = pend;
  864. heaps_used++;
  865. make_bitmap(&heaps[hi]);
  866. while (p < pend) {
  867. if (BUILTIN_TYPE(p) != T_BITMAP) {
  868. p->as.free.next = freelist;
  869. freelist = p;
  870. }
  871. p++;
  872. }
  873. }
  874. static void
  875. init_heap(rb_objspace_t *objspace)
  876. {
  877. size_t add, i;
  878. add = HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT;
  879. if (!add) {
  880. add = 1;
  881. }
  882. if ((heaps_used + add) > heaps_length) {
  883. allocate_heaps(objspace, heaps_used + add);
  884. }
  885. for (i = 0; i < add; i++) {
  886. assign_heap_slot(objspace);
  887. }
  888. heaps_inc = 0;
  889. objspace->profile.invoke_time = getrusage_time();
  890. }
  891. static void
  892. set_heaps_increment(rb_objspace_t *objspace)
  893. {
  894. size_t next_heaps_length = (size_t)(heaps_used * 1.8);
  895. if (next_heaps_length == heaps_used) {
  896. next_heaps_length++;
  897. }
  898. heaps_inc = next_heaps_length - heaps_used;
  899. if (next_heaps_length > heaps_length) {
  900. allocate_heaps(objspace, next_heaps_length);
  901. }
  902. }
  903. static int
  904. heaps_increment(rb_objspace_t *objspace)
  905. {
  906. if (heaps_inc > 0) {
  907. assign_heap_slot(objspace);
  908. heaps_inc--;
  909. return TRUE;
  910. }
  911. return FALSE;
  912. }
  913. #define RANY(o) ((RVALUE*)(o))
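/*
 * Annotation: allocation path.  rb_newobj_from_heap() pops the head of the
 * freelist, growing the heap or running a collection first when the freelist
 * is empty (or on every allocation under GC.stress).  The FL_ALIGNOFF bit is
 * saved across the MEMZERO and restored, because it encodes where the slot's
 * bitmap lies relative to this cell and must stay valid for the object's
 * whole lifetime.
 */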
  914. static VALUE
  915. rb_newobj_from_heap(rb_objspace_t *objspace)
  916. {
  917. VALUE obj;
  918. int bmap_left = 0;
  919. if ((ruby_gc_stress && !ruby_disable_gc_stress) || !freelist) {
  920. if (!heaps_increment(objspace) && !garbage_collect(objspace)) {
  921. during_gc = 0;
  922. rb_memerror();
  923. }
  924. }
  925. obj = (VALUE)freelist;
  926. freelist = freelist->as.free.next;
  927. if (RANY(obj)->as.free.flags & FL_ALIGNOFF) {
  928. bmap_left = Qtrue;
  929. }
  930. MEMZERO((void*)obj, RVALUE, 1);
  931. if (bmap_left) {
  932. RANY(obj)->as.free.flags = FL_ALIGNOFF;
  933. }
  934. #ifdef GC_DEBUG
  935. RANY(obj)->file = rb_sourcefile();
  936. RANY(obj)->line = rb_sourceline();
  937. #endif
  938. return obj;
  939. }
  940. /* TODO: remove this function. */
  941. #if USE_VALUE_CACHE
  942. static VALUE
  943. rb_fill_value_cache(rb_thread_t *th)
  944. {
  945. rb_objspace_t *objspace = gc_inner->get_objspace();
  946. int i;
  947. VALUE rv;
  948. RVALUE *bmap;
  949. /* LOCK */
  950. for (i=0; i<RUBY_VM_VALUE_CACHE_SIZE; i++) {
  951. VALUE v = rb_newobj_from_heap(objspace);
  952. th->value_cache[i] = v;
  953. FIND_BITMAP(bmap, v);
  954. MARK_IN_BITMAP(bmap, v);
  955. }
  956. th->value_cache_ptr = &th->value_cache[0];
  957. rv = rb_newobj_from_heap(objspace);
  958. /* UNLOCK */
  959. return rv;
  960. }
  961. #endif
  962. static int
  963. rb_during_gc_tmp(void)
  964. {
  965. rb_objspace_t *objspace = gc_inner->get_objspace();
  966. return during_gc;
  967. }
  968. static VALUE
  969. rb_newobj_tmp(void)
  970. {
  971. #if USE_VALUE_CACHE
  972. rb_thread_t *th = GET_THREAD();
  973. VALUE v = *th->value_cache_ptr;
  974. #endif
  975. rb_objspace_t *objspace = gc_inner->get_objspace();
  976. if (during_gc) {
  977. dont_gc = 1;
  978. during_gc = 0;
  979. rb_bug("object allocation during garbage collection phase");
  980. }
  981. #if USE_VALUE_CACHE
  982. if (v) {
  983. rb_set_flag_force(v, 0);
  984. th->value_cache_ptr++;
  985. }
  986. else {
  987. v = rb_fill_value_cache(th);
  988. }
  989. #if defined(GC_DEBUG)
  990. printf("cache index: %d, v: %p, th: %p\n",
  991. th->value_cache_ptr - th->value_cache, v, th);
  992. #endif
  993. return v;
  994. #else
  995. return rb_newobj_from_heap(objspace);
  996. #endif
  997. }
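/*
 * Annotation: rb_set_flag_force() appears to replace the direct flag
 * assignments of the stock GC (freeing a cell, turning it into a zombie,
 * and so on): it overwrites RBASIC(obj)->flags but always preserves
 * FL_ALIGNOFF, for the same reason as in rb_newobj_from_heap() above.
 */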
  998. static void
  999. rb_set_flag_force_tmp(VALUE obj, VALUE t)
  1000. {
  1001. t = t & ~FL_ALIGNOFF;
  1002. if (RBASIC(obj)->flags & FL_ALIGNOFF) {
  1003. RBASIC(obj)->flags = FL_ALIGNOFF | t;
  1004. }
  1005. else {
  1006. RBASIC(obj)->flags = t;
  1007. }
  1008. }
  1009. static VALUE
  1010. rb_data_object_alloc_tmp(VALUE klass, void *datap, RUBY_DATA_FUNC dmark,
  1011. RUBY_DATA_FUNC dfree)
  1012. {
  1013. NEWOBJ(data, struct RData);
  1014. if (klass) Check_Type(klass, T_CLASS);
  1015. OBJSETUP(data, klass, T_DATA);
  1016. data->data = datap;
  1017. data->dfree = dfree;
  1018. data->dmark = dmark;
  1019. return (VALUE)data;
  1020. }
  1021. static VALUE
  1022. rb_data_typed_object_alloc_tmp(VALUE klass, void *datap,
  1023. const rb_data_type_t *type)
  1024. {
  1025. NEWOBJ(data, struct RTypedData);
  1026. if (klass) Check_Type(klass, T_CLASS);
  1027. OBJSETUP(data, klass, T_DATA);
  1028. data->data = datap;
  1029. data->typed_flag = 1;
  1030. data->type = type;
  1031. return (VALUE)data;
  1032. }
  1033. static size_t
  1034. rb_objspace_data_type_memsize_tmp(VALUE obj)
  1035. {
  1036. if (RTYPEDDATA_P(obj)) {
  1037. return RTYPEDDATA_TYPE(obj)->dsize(RTYPEDDATA_DATA(obj));
  1038. }
  1039. else {
  1040. return 0;
  1041. }
  1042. }
  1043. static const char *
  1044. rb_objspace_data_type_name_tmp(VALUE obj)
  1045. {
  1046. if (RTYPEDDATA_P(obj)) {
  1047. return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
  1048. }
  1049. else {
  1050. return 0;
  1051. }
  1052. }
  1053. #ifdef __ia64
  1054. #define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine_stack_end), th->machine_register_stack_end = rb_ia64_bsp())
  1055. #else
  1056. #define SET_STACK_END SET_MACHINE_STACK_END(&th->machine_stack_end)
  1057. #endif
  1058. #define STACK_START (th->machine_stack_start)
  1059. #define STACK_END (th->machine_stack_end)
  1060. #define STACK_LEVEL_MAX (th->machine_stack_maxsize/sizeof(VALUE))
  1061. #if STACK_GROW_DIRECTION < 0
  1062. # define STACK_LENGTH (size_t)(STACK_START - STACK_END)
  1063. #elif STACK_GROW_DIRECTION > 0
  1064. # define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
  1065. #else
  1066. # define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
  1067. : (size_t)(STACK_END - STACK_START + 1))
  1068. #endif
  1069. #if !STACK_GROW_DIRECTION
  1070. int ruby_stack_grow_direction;
  1071. static int
  1072. ruby_get_stack_grow_direction_tmp(volatile VALUE *addr)
  1073. {
  1074. VALUE *end;
  1075. SET_MACHINE_STACK_END(&end);
  1076. if (end > addr) return ruby_stack_grow_direction = 1;
  1077. return ruby_stack_grow_direction = -1;
  1078. }
  1079. #endif
  1080. #define GC_WATER_MARK 512
  1081. static int
  1082. ruby_stack_check_tmp(void)
  1083. {
  1084. #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
  1085. return 0;
  1086. #else
  1087. return gc_inner->stack_check();
  1088. #endif
  1089. }
  1090. static void
  1091. init_mark_stack(rb_objspace_t *objspace)
  1092. {
  1093. mark_stack_overflow = 0;
  1094. mark_stack_ptr = mark_stack;
  1095. }
  1096. #define MARK_STACK_EMPTY (mark_stack_ptr == mark_stack)
  1097. static void gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev);
  1098. #define IS_FREE_CELL(obj) ((obj->as.basic.flags & ~(FL_ALIGNOFF)) == 0)
  1099. static void
  1100. gc_mark_all(rb_objspace_t *objspace)
  1101. {
  1102. RVALUE *p, *pend, *bmap;
  1103. size_t i;
  1104. init_mark_stack(objspace);
  1105. for (i = 0; i < heaps_used; i++) {
  1106. p = heaps[i].slot; pend = p + heaps[i].limit;
  1107. bmap = heaps[i].bitmap;
  1108. while (p < pend) {
  1109. if (MARKED_IN_BITMAP(bmap, p) &&
  1110. !(IS_FREE_CELL(p))) {
  1111. gc_inner->gc_mark_children(objspace, (VALUE)p, 0);
  1112. }
  1113. p++;
  1114. }
  1115. }
  1116. }
  1117. static void
  1118. gc_mark_rest(rb_objspace_t *objspace)
  1119. {
  1120. VALUE tmp_arry[MARK_STACK_MAX];
  1121. VALUE *p;
  1122. p = (mark_stack_ptr - mark_stack) + tmp_arry;
  1123. MEMCPY(tmp_arry, mark_stack, VALUE, p - tmp_arry);
  1124. init_mark_stack(objspace);
  1125. while (p != tmp_arry) {
  1126. p--;
  1127. gc_inner->gc_mark_children(objspace, *p, 0);
  1128. }
  1129. }
  1130. static inline int
  1131. is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
  1132. {
  1133. register RVALUE *p = RANY(ptr);
  1134. register struct heaps_slot *heap;
  1135. register size_t hi, lo, mid;
  1136. if (p < lomem || p > himem) return FALSE;
  1137. if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
  1138. /* check if p looks like a pointer into the heap, using binary search */
  1139. lo = 0;
  1140. hi = heaps_used;
  1141. while (lo < hi) {
  1142. mid = (lo + hi) / 2;
  1143. heap = &heaps[mid];
  1144. if (heap->slot <= p) {
  1145. if (p < heap->slot + heap->limit)
  1146. return TRUE;
  1147. lo = mid + 1;
  1148. }
  1149. else {
  1150. hi = mid;
  1151. }
  1152. }
  1153. return FALSE;
  1154. }
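/*
 * Annotation: conservative stack/register marking.  mark_locations_array()
 * treats every word in the given range as a potential VALUE and marks it only
 * if is_pointer_to_heap() confirms that it points into a live heap slot
 * (range check against lomem/himem, sizeof(RVALUE) alignment check, then
 * binary search over the sorted heaps[] array).
 */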
  1155. static void
  1156. mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n)
  1157. {
  1158. VALUE v;
  1159. while (n--) {
  1160. v = *x;
  1161. VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v));
  1162. if (is_pointer_to_heap(objspace, (void *)v)) {
  1163. gc_mark(objspace, v, 0);
  1164. }
  1165. x++;
  1166. }
  1167. }
  1168. static void
  1169. gc_mark_locations(rb_objspace_t *objspace, VALUE *start, VALUE *end)
  1170. {
  1171. long n;
  1172. if (end <= start) return;
  1173. n = end - start;
  1174. mark_locations_array(objspace, start, n);
  1175. }
  1176. static void
  1177. rb_gc_mark_locations_tmp(VALUE *start, VALUE *end)
  1178. {
  1179. gc_mark_locations(gc_inner->get_objspace(), start, end);
  1180. }
  1181. #define rb_gc_mark_locations(start, end) gc_mark_locations(objspace, start, end)
  1182. struct mark_tbl_arg {
  1183. rb_objspace_t *objspace;
  1184. int lev;
  1185. };
  1186. static int
  1187. mark_entry(ID key, VALUE value, st_data_t data)
  1188. {
  1189. struct mark_tbl_arg *arg = (void*)data;
  1190. gc_mark(arg->objspace, value, arg->lev);
  1191. return ST_CONTINUE;
  1192. }
  1193. static void
  1194. mark_tbl(rb_objspace_t *objspace, st_table *tbl, int lev)
  1195. {
  1196. struct mark_tbl_arg arg;
  1197. if (!tbl) return;
  1198. arg.objspace = objspace;
  1199. arg.lev = lev;
  1200. st_foreach(tbl, mark_entry, (st_data_t)&arg);
  1201. }
  1202. static int
  1203. mark_key(VALUE key, VALUE value, st_data_t data)
  1204. {
  1205. struct mark_tbl_arg *arg = (void*)data;
  1206. gc_mark(arg->objspace, key, arg->lev);
  1207. return ST_CONTINUE;
  1208. }
  1209. static void
  1210. mark_set(rb_objspace_t *objspace, st_table *tbl, int lev)
  1211. {
  1212. struct mark_tbl_arg arg;
  1213. if (!tbl) return;
  1214. arg.objspace = objspace;
  1215. arg.lev = lev;
  1216. st_foreach(tbl, mark_key, (st_data_t)&arg);
  1217. }
  1218. static void
  1219. rb_mark_set_tmp(st_table *tbl)
  1220. {
  1221. mark_set(gc_inner->get_objspace(), tbl, 0);
  1222. }
  1223. static int
  1224. mark_keyvalue(VALUE key, VALUE value, st_data_t data)
  1225. {
  1226. struct mark_tbl_arg *arg = (void*)data;
  1227. gc_mark(arg->objspace, key, arg->lev);
  1228. gc_mark(arg->objspace, value, arg->lev);
  1229. return ST_CONTINUE;
  1230. }
  1231. static void
  1232. mark_hash(rb_objspace_t *objspace, st_table *tbl, int lev)
  1233. {
  1234. struct mark_tbl_arg arg;
  1235. if (!tbl) return;
  1236. arg.objspace = objspace;
  1237. arg.lev = lev;
  1238. st_foreach(tbl, mark_keyvalue, (st_data_t)&arg);
  1239. }
  1240. static int
  1241. mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data)
  1242. {
  1243. struct mark_tbl_arg *arg = (void*)data;
  1244. gc_inner->mark_method_entry(arg->objspace, me, arg->lev);
  1245. return ST_CONTINUE;
  1246. }
  1247. static void
  1248. mark_m_tbl(rb_objspace_t *objspace, st_table *tbl, int lev)
  1249. {
  1250. struct mark_tbl_arg arg;
  1251. if (!tbl) return;
  1252. arg.objspace = objspace;
  1253. arg.lev = lev;
  1254. st_foreach(tbl, mark_method_entry_i, (st_data_t)&arg);
  1255. }
  1256. static int
  1257. free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data)
  1258. {
  1259. rb_free_method_entry(me);
  1260. return ST_CONTINUE;
  1261. }
  1262. static void
  1263. rb_free_m_table_tmp(st_table *tbl)
  1264. {
  1265. st_foreach(tbl, free_method_entry_i, 0);
  1266. st_free_table(tbl);
  1267. }
  1268. static void
  1269. rb_mark_tbl_tmp(st_table *tbl)
  1270. {
  1271. mark_tbl(gc_inner->get_objspace(), tbl, 0);
  1272. }
  1273. static void
  1274. rb_gc_mark_maybe_tmp(VALUE obj)
  1275. {
  1276. if (is_pointer_to_heap(gc_inner->get_objspace(), (void *)obj)) {
  1277. gc_mark(gc_inner->get_objspace(), obj, 0);
  1278. }
  1279. }
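/*
 * Annotation: gc_mark() sets the object's bit in its slot's bitmap instead of
 * an FL_MARK flag.  Recursion into children is bounded: past GC_LEVEL_MAX
 * levels (or when the machine stack is nearly exhausted) the object is pushed
 * onto the fixed MARK_STACK_MAX-entry mark stack and processed later by
 * gc_mark_rest(); if even that overflows, mark_stack_overflow forces
 * gc_mark_all() to rescan every marked object in the heap.
 */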
  1280. #define GC_LEVEL_MAX 250
  1281. static void
  1282. gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev)
  1283. {
  1284. register RVALUE *obj, *bmap;
  1285. obj = RANY(ptr);
  1286. if (rb_special_const_p(ptr)) return; /* special const not marked */
  1287. if (IS_FREE_CELL(obj)) return; /* free cell */
  1288. if (BUILTIN_TYPE(obj) == T_BITMAP) return;
  1289. FIND_BITMAP(bmap, obj);
  1290. if (MARKED_IN_BITMAP(bmap, obj)) return; /* already marked */
  1291. MARK_IN_BITMAP(bmap, obj);
  1292. if (lev > GC_LEVEL_MAX || (lev == 0 && gc_inner->stack_check())) {
  1293. if (!mark_stack_overflow) {
  1294. if (mark_stack_ptr - mark_stack < MARK_STACK_MAX) {
  1295. *mark_stack_ptr = ptr;
  1296. mark_stack_ptr++;
  1297. }
  1298. else {
  1299. mark_stack_overflow = 1;
  1300. }
  1301. }
  1302. return;
  1303. }
  1304. gc_inner->gc_mark_children(objspace, ptr, lev+1);
  1305. }
  1306. static int
  1307. gc_set_mark_flag(register RVALUE *obj)
  1308. {
  1309. register RVALUE *bmap;
  1310. if (IS_FREE_CELL(obj)) return 1; /* free cell */
  1311. FIND_BITMAP(bmap, obj);
  1312. if (MARKED_IN_BITMAP(bmap, obj)) return 1; /* already marked */
  1313. MARK_IN_BITMAP(bmap, obj);
  1314. return 0;
  1315. }
  1316. static inline void
  1317. add_freelist(rb_objspace_t *objspace, RVALUE *p)
  1318. {
  1319. RVALUE *bmap;
  1320. VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
  1321. rb_set_flag_force((VALUE)p, 0);
  1322. FIND_BITMAP(bmap, p);
  1323. CLEAR_IN_BITMAP(bmap, p);
  1324. p->as.free.next = freelist;
  1325. freelist = p;
  1326. }
  1327. static void
  1328. finalize_list(rb_objspace_t *objspace, RVALUE *p)
  1329. {
  1330. while (p) {
  1331. RVALUE *tmp = p->as.free.next;
  1332. run_final(objspace, (VALUE)p);
  1333. if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */
  1334. add_freelist(objspace, p);
  1335. }
  1336. else {
  1337. struct heaps_slot *slot = (struct heaps_slot *)RDATA(p)->dmark;
  1338. slot->limit--;
  1339. }
  1340. p = tmp;
  1341. }
  1342. }
  1343. static void
  1344. free_unused_heaps(rb_objspace_t *objspace)
  1345. {
  1346. size_t i, j;
  1347. RVALUE *last = 0, *bmap = 0;
  1348. for (i = j = 1; j < heaps_used; i++) {
  1349. if (heaps[i].limit == 0) {
  1350. if (!last) {
  1351. last = heaps[i].membase;
  1352. bmap = heaps[i].bitmap;
  1353. }
  1354. else {
  1355. free(heaps[i].membase);
  1356. free(heaps[i].bitmap->as.bitmap.map);
  1357. }
  1358. heaps_used--;
  1359. }
  1360. else {
  1361. if (i != j) {
  1362. heaps[j] = heaps[i];
  1363. }
  1364. j++;
  1365. }
  1366. }
  1367. if (last) {
  1368. if (last < heaps_freed) {
  1369. free(heaps_freed);
  1370. free(objspace->ext_heap.freed_bitmap->as.bitmap.map);
  1371. heaps_freed = last;
  1372. objspace->ext_heap.freed_bitmap = bmap;
  1373. }
  1374. else {
  1375. free(last);
  1376. free(bmap->as.bitmap.map);
  1377. }
  1378. }
  1379. }
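/*
 * Annotation: gc_sweep() walks every heap slot in lock-step with its bitmap
 * (bmap_index / bmap_offset).  Bitmap cells (T_BITMAP) are skipped, unmarked
 * cells are either returned to the freelist or chained onto final_list for
 * deferred finalization, and marked cells are counted as live.  Each slot's
 * bitmap is then cleared for the next cycle, malloc_limit is adjusted, and
 * entirely empty slots become candidates for free_unused_heaps().
 */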
  1380. static void
  1381. gc_sweep(rb_objspace_t *objspace)
  1382. {
  1383. RVALUE *p, *pend, *final_list;
  1384. size_t freed = 0;
  1385. size_t i;
  1386. size_t live = 0, free_min = 0, do_heap_free = 0;
  1387. do_heap_free = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.65);
  1388. free_min = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.2);
  1389. if (free_min < FREE_MIN) {
  1390. do_heap_free = heaps_used * HEAP_OBJ_LIMIT;
  1391. free_min = FREE_MIN;
  1392. }
  1393. freelist = 0;
  1394. final_list = deferred_final_list;
  1395. deferred_final_list = 0;
  1396. for (i = 0; i < heaps_used; i++) {
  1397. size_t free_num = 0, final_num = 0;
  1398. RVALUE *free = freelist;
  1399. RVALUE *final = final_list;
  1400. int *map = heaps[i].bitmap->as.bitmap.map;
  1401. int deferred, bmap_index = 0, bmap_offset = 0;
  1402. p = heaps[i].slot; pend = p + heaps[i].limit;
  1403. while (p < pend) {
  1404. if (BUILTIN_TYPE(p) == T_BITMAP) {
  1405. free_num++;
  1406. }
  1407. else if(!(MARKED_IN_BITMAP_DIRECT(map, bmap_index, bmap_offset))) {
  1408. if (!(IS_FREE_CELL(p)) &&
  1409. ((deferred = gc_inner->obj_free(objspace, (VALUE)p)) ||
  1410. ((FL_TEST(p, FL_FINALIZE)) && need_call_final))) {
  1411. if (!deferred) {
  1412. rb_set_flag_force((VALUE)p, T_ZOMBIE);
  1413. RDATA(p)->dfree = 0;
  1414. }
  1415. p->as.free.next = final_list;
  1416. final_list = p;
  1417. final_num++;
  1418. }
  1419. else {
  1420. /* Do not touch the fields if they don't have to be modified.
  1421. * This is in order to preserve copy-on-write semantics.
  1422. */
  1423. if (!IS_FREE_CELL(p))
  1424. rb_set_flag_force((VALUE)p, 0);
  1425. if (p->as.free.next != freelist)
  1426. p->as.free.next = freelist;
  1427. freelist = p;
  1428. free_num++;
  1429. }
  1430. }
  1431. else if (BUILTIN_TYPE(p) == T_ZOMBIE) {
  1432. /* objects to be finalized */
  1433. /* do nothing; it remains marked */
  1434. }
  1435. else {
  1436. live++;
  1437. }
  1438. p++;
  1439. bmap_offset++;
  1440. if (bmap_offset >= (int)(sizeof(int) * 8)) {
  1441. bmap_index++;
  1442. bmap_offset = 0;
  1443. }
  1444. }
  1445. MEMZERO(heaps[i].bitmap->as.bitmap.map, int, bmap_index+1);
  1446. if (final_num + free_num == heaps[i].limit && freed > do_heap_free) {
  1447. RVALUE *pp;
  1448. for (pp = final_list; pp != final; pp = pp->as.free.next) {
  1449. RDATA(pp)->dmark = (void *)&heaps[i];
  1450. pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */
  1451. }
  1452. heaps[i].limit = final_num;
  1453. freelist = free; /* cancel this page from freelist */
  1454. }
  1455. else {
  1456. freed += free_num;
  1457. }
  1458. }
  1459. GC_PROF_SET_MALLOC_INFO;
  1460. if (malloc_increase > malloc_limit) {
  1461. malloc_limit += (size_t)((malloc_increase - malloc_limit) * (double)live / (live + freed));
  1462. if (malloc_limit < GC_MALLOC_LIMIT) malloc_limit = GC_MALLOC_LIMIT;
  1463. }
  1464. malloc_increase = 0;
  1465. if (freed < free_min) {
  1466. set_heaps_increment(objspace);
  1467. heaps_increment(objspace);
  1468. }
  1469. during_gc = 0;
  1470. /* clear finalization list */
  1471. if (final_list) {
  1472. RVALUE *bmap, *pp;
  1473. for (pp = final_list; pp != 0; pp = pp->as.free.next) {
  1474. FIND_BITMAP(bmap, pp);
  1475. MARK_IN_BITMAP(bmap, pp);
  1476. }
  1477. GC_PROF_SET_HEAP_INFO;
  1478. deferred_final_list = final_list;
  1479. gc_inner->ruby_vm_set_finalizer_interrupt();
  1480. }
  1481. else {
  1482. free_unused_heaps(objspace);
  1483. GC_PROF_SET_HEAP_INFO;
  1484. }
  1485. }
  1486. static void
  1487. rb_gc_force_recycle_tmp(VALUE p)
  1488. {
  1489. rb_objspace_t *objspace = gc_inner->get_objspace();
  1490. add_freelist(objspace, (RVALUE *)p);
  1491. }
  1492. static inline void
  1493. make_deferred(RVALUE *p)
  1494. {
  1495. rb_set_flag_force((VALUE)p, (p->as.basic.flags & ~T_MASK) | T_ZOMBIE);
  1496. }
  1497. static inline void
  1498. make_io_deferred(RVALUE *p)
  1499. {
  1500. rb_io_t *fptr = p->as.file.fptr;
  1501. make_deferred(p);
  1502. p->as.data.dfree = (void (*)(void*))rb_io_fptr_finalize;
  1503. p->as.data.data = fptr;
  1504. }
  1505. #define GC_NOTIFY 0
  1506. void rb_vm_mark(void *ptr);
  1507. #if STACK_GROW_DIRECTION < 0
  1508. #define GET_STACK_BOUNDS(start, end, appendix) (start = STACK_END, end = STACK_START)
  1509. #elif STACK_GROW_DIRECTION > 0
  1510. #define GET_STACK_BOUNDS(start, end, appendix) (start = STACK_START, end = STACK_END+appendix)
  1511. #else
  1512. #define GET_STACK_BOUNDS(stack_start, stack_end, appendix) \
  1513. ((STACK_END < STACK_START) ? \
  1514. (start = STACK_END, end = STACK_START) : (start = STACK_START, end = STACK_END+appendix))
  1515. #endif
  1516. void rb_gc_mark_encodings(void);
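/*
 * Annotation: garbage_collect() runs a full stop-the-world cycle: mark the VM
 * roots (gc_mark_core, finalizer table, threads, symbols, encodings,
 * registered global addresses, end procs, global/class tables, generic ivars,
 * parser), drain the mark stack (falling back to gc_mark_all() on overflow),
 * then sweep.  The GC_PROF_* macros time the mark and sweep phases when the
 * profiler is enabled.
 */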
  1517. static int
  1518. garbage_collect(rb_objspace_t *objspace)
  1519. {
  1520. struct gc_list *list;
  1521. INIT_GC_PROF_PARAMS;
  1522. if (GC_NOTIFY) printf("start garbage_collect()\n");
  1523. if (!heaps) {
  1524. return FALSE;
  1525. }
  1526. if (dont_gc || during_gc) {
  1527. if (!freelist) {
  1528. if (!heaps_increment(objspace)) {
  1529. set_heaps_increment(objspace);
  1530. heaps_increment(objspace);
  1531. }
  1532. }
  1533. return TRUE;
  1534. }
  1535. during_gc++;
  1536. objspace->count++;
  1537. GC_PROF_TIMER_START;
  1538. GC_PROF_MARK_TIMER_START;
  1539. gc_inner->gc_mark_core(objspace);
  1540. if (finalizer_table) {
  1541. mark_tbl(objspace, finalizer_table, 0);
  1542. }
  1543. rb_gc_mark_threads();
  1544. rb_gc_mark_symbols();
  1545. rb_gc_mark_encodings();
  1546. /* mark protected global variables */
  1547. for (list = global_List; list; list = list->next) {
  1548. rb_gc_mark_maybe(*list->varptr);
  1549. }
  1550. rb_mark_end_proc();
  1551. rb_gc_mark_global_tbl();
  1552. mark_tbl(objspace, rb_class_tbl, 0);
  1553. /* mark generic instance variables for special constants */
  1554. rb_mark_generic_ivar_tbl();
  1555. rb_gc_mark_parser();
  1556. /* gc_mark objects whose marking is not yet completed */
  1557. while (!MARK_STACK_EMPTY) {
  1558. if (mark_stack_overflow) {
  1559. gc_mark_all(objspace);
  1560. }
  1561. else {
  1562. gc_mark_rest(objspace);
  1563. }
  1564. }
  1565. GC_PROF_MARK_TIMER_STOP;
  1566. GC_PROF_SWEEP_TIMER_START;
  1567. gc_sweep(objspace);
  1568. GC_PROF_SWEEP_TIMER_STOP;
  1569. GC_PROF_TIMER_STOP;
  1570. if (GC_NOTIFY) printf("end garbage_collect()\n");
  1571. return TRUE;
  1572. }
  1573. static int
  1574. rb_garbage_collect_tmp(void)
  1575. {
  1576. return garbage_collect(gc_inner->get_objspace());
  1577. }
  1578. /*
  1579. * Document-class: ObjectSpace
  1580. *
  1581. * The <code>ObjectSpace</code> module contains a number of routines
  1582. * that interact with the garbage collection facility and allow you to
  1583. * traverse all living objects with an iterator.
  1584. *
  1585. * <code>ObjectSpace</code> also provides support for object
  1586. * finalizers, procs that will be called when a specific object is
  1587. * about to be destroyed by garbage collection.
  1588. *
  1589. * include ObjectSpace
  1590. *
  1591. *
  1592. * a = "A"
  1593. * b = "B"
  1594. * c = "C"
  1595. *
  1596. *
  1597. * define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
  1598. * define_finalizer(a, proc {|id| puts "Finalizer two on #{id}" })
  1599. * define_finalizer(b, proc {|id| puts "Finalizer three on #{id}" })
  1600. *
  1601. * <em>produces:</em>
  1602. *
  1603. * Finalizer three on 537763470
  1604. * Finalizer one on 537763480
  1605. * Finalizer two on 537763480
  1606. *
  1607. */
  1608. static void
  1609. Init_heap_tmp(void)
  1610. {
  1611. init_heap(gc_inner->get_objspace());
  1612. }
  1613. /*
  1614. * rb_objspace_each_objects() is special C API to walk through
  1615. * Ruby object space. This C API is too difficult to use.
  1616. * To be frank, you should not use it; if you do, you need to read the
  1617. * source code of this function and understand what it does.
  1618. *
  1619. * 'callback' will be called several times (once per heap slot,
  1620. * in the current implementation) with:
  1621. * vstart: a pointer to the first living object of the heap_slot.
  1622. * vend: a pointer to just past the valid heap_slot area.
  1623. * stride: the distance to the next VALUE.
  1624. *
  1625. * If callback() returns non-zero, the iteration will be stopped.
  1626. *
  1627. * This is sample callback code that iterates over living objects:
  1628. *
  1629. * int
  1630. * sample_callback(void *vstart, void *vend, int stride, void *data) {
  1631. * VALUE v = (VALUE)vstart;
  1632. * for (; v != (VALUE)vend; v += stride) {
  1633. * if (RBASIC(v)->flags) // liveness check
  1634. * rb_p(v); // e.g., do something with the live object 'v'
  1635. * }
  1636. * return 0; // continue the iteration
  1637. * }
  1638. *
  1639. * Note: 'vstart' is not the top of the heap_slot. It points to the first
  1640. * living object, so that at least one object is kept alive and GC issues
  1641. * are avoided. This means that you cannot walk through all Ruby object
  1642. * slots, including freed object slots.
  1643. *
  1644. * Note: In this implementation, 'stride' is the same as sizeof(RVALUE).
  1645. * However, a different value may be passed as 'stride' for some
  1646. * reason in the future, so you must use 'stride' instead of
  1647. * a constant value in the iteration.
  1648. */
  1649. static void
  1650. rb_objspace_each_objects_tmp(int (*callback)(void *vstart, void *vend,
  1651. size_t stride, void *d),
  1652. void *data)
  1653. {
  1654. size_t i;
  1655. RVALUE *membase = 0;
  1656. RVALUE *pstart, *pend;
  1657. rb_objspace_t *objspace = gc_inner->get_objspace();
  1658. volatile VALUE v;
  1659. i = 0;
  1660. while (i < heaps_used) {
  1661. while (0 < i && (uintptr_t)membase < (uintptr_t)heaps[i-1].membase)
  1662. i--;
  1663. while (i < heaps_used && (uintptr_t)heaps[i].membase <= (uintptr_t)membase )
  1664. i++;
  1665. if (heaps_used <= i)
  1666. break;
  1667. membase = heaps[i].membase;
  1668. pstart = heaps[i].slot;
  1669. pend = pstart + heaps[i].limit;
  1670. for (; pstart != pend; pstart++) {
  1671. if (pstart->as.basic.flags & ~FL_ALIGNOFF) {
  1672. v = (VALUE)pstart; /* acquire to save this object */
  1673. break;
  1674. }
  1675. }
  1676. if (pstart != pend) {
  1677. if ((*callback)(pstart, pend, sizeof(RVALUE), data)) {
  1678. return;
  1679. }
  1680. }
  1681. }
  1682. return;
  1683. }
  1684. struct os_each_struct {
  1685. size_t num;
  1686. VALUE of;
  1687. };
  1688. static int
  1689. os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
  1690. {
  1691. struct os_each_struct *oes = (struct os_each_struct *)data;
  1692. RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend;
  1693. volatile VALUE v;
  1694. for (; p != pend; p++) {
  1695. if (!IS_FREE_CELL(p)) {
  1696. if (gc_inner->os_obj_of_check_type(p)) {
  1697. if (BUILTIN_TYPE(p) == T_BITMAP) continue;
  1698. if (!p->as.basic.klass) continue;
  1699. v = (VALUE)p;
  1700. if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
  1701. rb_yield(v);
  1702. oes->num++;
  1703. }
  1704. }
  1705. }
  1706. }
  1707. return 0;
  1708. }
  1709. static VALUE
  1710. os_obj_of(VALUE of)
  1711. {
  1712. struct os_each_struct oes;
  1713. oes.num = 0;
  1714. oes.of = of;
  1715. rb_objspace_each_objects(os_obj_of_i, &oes);
  1716. return SIZET2NUM(oes.num);
  1717. }
  1718. /*
  1719. * call-seq:
  1720. * ObjectSpace.each_object([module]) {|obj| ... } => fixnum
  1721. *
  1722. * Calls the block once for each living, nonimmediate object in this
  1723. * Ruby process. If <i>module</i> is specified, calls the block
  1724. * for only those classes or modules that match (or are a subclass of)
  1725. * <i>module</i>. Returns the number of objects found. Immediate
  1726. * objects (<code>Fixnum</code>s, <code>Symbol</code>s,
  1727. * <code>true</code>, <code>false</code>, and <code>nil</code>) are
  1728. * never returned. In the example below, <code>each_object</code>
  1729. * returns both the numbers we defined and several constants defined in
  1730. * the <code>Math</code> module.
  1731. *
  1732. * a = 102.7
  1733. * b = 95 # Won't be returned
  1734. * c = 12345678987654321
  1735. * count = ObjectSpace.each_object(Numeric) {|x| p x }
  1736. * puts "Total count: #{count}"
  1737. *
  1738. * <em>produces:</em>
  1739. *
  1740. * 12345678987654321
  1741. * 102.7
  1742. * 2.71828182845905
  1743. * 3.14159265358979
  1744. * 2.22044604925031e-16
  1745. * 1.7976931348623157e+308
  1746. * 2.2250738585072e-308
  1747. * Total count: 7
  1748. *
  1749. */
  1750. static VALUE
  1751. os_each_obj(int argc, VALUE *argv, VALUE os)
  1752. {
  1753. VALUE of;
  1754. rb_secure(4);
  1755. if (argc == 0) {
  1756. of = 0;
  1757. }
  1758. else {
  1759. rb_scan_args(argc, argv, "01", &of);
  1760. }
  1761. RETURN_ENUMERATOR(os, 1, &of);
  1762. return os_obj_of(of);
  1763. }
  1764. /*
  1765. * call-seq:
  1766. * ObjectSpace.undefine_finalizer(obj)
  1767. *
  1768. * Removes all finalizers for <i>obj</i>.
  1769. *
  1770. */
  1771. static VALUE
  1772. undefine_final(VALUE os, VALUE obj)
  1773. {
  1774. rb_objspace_t *objspace = gc_inner->get_objspace();
  1775. if (OBJ_FROZEN(obj)) rb_error_frozen("object");
  1776. if (finalizer_table) {
  1777. st_delete(finalizer_table, (st_data_t*)&obj, 0);
  1778. }
  1779. FL_UNSET(obj, FL_FINALIZE);
  1780. return obj;
  1781. }
  1782. /*
  1783. * call-seq:
  1784. * ObjectSpace.define_finalizer(obj, aProc=proc())
  1785. *
  1786. * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
  1787. * is destroyed.
  1788. *
  1789. */
  1790. static VALUE
  1791. define_final(int argc, VALUE *argv, VALUE os)
  1792. {
  1793. rb_objspace_t *objspace = gc_inner->get_objspace();
  1794. VALUE obj, block, table;
  1795. rb_scan_args(argc, argv, "11", &obj, &block);
  1796. if (OBJ_FROZEN(obj)) rb_error_frozen("object");
  1797. if (argc == 1) {
  1798. block = rb_block_proc();
  1799. }
  1800. else if (!rb_respond_to(block, rb_intern("call"))) {
  1801. rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
  1802. rb_obj_classname(block));
  1803. }
  1804. if (!FL_ABLE(obj)) {
  1805. rb_raise(rb_eArgError, "cannot define finalizer for %s",
  1806. rb_obj_classname(obj));
  1807. }
  1808. RBASIC(obj)->flags |= FL_FINALIZE;
  1809. block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block);
  1810. OBJ_FREEZE(block);
  1811. if (!finalizer_table) {
  1812. finalizer_table = st_init_numtable();
  1813. }
  1814. if (st_lookup(finalizer_table, obj, &table)) {
  1815. rb_ary_push(table, block);
  1816. }
  1817. else {
  1818. table = rb_ary_new3(1, block);
  1819. RBASIC(table)->klass = 0;
  1820. st_add_direct(finalizer_table, obj, table);
  1821. }
  1822. return block;
  1823. }
  1824. static void
  1825. rb_gc_copy_finalizer_tmp(VALUE dest, VALUE obj)
  1826. {
  1827. rb_objspace_t *objspace = gc_inner->get_objspace();
  1828. VALUE table;
  1829. if (!finalizer_table) return;
  1830. if (!FL_TEST(obj, FL_FINALIZE)) return;
  1831. if (st_lookup(finalizer_table, obj, &table)) {
  1832. st_insert(finalizer_table, dest, table);
  1833. }
  1834. FL_SET(dest, FL_FINALIZE);
  1835. }
  1836. static VALUE
  1837. run_single_final(VALUE arg)
  1838. {
  1839. VALUE *args = (VALUE *)arg;
  1840. rb_eval_cmd(args[0], args[1], (int)args[2]);
  1841. return Qnil;
  1842. }
  1843. static void
  1844. run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE objid, VALUE table)
  1845. {
  1846. long i;
  1847. int status;
  1848. VALUE args[3];
  1849. args[1] = 0;
  1850. args[2] = (VALUE)rb_safe_level();
  1851. if (!args[1] && RARRAY_LEN(table) > 0) {
  1852. args[1] = rb_obj_freeze(rb_ary_new3(1, objid));
  1853. }
  1854. for (i=0; i<RARRAY_LEN(table); i++) {
  1855. VALUE final = RARRAY_PTR(table)[i];
  1856. args[0] = RARRAY_PTR(final)[1];
  1857. args[2] = FIX2INT(RARRAY_PTR(final)[0]);
  1858. rb_protect(run_single_final, (VALUE)args, &status);
  1859. }
  1860. }
  1861. static void
  1862. run_final(rb_objspace_t *objspace, VALUE obj)
  1863. {
  1864. VALUE table, objid;
  1865. RUBY_DATA_FUNC free_func = 0;
  1866. objid = rb_obj_id(obj); /* make obj into id */
  1867. RBASIC(obj)->klass = 0;
  1868. if (RTYPEDDATA_P(obj)) {
  1869. free_func = RTYPEDDATA_TYPE(obj)->dfree;
  1870. }
  1871. else {
  1872. free_func = RDATA(obj)->dfree;
  1873. }
  1874. if (free_func) {
  1875. (*free_func)(DATA_PTR(obj));
  1876. }
  1877. if (finalizer_table &&
  1878. st_delete(finalizer_table, (st_data_t*)&obj, &table)) {
  1879. run_finalizer(objspace, obj, objid, table);
  1880. }
  1881. }
  1882. static void
  1883. finalize_deferred(rb_objspace_t *ob

Note: the file is truncated here; see the repository for the full gc_bmp.c.