/ext/gc_bmp/gc_bmp.c
C | 2770 lines | 2077 code | 328 blank | 365 comment | 352 complexity | 23063b91df185ea6a7aa4b504a122e24 MD5 | raw file
Possible License(s): LGPL-2.1, AGPL-3.0, 0BSD, Unlicense, GPL-2.0, BSD-3-Clause
Large files files are truncated, but you can click here to view the full file
- /**********************************************************************
- gc_bmp.c -
- $Author$
- created at: Tue Oct 5 09:44:46 JST 1993
- Copyright (C) 1993-2007 Yukihiro Matsumoto
- Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
- Copyright (C) 2000 Information-technology Promotion Agency, Japan
- **********************************************************************/
- #include "ruby.h"
- #include "ruby/re.h"
- #include "ruby/io.h"
- #include <stdio.h>
- #include <setjmp.h>
- #include <sys/types.h>
- #ifndef FALSE
- # define FALSE 0
- #elif FALSE
- # error FALSE must be false
- #endif
- #ifndef TRUE
- # define TRUE 1
- #elif !TRUE
- # error TRUE must be true
- #endif
- #ifdef HAVE_SYS_TIME_H
- #include <sys/time.h>
- #endif
- #ifdef HAVE_SYS_RESOURCE_H
- #include <sys/resource.h>
- #endif
- #if defined _WIN32 || defined __CYGWIN__
- #include <windows.h>
- #endif
- #ifdef HAVE_VALGRIND_MEMCHECK_H
- # include <valgrind/memcheck.h>
- # ifndef VALGRIND_MAKE_MEM_DEFINED
- # define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE(p, n)
- # endif
- # ifndef VALGRIND_MAKE_MEM_UNDEFINED
- # define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE(p, n)
- # endif
- #else
- # define VALGRIND_MAKE_MEM_DEFINED(p, n) /* empty */
- # define VALGRIND_MAKE_MEM_UNDEFINED(p, n) /* empty */
- #endif
- int rb_io_fptr_finalize(struct rb_io_t*);
- #define rb_setjmp(env) RUBY_SETJMP(env)
- #define rb_jmp_buf rb_jmpbuf_t
- /* Make alloca work the best possible way. */
- #ifdef __GNUC__
- # ifndef atarist
- # ifndef alloca
- # define alloca __builtin_alloca
- # endif
- # endif /* atarist */
- #else
- # ifdef HAVE_ALLOCA_H
- # include <alloca.h>
- # else
- # ifdef _AIX
- #pragma alloca
- # else
- # ifndef alloca /* predefined by HP cc +Olibcalls */
- void *alloca ();
- # endif
- # endif /* AIX */
- # endif /* HAVE_ALLOCA_H */
- #endif /* __GNUC__ */
- #ifndef GC_MALLOC_LIMIT
- #define GC_MALLOC_LIMIT 8000000
- #endif
- #define MARK_STACK_MAX 1024
/* for GC profile */
#define GC_PROFILE_MORE_DETAIL 1
/*
 * One record is appended per GC run while GC::Profiler is enabled.
 * Times are user-CPU seconds as measured by getrusage_time().
 */
typedef struct gc_profile_record {
    double gc_time;            /* total duration of the GC run */
    double gc_mark_time;       /* mark phase only (detail mode) */
    double gc_sweep_time;      /* sweep phase only (detail mode) */
    double gc_invoke_time;     /* offset from profiler start to this run */
    size_t heap_use_slots;     /* heap slots in use */
    size_t heap_live_objects;  /* objects alive after this run */
    size_t heap_free_objects;  /* objects reclaimed/free */
    size_t heap_total_objects; /* heap capacity in objects */
    size_t heap_use_size;      /* live bytes (live * sizeof(RVALUE)) */
    size_t heap_total_size;    /* heap capacity in bytes */
    int have_finalize;         /* Qtrue when a final list existed (detail mode) */
    size_t allocate_increase;  /* malloc'ed bytes since the previous GC */
    size_t allocate_limit;     /* malloc threshold that triggers GC */
} gc_profile_record;
/*
 * Returns the process's user CPU time in seconds as a double.
 * Uses getrusage() where available, GetProcessTimes() on Windows,
 * and falls back to 0.0 on platforms with neither.
 */
static double
getrusage_time(void)
{
#ifdef RUSAGE_SELF
    struct rusage usage;
    struct timeval time;
    getrusage(RUSAGE_SELF, &usage);
    time = usage.ru_utime;
    return time.tv_sec + time.tv_usec * 1e-6;
#elif defined _WIN32
    FILETIME creation_time, exit_time, kernel_time, user_time;
    ULARGE_INTEGER ui;
    LONG_LONG q;
    double t;
    if (GetProcessTimes(GetCurrentProcess(),
                        &creation_time, &exit_time, &kernel_time, &user_time) == 0)
    {
        return 0.0;
    }
    memcpy(&ui, &user_time, sizeof(FILETIME));
    q = ui.QuadPart / 10L;           /* FILETIME is in 100ns units -> microseconds */
    t = (DWORD)(q % 1000000L) * 1e-6;
    q /= 1000000L;
#ifdef __GNUC__
    t += q;
#else
    /* compilers lacking LONG_LONG->double conversion: add 16 bits at a time */
    t += (double)(DWORD)(q >> 16) * (1 << 16);
    t += (DWORD)q & ~(~0 << 16);
#endif
    return t;
#else
    return 0.0;
#endif
}
/*
 * GC profiling hooks.  These expand inside garbage_collect() and expect
 * `objspace` plus the locals declared by INIT_GC_PROF_PARAMS (`gc_time`,
 * `count`, and -- in GC_PROFILE_MORE_DETAIL mode -- `mark_time` and
 * `sweep_time`) to be in scope.
 */
/* Lazily allocates / grows the record array in 1000-entry steps and
 * stamps the invoke time.
 * NOTE(review): realloc overwrites the old pointer, but the rb_bug()
 * just below aborts on failure, so nothing leaks in practice. */
#define GC_PROF_TIMER_START do {\
	if (objspace->profile.run) {\
	    if (!objspace->profile.record) {\
		objspace->profile.size = 1000;\
		objspace->profile.record = malloc(sizeof(gc_profile_record) * objspace->profile.size);\
	    }\
	    if (count >= objspace->profile.size) {\
		objspace->profile.size += 1000;\
		objspace->profile.record = realloc(objspace->profile.record, sizeof(gc_profile_record) * objspace->profile.size);\
	    }\
	    if (!objspace->profile.record) {\
		rb_bug("gc_profile malloc or realloc miss");\
	    }\
	    MEMZERO(&objspace->profile.record[count], gc_profile_record, 1);\
	    gc_time = getrusage_time();\
	    objspace->profile.record[count].gc_invoke_time = gc_time - objspace->profile.invoke_time;\
	}\
    } while(0)

/* Finalizes the run's total time (clamped at 0) and bumps the count. */
#define GC_PROF_TIMER_STOP do {\
	if (objspace->profile.run) {\
	    gc_time = getrusage_time() - gc_time;\
	    if (gc_time < 0) gc_time = 0;\
	    objspace->profile.record[count].gc_time = gc_time;\
	    objspace->profile.count++;\
	}\
    } while(0)

#if GC_PROFILE_MORE_DETAIL
#define INIT_GC_PROF_PARAMS double gc_time = 0, mark_time = 0, sweep_time = 0;\
    size_t count = objspace->profile.count

/* Mark-phase timing. */
#define GC_PROF_MARK_TIMER_START do {\
	if (objspace->profile.run) {\
	    mark_time = getrusage_time();\
	}\
    } while(0)
#define GC_PROF_MARK_TIMER_STOP do {\
	if (objspace->profile.run) {\
	    mark_time = getrusage_time() - mark_time;\
	    if (mark_time < 0) mark_time = 0;\
	    objspace->profile.record[count].gc_mark_time = mark_time;\
	}\
    } while(0)

/* Sweep-phase timing. */
#define GC_PROF_SWEEP_TIMER_START do {\
	if (objspace->profile.run) {\
	    sweep_time = getrusage_time();\
	}\
    } while(0)
#define GC_PROF_SWEEP_TIMER_STOP do {\
	if (objspace->profile.run) {\
	    sweep_time = getrusage_time() - sweep_time;\
	    if (sweep_time < 0) sweep_time = 0;\
	    objspace->profile.record[count].gc_sweep_time = sweep_time;\
	}\
    } while(0)

/* Records malloc accounting; expects `malloc_increase`/`malloc_limit`. */
#define GC_PROF_SET_MALLOC_INFO do {\
	if (objspace->profile.run) {\
	    gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\
	    record->allocate_increase = malloc_increase;\
	    record->allocate_limit = malloc_limit; \
	}\
    } while(0)

/* Records heap statistics; expects `live`, `freed` and `final_list`. */
#define GC_PROF_SET_HEAP_INFO do {\
	if (objspace->profile.run) {\
	    gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\
	    record->heap_use_slots = heaps_used;\
	    record->heap_live_objects = live;\
	    record->heap_free_objects = freed; \
	    record->heap_total_objects = heaps_used * HEAP_OBJ_LIMIT;\
	    record->have_finalize = final_list ? Qtrue : Qfalse;\
	    record->heap_use_size = live * sizeof(RVALUE); \
	    record->heap_total_size = heaps_used * (HEAP_OBJ_LIMIT * sizeof(RVALUE));\
	}\
    } while(0)
#else
/* Non-detail mode: only total time and coarse heap info are recorded. */
#define INIT_GC_PROF_PARAMS double gc_time = 0;\
    size_t count = objspace->profile.count
#define GC_PROF_MARK_TIMER_START
#define GC_PROF_MARK_TIMER_STOP
#define GC_PROF_SWEEP_TIMER_START
#define GC_PROF_SWEEP_TIMER_STOP
#define GC_PROF_SET_MALLOC_INFO
#define GC_PROF_SET_HEAP_INFO do {\
	if (objspace->profile.run) {\
	    gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\
	    record->heap_total_objects = heaps_used * HEAP_OBJ_LIMIT;\
	    record->heap_use_size = live * sizeof(RVALUE); \
	    record->heap_total_size = heaps_used * HEAP_SIZE;\
	}\
    } while(0)
#endif
#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
#pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
#endif

/*
 * A single GC-managed cell.  Every Ruby object occupies one RVALUE; the
 * union overlays all built-in object layouts plus two GC-internal
 * shapes: a freelist node and the per-slot mark-bitmap header.
 */
typedef struct RVALUE {
    union {
	struct {
	    VALUE flags;		/* always 0 for freed obj */
	    struct RVALUE *next;
	} free;				/* cell sitting on the freelist */
	struct {
	    VALUE flags;
	    struct RVALUE *next;
	    int *map;			/* malloc'ed array of mark bits */
	    VALUE slot;			/* first RVALUE of the owning slot */
	    int limit;			/* number of objects covered */
	} bitmap;			/* per-slot mark-bitmap header cell */
	struct RBasic  basic;
	struct RObject object;
	struct RClass  klass;
	struct RFloat  flonum;
	struct RString string;
	struct RArray  array;
	struct RRegexp regexp;
	struct RHash   hash;
	struct RData   data;
	struct RTypedData   typeddata;
	struct RStruct rstruct;
	struct RBignum bignum;
	struct RFile   file;
	struct RMatch  match;
	struct RRational rational;
	struct RComplex complex;
    } as;
#ifdef GC_DEBUG
    const char *file;			/* allocation site, GC_DEBUG builds only */
    int line;
#endif
} RVALUE;

#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
#pragma pack(pop)
#endif

/* One contiguous chunk of RVALUEs together with its mark bitmap. */
struct heaps_slot {
    void *membase;	/* raw malloc'ed base (possibly unaligned) */
    RVALUE *slot;	/* first RVALUE-aligned cell */
    size_t limit;	/* number of cells in this slot */
    RVALUE *bitmap;	/* the in-slot bitmap header cell */
};

#define HEAP_MIN_SLOTS 10000
#define FREE_MIN  4096

/* Node of the registered-address root list (rb_gc_register_address). */
struct gc_list {
    VALUE *varptr;
    struct gc_list *next;
};
#define CALC_EXACT_MALLOC_SIZE 0

/*
 * All GC state, gathered in one struct so that multiple object spaces
 * can coexist.  The short-name macros below alias its fields; each
 * expects a local named `objspace` to be in scope.
 */
typedef struct rb_objspace {
    struct {
	size_t limit;		/* GC trigger threshold for malloc'ed bytes */
	size_t increase;	/* bytes malloc'ed since the last GC */
#if CALC_EXACT_MALLOC_SIZE
	size_t allocated_size;	/* exact live byte count (debug accounting) */
	size_t allocations;	/* exact live allocation count */
#endif
    } malloc_params;
    struct {
	size_t increment;	 /* slots scheduled but not yet allocated */
	struct heaps_slot *ptr;	 /* array of slots, sorted by membase */
	size_t length;		 /* capacity of ptr */
	size_t used;		 /* slots currently in use */
	RVALUE *freelist;
	RVALUE *range[2];	 /* [lomem, himem) bounds for pointer tests */
	RVALUE *freed;
    } heap;
    struct {
	int dont_gc;		 /* GC.disable flag */
	int during_gc;		 /* set while a collection runs */
    } flags;
    struct {
	st_table *table;	 /* object -> finalizer mapping */
	RVALUE *deferred;	 /* chain of objects awaiting finalization */
    } final;
    struct {
	VALUE buffer[MARK_STACK_MAX];
	VALUE *ptr;		 /* next free entry in buffer */
	int overflow;		 /* set when buffer fills up */
    } markstack;
    struct {
	int run;		 /* profiling enabled? */
	gc_profile_record *record; /* grows in 1000-entry steps */
	size_t count;
	size_t size;
	double invoke_time;	 /* process time when profiling began */
    } profile;
    struct gc_list *global_list; /* registered GC roots */
    unsigned int count;		 /* number of GC runs so far */
    int gc_stress;
    struct {
	RVALUE *freed_bitmap;
    } ext_heap;
} rb_objspace_t;

/* Field aliases -- all require a local `objspace` in scope. */
#define malloc_limit		objspace->malloc_params.limit
#define malloc_increase 	objspace->malloc_params.increase
#define heap_slots		objspace->heap.slots
#define heaps			objspace->heap.ptr
#define heaps_length		objspace->heap.length
#define heaps_used		objspace->heap.used
#define freelist		objspace->heap.freelist
#define lomem			objspace->heap.range[0]
#define himem			objspace->heap.range[1]
#define heaps_inc		objspace->heap.increment
#define heaps_freed		objspace->heap.freed
#define dont_gc 		objspace->flags.dont_gc
#define during_gc		objspace->flags.during_gc
#define finalizer_table 	objspace->final.table
#define deferred_final_list	objspace->final.deferred
#define mark_stack		objspace->markstack.buffer
#define mark_stack_ptr		objspace->markstack.ptr
#define mark_stack_overflow	objspace->markstack.overflow
/* note the capital L: this aliases the global_list field */
#define global_List		objspace->global_list
#define ruby_gc_stress		objspace->gc_stress

#define need_call_final 	(finalizer_table && finalizer_table->num_entries)

static void rb_objspace_call_finalizer(rb_objspace_t *objspace);

#include "ruby/gc_ext.h"
static rb_gc_inner_t *gc_inner;		/* vtable into the core GC (gc.c) */

/* TODO: more suitable and safety expression */
/* pseudo object type for the in-slot bitmap header cell */
#define T_BITMAP (T_FIXNUM + 1)
/* FL_MARK is unused by this collector, so it is repurposed to flag
 * cells that live below their slot's aligned bitmap cell */
#define FL_ALIGNOFF FL_MARK
- static rb_objspace_t *
- rb_objspace_alloc_tmp(void)
- {
- rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t));
- memset(objspace, 0, sizeof(*objspace));
- malloc_limit = GC_MALLOC_LIMIT;
- return objspace;
- }
- static void
- rb_objspace_free_tmp(rb_objspace_t *objspace)
- {
- rb_objspace_call_finalizer(objspace);
- if (objspace->profile.record) {
- free(objspace->profile.record);
- objspace->profile.record = 0;
- }
- if (global_List) {
- struct gc_list *list, *next;
- for (list = global_List; list; list = next) {
- next = list->next;
- free(list);
- }
- }
- if (heaps) {
- size_t i;
- for (i = 0; i < heaps_used; ++i) {
- free(heaps[i].membase);
- }
- free(heaps);
- heaps_used = 0;
- heaps = 0;
- }
- free(objspace);
- }
- /* tiny heap size */
- /* 32KB */
- /*#define HEAP_SIZE 0x8000 */
- /* 128KB */
- /*#define HEAP_SIZE 0x20000 */
- /* 64KB */
- /*#define HEAP_SIZE 0x10000 */
- /* 16KB */
- #define BITMAP_ALIGN 0x4000
- /* 8KB */
- /*#define HEAP_SIZE 0x2000 */
- /* 4KB */
- /*#define HEAP_SIZE 0x1000 */
- /* 2KB */
- /*#define HEAP_SIZE 0x800 */
- #define HEAP_SIZE ((BITMAP_ALIGN / sizeof(struct RVALUE) + 2) * sizeof(RVALUE))
- #define BITMAP_MASK (0xFFFFFFFF - BITMAP_ALIGN + 1)
- #define HEAP_OBJ_LIMIT (HEAP_SIZE / sizeof(struct RVALUE) - 1)
- extern VALUE rb_cMutex;
- extern st_table *rb_class_tbl;
- int ruby_disable_gc_stress = 0;
- static void run_final(rb_objspace_t *objspace, VALUE obj);
- static int garbage_collect(rb_objspace_t *objspace);
- /*
- * call-seq:
- * GC.stress => true or false
- *
- * returns current status of GC stress mode.
- */
- static VALUE
- gc_stress_get(VALUE self)
- {
- rb_objspace_t *objspace = gc_inner->get_objspace();
- return ruby_gc_stress ? Qtrue : Qfalse;
- }
- /*
- * call-seq:
- * GC.stress = bool => bool
- *
- * updates GC stress mode.
- *
- * When GC.stress = true, GC is invoked for all GC opportunity:
- * all memory and object allocation.
- *
- * Since it makes Ruby very slow, it is only for debugging.
- */
- static VALUE
- gc_stress_set(VALUE self, VALUE flag)
- {
- rb_objspace_t *objspace = gc_inner->get_objspace();
- rb_secure(2);
- ruby_gc_stress = RTEST(flag);
- return flag;
- }
- /*
- * call-seq:
- * GC::Profiler.enable? => true or false
- *
- * returns current status of GC profile mode.
- */
- static VALUE
- gc_profile_enable_get(VALUE self)
- {
- rb_objspace_t *objspace = gc_inner->get_objspace();
- return objspace->profile.run;
- }
- /*
- * call-seq:
- * GC::Profiler.enable => nil
- *
- * updates GC profile mode.
- * start profiler for GC.
- *
- */
- static VALUE
- gc_profile_enable(void)
- {
- rb_objspace_t *objspace = gc_inner->get_objspace();
- objspace->profile.run = TRUE;
- return Qnil;
- }
- /*
- * call-seq:
- * GC::Profiler.disable => nil
- *
- * updates GC profile mode.
- * stop profiler for GC.
- *
- */
- static VALUE
- gc_profile_disable(void)
- {
- rb_objspace_t *objspace = gc_inner->get_objspace();
- objspace->profile.run = FALSE;
- return Qnil;
- }
- /*
- * call-seq:
- * GC::Profiler.clear => nil
- *
- * clear before profile data.
- *
- */
- static VALUE
- gc_profile_clear(void)
- {
- rb_objspace_t *objspace = gc_inner->get_objspace();
- MEMZERO(objspace->profile.record, gc_profile_record, objspace->profile.size);
- objspace->profile.count = 0;
- return Qnil;
- }
static void vm_xfree(rb_objspace_t *objspace, void *ptr);

/*
 * malloc() with GC accounting: may trigger a collection first (stress
 * mode, or malloc_increase crossing malloc_limit), retries once after
 * a GC when the allocation fails, and reports out-of-memory through
 * gc_inner->ruby_memerror() as a last resort.  With
 * CALC_EXACT_MALLOC_SIZE the requested size is stashed in a size_t
 * header and the pointer just past it is returned.
 */
static void *
vm_xmalloc(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    /* a size with the top bit set is almost certainly an underflowed
     * (negative) size computation in the caller */
    if ((ssize_t)size < 0) {
	gc_inner->negative_size_allocation_error("negative allocation size (or too big)");
    }
    if (size == 0) size = 1;	/* keep malloc/free symmetric for 0-byte requests */

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
#endif

    if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
	(malloc_increase+size) > malloc_limit) {
	gc_inner->garbage_collect_with_gvl(objspace);
    }
    mem = malloc(size);
    if (!mem) {
	if (gc_inner->garbage_collect_with_gvl(objspace)) {
	    mem = malloc(size);	/* a GC may have released memory; retry once */
	}
	if (!mem) {
	    gc_inner->ruby_memerror();
	}
    }
    malloc_increase += size;

#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += size;
    objspace->malloc_params.allocations++;
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

    return mem;
}

/*
 * realloc() counterpart of vm_xmalloc().  A NULL ptr degenerates to
 * vm_xmalloc(); size 0 degenerates to vm_xfree().
 * NOTE(review): under CALC_EXACT_MALLOC_SIZE the accounting subtracts
 * the *new* size rather than the stored old size -- confirm intent.
 */
static void *
vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size)
{
    void *mem;

    if ((ssize_t)size < 0) {
	gc_inner->negative_size_allocation_error("negative re-allocation size");
    }
    if (!ptr) return vm_xmalloc(objspace, size);
    if (size == 0) {
	vm_xfree(objspace, ptr);
	return 0;
    }
    if (ruby_gc_stress && !ruby_disable_gc_stress)
	gc_inner->garbage_collect_with_gvl(objspace);

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
    objspace->malloc_params.allocated_size -= size;
    ptr = (size_t *)ptr - 1;	/* step back to the stored size header */
#endif

    mem = realloc(ptr, size);
    if (!mem) {
	if (gc_inner->garbage_collect_with_gvl(objspace)) {
	    mem = realloc(ptr, size);	/* retry once after a GC */
	}
	if (!mem) {
	    gc_inner->ruby_memerror();
	}
    }
    malloc_increase += size;

#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += size;
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

    return mem;
}

/*
 * free() counterpart; with CALC_EXACT_MALLOC_SIZE steps back to the
 * stored size header before freeing and updates the exact counters.
 */
static void
vm_xfree(rb_objspace_t *objspace, void *ptr)
{
#if CALC_EXACT_MALLOC_SIZE
    size_t size;
    ptr = ((size_t *)ptr) - 1;
    size = ((size_t*)ptr)[0];
    objspace->malloc_params.allocated_size -= size;
    objspace->malloc_params.allocations--;
#endif

    free(ptr);
}
- static void *
- ruby_xmalloc_tmp(size_t size)
- {
- return vm_xmalloc(gc_inner->get_objspace(), size);
- }
- static void *
- ruby_xmalloc2_tmp(size_t n, size_t size)
- {
- size_t len = size * n;
- if (n != 0 && size != len / n) {
- rb_raise(rb_eArgError, "malloc: possible integer overflow");
- }
- return vm_xmalloc(gc_inner->get_objspace(), len);
- }
- static void *
- ruby_xcalloc_tmp(size_t n, size_t size)
- {
- void *mem = ruby_xmalloc2(n, size);
- memset(mem, 0, n * size);
- return mem;
- }
- static void *
- ruby_xrealloc_tmp(void *ptr, size_t size)
- {
- return vm_xrealloc(gc_inner->get_objspace(), ptr, size);
- }
- static void *
- ruby_xrealloc2_tmp(void *ptr, size_t n, size_t size)
- {
- size_t len = size * n;
- if (n != 0 && size != len / n) {
- rb_raise(rb_eArgError, "realloc: possible integer overflow");
- }
- return ruby_xrealloc(ptr, len);
- }
- static void
- ruby_xfree_tmp(void *x)
- {
- if (x)
- vm_xfree(gc_inner->get_objspace(), x);
- }
- /*
- * call-seq:
- * GC.enable => true or false
- *
- * Enables garbage collection, returning <code>true</code> if garbage
- * collection was previously disabled.
- *
- * GC.disable #=> false
- * GC.enable #=> true
- * GC.enable #=> false
- *
- */
- static VALUE
- rb_gc_enable_tmp(void)
- {
- rb_objspace_t *objspace = gc_inner->get_objspace();
- int old = dont_gc;
- dont_gc = FALSE;
- return old ? Qtrue : Qfalse;
- }
- /*
- * call-seq:
- * GC.disable => true or false
- *
- * Disables garbage collection, returning <code>true</code> if garbage
- * collection was already disabled.
- *
- * GC.disable #=> false
- * GC.disable #=> true
- *
- */
- static VALUE
- rb_gc_disable_tmp(void)
- {
- rb_objspace_t *objspace = gc_inner->get_objspace();
- int old = dont_gc;
- dont_gc = TRUE;
- return old ? Qtrue : Qfalse;
- }
- extern VALUE rb_mGC;
- static void
- rb_gc_register_address_tmp(VALUE *addr)
- {
- rb_objspace_t *objspace = gc_inner->get_objspace();
- struct gc_list *tmp;
- tmp = ALLOC(struct gc_list);
- tmp->next = global_List;
- tmp->varptr = addr;
- global_List = tmp;
- }
- static void
- rb_gc_unregister_address_tmp(VALUE *addr)
- {
- rb_objspace_t *objspace = gc_inner->get_objspace();
- struct gc_list *tmp = global_List;
- if (tmp->varptr == addr) {
- global_List = tmp->next;
- xfree(tmp);
- return;
- }
- while (tmp->next) {
- if (tmp->next->varptr == addr) {
- struct gc_list *t = tmp->next;
- tmp->next = tmp->next->next;
- xfree(t);
- break;
- }
- tmp = tmp->next;
- }
- }
/*
 * Grows (or initially allocates) the `heaps` slot-descriptor array to
 * hold next_heaps_length entries and updates heaps_length.  On
 * allocation failure clears during_gc and raises via rb_memerror(),
 * which does not return.
 */
static void
allocate_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
{
    struct heaps_slot *p;
    size_t size;

    size = next_heaps_length*sizeof(struct heaps_slot);

    if (heaps_used > 0) {
	p = (struct heaps_slot *)realloc(heaps, size);
	if (p) heaps = p;	/* on failure the old (smaller) array stays valid */
    }
    else {
	p = heaps = (struct heaps_slot *)malloc(size);
    }

    if (p == 0) {
	during_gc = 0;
	rb_memerror();
    }
    heaps_length = next_heaps_length;
}
/*
 * Every heap slot reserves one RVALUE at a BITMAP_ALIGN-aligned address
 * inside the slot as that slot's mark bitmap (see make_bitmap()).
 * Cells that live below the aligned address carry FL_ALIGNOFF, which
 * tells FIND_BITMAP to round their address up instead of down.
 */
#define FIND_BITMAP(res, p) do {\
	if (((RVALUE *)p)->as.free.flags & FL_ALIGNOFF) {\
	    res = (RVALUE *)((((VALUE)p & BITMAP_MASK) + BITMAP_ALIGN) / sizeof(RVALUE) * sizeof(RVALUE)); \
	}\
	else {\
	    res = (RVALUE *)(((VALUE)p & BITMAP_MASK) / sizeof(RVALUE) * sizeof(RVALUE));\
	}\
    } while(0)

/* index of p within its slot's object array */
#define NUM_IN_SLOT(p, slot) (((VALUE)p - (VALUE)slot)/sizeof(RVALUE))
/* word index / bit offset of p's mark bit inside the bitmap's int array */
#define BITMAP_INDEX(bmap, p) (NUM_IN_SLOT(p, bmap->as.bitmap.slot) / (sizeof(int) * 8))
/* #define BITMAP_INDEX(bmap, p) (NUM_IN_SLOT(p, bmap->as.bitmap.slot) >> 5) */
#define BITMAP_OFFSET(bmap, p) (NUM_IN_SLOT(p, bmap->as.bitmap.slot) & ((sizeof(int) * 8)-1))
/* test / set / clear the mark bit of object p in bitmap bmap */
#define MARKED_IN_BITMAP(bmap, p) (bmap->as.bitmap.map[BITMAP_INDEX(bmap, p)] & 1 << BITMAP_OFFSET(bmap, p))
#define MARK_IN_BITMAP(bmap, p) (bmap->as.bitmap.map[BITMAP_INDEX(bmap, p)] |= 1 << BITMAP_OFFSET(bmap, p))
#define CLEAR_IN_BITMAP(bmap, p) (bmap->as.bitmap.map[BITMAP_INDEX(bmap, p)] &= ~(1 << BITMAP_OFFSET(bmap, p)))
/* variants taking a precomputed word index and bit offset */
#define MARKED_IN_BITMAP_DIRECT(map, index, offset) (map[index] & 1 << offset)
#define MARK_IN_BITMAP_DIRECT(map, index, offset) (map[index] |= 1 << offset)

/* for debug */
/* Prints the bitmap cell, word index / bit offset, and mark state of p. */
void
bitmap_p(RVALUE *p)
{
    RVALUE *bmap;
    int index, offset, marked;

    FIND_BITMAP(bmap, p);
    index = BITMAP_INDEX(bmap, p);
    offset = BITMAP_OFFSET(bmap, p);
    marked = MARKED_IN_BITMAP(bmap, p);
    printf("bitmap : ((RVALUE *)%p)\n", bmap);
    printf("map_index : %d | offset : %d\n", index, offset);
    printf("is mark ? %s\n", marked? "true" : "false");
}

/* Returns the bitmap cell responsible for p (debug helper). */
VALUE
find_bitmap(RVALUE *p) {
    RVALUE *res;

    FIND_BITMAP(res, p);
    return (VALUE)res;
}

/* Dumps the first 26 words of a bitmap's bit array (debug helper). */
void
dump_bitmap(RVALUE *bmap) {
    int i;

    for (i = 0; i < 26; i++) {
	printf("dump %p map %d : %d %s\n", bmap, i, bmap->as.bitmap.map[i], bmap->as.bitmap.map[i]? "remain" : "clean");
    }
}

/* Prints the object address encoded by (index, offset) in bmap (debug helper). */
void
bitmap2obj(RVALUE *bmap, int index, int offset)
{
    printf("(RVALUE *)%p\n", (RVALUE *)(bmap->as.bitmap.slot + (index * sizeof(int) * 8 + offset) * sizeof(RVALUE)));
}
/*
 * Initializes the mark bitmap of a freshly assigned heap slot.  The
 * bitmap header occupies the one RVALUE cell of the slot found by
 * FIND_BITMAP (the BITMAP_ALIGN-aligned address); every object cell at
 * or below `border` gets FL_ALIGNOFF so FIND_BITMAP can locate the
 * header from any cell.  The bit array itself is malloc'ed separately
 * and zeroed.
 */
static void
make_bitmap(struct heaps_slot *slot)
{
    RVALUE *p, *pend, *bitmap, *last, *border;
    int *map = 0;
    int size;

    p = slot->slot;
    pend = p + slot->limit;
    last = pend - 1;
    RBASIC(last)->flags = 0;	/* so FIND_BITMAP rounds down from `last` */
    FIND_BITMAP(bitmap, last);
    if (bitmap < p || pend <= bitmap) {
	rb_bug("not include in heap slot: result bitmap(%p), find (%p), p (%p), pend(%p)", bitmap, last, p, pend);
    }
    border = bitmap;
    if (!((VALUE)border % BITMAP_ALIGN)) {
	/* header is exactly aligned: only cells strictly below it are
	 * "below the bitmap" and need FL_ALIGNOFF */
	border--;
    }
    while (p < pend) {
	if (p <= border) {
	    RBASIC(p)->flags = FL_ALIGNOFF;
	}
	else {
	    RBASIC(p)->flags = 0;
	}
	p++;
    }

    /* one bit per object, rounded up to whole ints */
    size = sizeof(int) * (HEAP_OBJ_LIMIT / (sizeof(int) * 8)+1);
    map = (int *)malloc(size);
    if (map == 0) {
	rb_memerror();
    }
    MEMZERO(map, int, (size/sizeof(int)));
    bitmap->as.bitmap.flags |= T_BITMAP;
    bitmap->as.bitmap.map = map;
    bitmap->as.bitmap.slot = (VALUE)slot->slot;
    bitmap->as.bitmap.limit = slot->limit;
    slot->bitmap = bitmap;
}
- void
- test_bitmap(RVALUE *p, RVALUE *pend)
- {
- RVALUE *first, *bmap = 0, *bmap_tmp;
- int i;
- first = p;
- FIND_BITMAP(bmap_tmp, p);
- while (p < pend) {
- if (MARKED_IN_BITMAP(bmap, p)) printf("already marking! %p\n", p);
- if (bmap_tmp != p) {
- FIND_BITMAP(bmap, p);
- if (bmap_tmp != bmap) printf("diffrence bmap %p : %p\n", bmap_tmp, bmap);
- MARK_IN_BITMAP(bmap, p);
- }
- else {
- MARK_IN_BITMAP(bmap, p);
- }
- if (!MARKED_IN_BITMAP(bmap, p)) printf("not marking! %p\n", p);
- p++;
- }
- for (i =0; i < 26; i++) {
- printf("bitmap[%d] : %x\n", i, bmap->as.bitmap.map[i]);
- }
- p = first;
- while (p < pend) {
- if (bmap_tmp != p) {
- FIND_BITMAP(bmap, p);
- CLEAR_IN_BITMAP(bmap, p);
- }
- else {
- CLEAR_IN_BITMAP(bmap, p);
- }
- if (MARKED_IN_BITMAP(bmap, p)) printf("not clear! %p\n", p);
- p++;
- }
- for (i =0; i < 26; i++) {
- printf("bitmap[%d] : %x\n", i, bmap->as.bitmap.map[i]);
- }
- }
/*
 * Allocates one HEAP_SIZE chunk, aligns its first cell to
 * sizeof(RVALUE), inserts the descriptor into the address-sorted
 * `heaps` array, widens the [lomem, himem) bounds, builds the slot's
 * mark bitmap, and threads every non-bitmap cell onto the freelist.
 */
static void
assign_heap_slot(rb_objspace_t *objspace)
{
    RVALUE *p, *pend, *membase;
    size_t hi, lo, mid;
    size_t objs;

    objs = HEAP_OBJ_LIMIT;
    p = (RVALUE*)malloc(HEAP_SIZE);
    if (p == 0) {
	during_gc = 0;
	rb_memerror();
    }

    membase = p;
    /* round the first usable cell up to RVALUE alignment */
    if ((VALUE)p % sizeof(RVALUE) != 0) {
	p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
    }

    /* binary search for the insertion point that keeps heaps[] sorted
     * by membase so is_pointer_to_heap() can bsearch it */
    lo = 0;
    hi = heaps_used;
    while (lo < hi) {
	register RVALUE *mid_membase;
	mid = (lo + hi) / 2;
	mid_membase = heaps[mid].membase;
	if (mid_membase < membase) {
	    lo = mid + 1;
	}
	else if (mid_membase > membase) {
	    hi = mid;
	}
	else {
	    rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid);
	}
    }

    if (hi < heaps_used) {
	MEMMOVE(&heaps[hi+1], &heaps[hi], struct heaps_slot, heaps_used - hi);
    }
    heaps[hi].membase = membase;
    heaps[hi].slot = p;
    heaps[hi].limit = objs;
    pend = p + objs;
    if (lomem == 0 || lomem > p) lomem = p;
    if (himem < pend) himem = pend;
    heaps_used++;

    make_bitmap(&heaps[hi]);
    /* every cell except the in-slot bitmap header becomes free */
    while (p < pend) {
	if (BUILTIN_TYPE(p) != T_BITMAP) {
	    p->as.free.next = freelist;
	    freelist = p;
	}
	p++;
    }
}
- static void
- init_heap(rb_objspace_t *objspace)
- {
- size_t add, i;
- add = HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT;
- if (!add) {
- add = 1;
- }
- if ((heaps_used + add) > heaps_length) {
- allocate_heaps(objspace, heaps_used + add);
- }
- for (i = 0; i < add; i++) {
- assign_heap_slot(objspace);
- }
- heaps_inc = 0;
- objspace->profile.invoke_time = getrusage_time();
- }
- static void
- set_heaps_increment(rb_objspace_t *objspace)
- {
- size_t next_heaps_length = (size_t)(heaps_used * 1.8);
- if (next_heaps_length == heaps_used) {
- next_heaps_length++;
- }
- heaps_inc = next_heaps_length - heaps_used;
- if (next_heaps_length > heaps_length) {
- allocate_heaps(objspace, next_heaps_length);
- }
- }
- static int
- heaps_increment(rb_objspace_t *objspace)
- {
- if (heaps_inc > 0) {
- assign_heap_slot(objspace);
- heaps_inc--;
- return TRUE;
- }
- return FALSE;
- }
#define RANY(o) ((RVALUE*)(o))

/*
 * Pops one cell off the freelist -- growing the heap and/or collecting
 * first when the list is empty or stress mode is on -- and zeroes it,
 * preserving the FL_ALIGNOFF bit that records on which side of the
 * slot's bitmap header the cell lives.
 */
static VALUE
rb_newobj_from_heap(rb_objspace_t *objspace)
{
    VALUE obj;
    int bmap_left = 0;

    if ((ruby_gc_stress && !ruby_disable_gc_stress) || !freelist) {
	if (!heaps_increment(objspace) && !garbage_collect(objspace)) {
	    during_gc = 0;
	    rb_memerror();
	}
    }

    obj = (VALUE)freelist;
    freelist = freelist->as.free.next;

    if (RANY(obj)->as.free.flags & FL_ALIGNOFF) {
	bmap_left = Qtrue;	/* remember before MEMZERO wipes the flags */
    }
    MEMZERO((void*)obj, RVALUE, 1);
    if (bmap_left) {
	RANY(obj)->as.free.flags = FL_ALIGNOFF;
    }
#ifdef GC_DEBUG
    RANY(obj)->file = rb_sourcefile();
    RANY(obj)->line = rb_sourceline();
#endif

    return obj;
}
/* TODO: remove this function. */
#if USE_VALUE_CACHE
/*
 * Refills the per-thread VALUE cache with freshly allocated cells,
 * pre-marking each one so the GC will not sweep it while cached, and
 * returns one additional new object for immediate use.
 * NOTE(review): the LOCK/UNLOCK comments are placeholders -- confirm
 * what synchronization the callers actually provide.
 */
static VALUE
rb_fill_value_cache(rb_thread_t *th)
{
    rb_objspace_t *objspace = gc_inner->get_objspace();
    int i;
    VALUE rv;
    RVALUE *bmap;

    /* LOCK */
    for (i=0; i<RUBY_VM_VALUE_CACHE_SIZE; i++) {
	VALUE v = rb_newobj_from_heap(objspace);
	th->value_cache[i] = v;
	FIND_BITMAP(bmap, v);
	MARK_IN_BITMAP(bmap, v);	/* keep cached cells alive across GCs */
    }
    th->value_cache_ptr = &th->value_cache[0];
    rv = rb_newobj_from_heap(objspace);
    /* UNLOCK */
    return rv;
}
#endif

/* TRUE while a collection is in progress. */
static int
rb_during_gc_tmp(void)
{
    rb_objspace_t *objspace = gc_inner->get_objspace();
    return during_gc;
}

/*
 * Allocates a fresh object cell, from the per-thread cache when
 * USE_VALUE_CACHE is on, otherwise straight from the heap.  Allocating
 * while a GC is running is a fatal error.
 */
static VALUE
rb_newobj_tmp(void)
{
#if USE_VALUE_CACHE
    rb_thread_t *th = GET_THREAD();
    VALUE v = *th->value_cache_ptr;
#endif
    rb_objspace_t *objspace = gc_inner->get_objspace();

    if (during_gc) {
	dont_gc = 1;
	during_gc = 0;
	rb_bug("object allocation during garbage collection phase");
    }

#if USE_VALUE_CACHE
    if (v) {
	rb_set_flag_force(v, 0);	/* reset cached cell's flags for reuse */
	th->value_cache_ptr++;
    }
    else {
	v = rb_fill_value_cache(th);
    }

#if defined(GC_DEBUG)
    printf("cache index: %d, v: %p, th: %p\n",
	   th->value_cache_ptr - th->value_cache, v, th);
#endif
    return v;
#else
    return rb_newobj_from_heap(objspace);
#endif
}
- static void
- rb_set_flag_force_tmp(VALUE obj, VALUE t)
- {
- t = t & ~FL_ALIGNOFF;
- if (RBASIC(obj)->flags & FL_ALIGNOFF) {
- RBASIC(obj)->flags = FL_ALIGNOFF | t;
- }
- else {
- RBASIC(obj)->flags = t;
- }
- }
/*
 * Wraps the C pointer datap in a new T_DATA object of class klass with
 * the given mark and free callbacks.  Raises TypeError when klass is
 * given but is not a Class.
 */
static VALUE
rb_data_object_alloc_tmp(VALUE klass, void *datap, RUBY_DATA_FUNC dmark,
			 RUBY_DATA_FUNC dfree)
{
    NEWOBJ(data, struct RData);
    if (klass) Check_Type(klass, T_CLASS);
    OBJSETUP(data, klass, T_DATA);
    data->data = datap;
    data->dfree = dfree;
    data->dmark = dmark;
    return (VALUE)data;
}

/*
 * Typed variant: wraps datap with an rb_data_type_t descriptor and sets
 * typed_flag so RTYPEDDATA_P() recognizes the object.
 */
static VALUE
rb_data_typed_object_alloc_tmp(VALUE klass, void *datap,
			       const rb_data_type_t *type)
{
    NEWOBJ(data, struct RTypedData);

    if (klass) Check_Type(klass, T_CLASS);

    OBJSETUP(data, klass, T_DATA);

    data->data = datap;
    data->typed_flag = 1;
    data->type = type;

    return (VALUE)data;
}
- static size_t
- rb_objspace_data_type_memsize_tmp(VALUE obj)
- {
- if (RTYPEDDATA_P(obj)) {
- return RTYPEDDATA_TYPE(obj)->dsize(RTYPEDDATA_DATA(obj));
- }
- else {
- return 0;
- }
- }
- static const char *
- rb_objspace_data_type_name_tmp(VALUE obj)
- {
- if (RTYPEDDATA_P(obj)) {
- return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
- }
- else {
- return 0;
- }
- }
#ifdef __ia64
/* ia64 also tracks the register backing store */
#define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine_stack_end), th->machine_register_stack_end = rb_ia64_bsp())
#else
#define SET_STACK_END SET_MACHINE_STACK_END(&th->machine_stack_end)
#endif

/* machine-stack bounds of the current thread `th` */
#define STACK_START (th->machine_stack_start)
#define STACK_END (th->machine_stack_end)
#define STACK_LEVEL_MAX (th->machine_stack_maxsize/sizeof(VALUE))

/* current stack depth in VALUEs, independent of growth direction */
#if STACK_GROW_DIRECTION < 0
# define STACK_LENGTH  (size_t)(STACK_START - STACK_END)
#elif STACK_GROW_DIRECTION > 0
# define STACK_LENGTH  (size_t)(STACK_END - STACK_START + 1)
#else
# define STACK_LENGTH  ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
                                                  : (size_t)(STACK_END - STACK_START + 1))
#endif

#if !STACK_GROW_DIRECTION
int ruby_stack_grow_direction;
/*
 * Determines at runtime which way the machine stack grows by comparing
 * a deeper frame's address against the caller's, caching the result in
 * ruby_stack_grow_direction (1 = upward, -1 = downward).
 */
static int
ruby_get_stack_grow_direction_tmp(volatile VALUE *addr)
{
    VALUE *end;
    SET_MACHINE_STACK_END(&end);

    if (end > addr) return ruby_stack_grow_direction = 1;
    return ruby_stack_grow_direction = -1;
}
#endif

#define GC_WATER_MARK 512

/*
 * Stack-pressure probe used by gc_mark().  When SIGSEGV can be handled
 * on an alternate signal stack, overflow is caught there instead, so
 * this always reports "no pressure".
 */
static int
ruby_stack_check_tmp(void)
{
#if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
    return 0;
#else
    return gc_inner->stack_check();
#endif
}
- static void
- init_mark_stack(rb_objspace_t *objspace)
- {
- mark_stack_overflow = 0;
- mark_stack_ptr = mark_stack;
- }
#define MARK_STACK_EMPTY (mark_stack_ptr == mark_stack)

static void gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev);

/* a cell is free when no flag bit except FL_ALIGNOFF survives */
#define IS_FREE_CELL(obj) ((obj->as.basic.flags & ~(FL_ALIGNOFF)) == 0)

/*
 * Mark-stack overflow fallback: rescan every already-marked live
 * object in every heap slot and mark its children again.
 */
static void
gc_mark_all(rb_objspace_t *objspace)
{
    RVALUE *p, *pend, *bmap;
    size_t i;

    init_mark_stack(objspace);
    for (i = 0; i < heaps_used; i++) {
	p = heaps[i].slot; pend = p + heaps[i].limit;
	bmap = heaps[i].bitmap;
	while (p < pend) {
	    if (MARKED_IN_BITMAP(bmap, p) &&
		!(IS_FREE_CELL(p))) {
		gc_inner->gc_mark_children(objspace, (VALUE)p, 0);
	    }
	    p++;
	}
    }
}

/*
 * Drains the mark stack: copies its contents aside, resets it (so
 * gc_mark can push anew), then marks the children of each entry.
 */
static void
gc_mark_rest(rb_objspace_t *objspace)
{
    VALUE tmp_arry[MARK_STACK_MAX];
    VALUE *p;

    p = (mark_stack_ptr - mark_stack) + tmp_arry;
    MEMCPY(tmp_arry, mark_stack, VALUE, p - tmp_arry);

    init_mark_stack(objspace);
    while (p != tmp_arry) {
	p--;
	gc_inner->gc_mark_children(objspace, *p, 0);
    }
}

/*
 * Conservative pointer test: TRUE iff ptr is RVALUE-aligned and lies
 * inside some heap slot.  heaps[] is kept sorted by address (see
 * assign_heap_slot), so a binary search suffices.
 */
static inline int
is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
{
    register RVALUE *p = RANY(ptr);
    register struct heaps_slot *heap;
    register size_t hi, lo, mid;

    if (p < lomem || p > himem) return FALSE;	/* cheap range pre-check */
    if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;

    /* check if p looks like a pointer using bsearch*/
    lo = 0;
    hi = heaps_used;
    while (lo < hi) {
	mid = (lo + hi) / 2;
	heap = &heaps[mid];
	if (heap->slot <= p) {
	    if (p < heap->slot + heap->limit)
		return TRUE;
	    lo = mid + 1;
	}
	else {
	    hi = mid;
	}
    }
    return FALSE;
}
- static void
- mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n)
- {
- VALUE v;
- while (n--) {
- v = *x;
- VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v));
- if (is_pointer_to_heap(objspace, (void *)v)) {
- gc_mark(objspace, v, 0);
- }
- x++;
- }
- }
- static void
- gc_mark_locations(rb_objspace_t *objspace, VALUE *start, VALUE *end)
- {
- long n;
- if (end <= start) return;
- n = end - start;
- mark_locations_array(objspace, start, n);
- }
- static void
- rb_gc_mark_locations_tmp(VALUE *start, VALUE *end)
- {
- gc_mark_locations(gc_inner->get_objspace(), start, end);
- }
- #define rb_gc_mark_locations(start, end) gc_mark_locations(objspace, start, end)
- struct mark_tbl_arg {
- rb_objspace_t *objspace;
- int lev;
- };
- static int
- mark_entry(ID key, VALUE value, st_data_t data)
- {
- struct mark_tbl_arg *arg = (void*)data;
- gc_mark(arg->objspace, value, arg->lev);
- return ST_CONTINUE;
- }
- static void
- mark_tbl(rb_objspace_t *objspace, st_table *tbl, int lev)
- {
- struct mark_tbl_arg arg;
- if (!tbl) return;
- arg.objspace = objspace;
- arg.lev = lev;
- st_foreach(tbl, mark_entry, (st_data_t)&arg);
- }
- static int
- mark_key(VALUE key, VALUE value, st_data_t data)
- {
- struct mark_tbl_arg *arg = (void*)data;
- gc_mark(arg->objspace, key, arg->lev);
- return ST_CONTINUE;
- }
- static void
- mark_set(rb_objspace_t *objspace, st_table *tbl, int lev)
- {
- struct mark_tbl_arg arg;
- if (!tbl) return;
- arg.objspace = objspace;
- arg.lev = lev;
- st_foreach(tbl, mark_key, (st_data_t)&arg);
- }
- static void
- rb_mark_set_tmp(st_table *tbl)
- {
- mark_set(gc_inner->get_objspace(), tbl, 0);
- }
- static int
- mark_keyvalue(VALUE key, VALUE value, st_data_t data)
- {
- struct mark_tbl_arg *arg = (void*)data;
- gc_mark(arg->objspace, key, arg->lev);
- gc_mark(arg->objspace, value, arg->lev);
- return ST_CONTINUE;
- }
- static void
- mark_hash(rb_objspace_t *objspace, st_table *tbl, int lev)
- {
- struct mark_tbl_arg arg;
- if (!tbl) return;
- arg.objspace = objspace;
- arg.lev = lev;
- st_foreach(tbl, mark_keyvalue, (st_data_t)&arg);
- }
/* st_foreach() callback for mark_m_tbl(): delegate marking of one
 * method entry to the GC core. */
static int
mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data)
{
    struct mark_tbl_arg *arg = (void*)data;
    gc_inner->mark_method_entry(arg->objspace, me, arg->lev);
    return ST_CONTINUE;
}
- static void
- mark_m_tbl(rb_objspace_t *objspace, st_table *tbl, int lev)
- {
- struct mark_tbl_arg arg;
- if (!tbl) return;
- arg.objspace = objspace;
- arg.lev = lev;
- st_foreach(tbl, mark_method_entry_i, (st_data_t)&arg);
- }
/* st_foreach() callback for rb_free_m_table_tmp(): release one method
 * entry; the table structure itself is freed by the caller. */
static int
free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data)
{
    rb_free_method_entry(me);
    return ST_CONTINUE;
}
/* Free a method table: first every entry, then the table itself. */
static void
rb_free_m_table_tmp(st_table *tbl)
{
    st_foreach(tbl, free_method_entry_i, 0);
    st_free_table(tbl);
}
- static void
- rb_mark_tbl_tmp(st_table *tbl)
- {
- mark_tbl(gc_inner->get_objspace(), tbl, 0);
- }
- static void
- rb_gc_mark_maybe_tmp(VALUE obj)
- {
- if (is_pointer_to_heap(gc_inner->get_objspace(), (void *)obj)) {
- gc_mark(gc_inner->get_objspace(), obj, 0);
- }
- }
/* Maximum direct recursion depth while marking; deeper objects are
 * deferred to the explicit mark stack instead. */
#define GC_LEVEL_MAX 250

/*
 * Mark one object (bitmap-marking collector).  Mark bits live in a
 * per-heap-page bitmap rather than in the object header, keeping heap
 * pages copy-on-write friendly.
 *
 *   ptr - candidate object reference
 *   lev - current recursion depth; 0 means we were called from a root
 *         scan, so the machine-stack headroom is also checked
 */
static void
gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev)
{
    register RVALUE *obj, *bmap;

    obj = RANY(ptr);
    if (rb_special_const_p(ptr)) return; /* special const not marked */
    if (IS_FREE_CELL(obj)) return; /* free cell */
    /* bitmap cells are GC metadata, never marked as ordinary objects */
    if (BUILTIN_TYPE(obj) == T_BITMAP) return;
    FIND_BITMAP(bmap, obj);
    if (MARKED_IN_BITMAP(bmap, obj)) return; /* already marked */
    MARK_IN_BITMAP(bmap, obj);
    if (lev > GC_LEVEL_MAX || (lev == 0 && gc_inner->stack_check())) {
        /* recursion too deep (or C stack nearly exhausted): defer the
         * children to the auxiliary mark stack instead of recursing */
        if (!mark_stack_overflow) {
            if (mark_stack_ptr - mark_stack < MARK_STACK_MAX) {
                *mark_stack_ptr = ptr;
                mark_stack_ptr++;
            }
            else {
                /* mark stack full: gc_mark_all() will rescan the heap */
                mark_stack_overflow = 1;
            }
        }
        return;
    }
    gc_inner->gc_mark_children(objspace, ptr, lev+1);
}
- static int
- gc_set_mark_flag(register RVALUE *obj)
- {
- register RVALUE *bmap;
- if (IS_FREE_CELL(obj)) return 1; /* free cell */
- FIND_BITMAP(bmap, obj);
- if (MARKED_IN_BITMAP(bmap, obj)) return 1; /* already marked */
- MARK_IN_BITMAP(bmap, obj);
- return 0;
- }
/* Reset cell p to a free cell and push it onto the freelist.  Clears
 * both the object's flags and its mark bit so the next GC cycle sees a
 * clean cell. */
static inline void
add_freelist(rb_objspace_t *objspace, RVALUE *p)
{
    RVALUE *bmap;
    /* tell valgrind the cell's previous contents are dead from here on */
    VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
    rb_set_flag_force((VALUE)p, 0);
    FIND_BITMAP(bmap, p);
    CLEAR_IN_BITMAP(bmap, p);
    p->as.free.next = freelist;
    freelist = p;
}
/* Run deferred finalization for every object on list p (chained through
 * as.free.next), then recycle each cell.  Cells tagged FL_SINGLETON
 * belong to a heap page being released: they are not put back on the
 * freelist; instead the owning page's live count is decremented. */
static void
finalize_list(rb_objspace_t *objspace, RVALUE *p)
{
    while (p) {
        RVALUE *tmp = p->as.free.next;
        run_final(objspace, (VALUE)p);
        if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */
            add_freelist(objspace, p);
        }
        else {
            /* gc_sweep() stored the owning slot in dmark when it set
             * FL_SINGLETON on cells of a page scheduled for release */
            struct heaps_slot *slot = (struct heaps_slot *)RDATA(p)->dmark;
            slot->limit--;
        }
        p = tmp;
    }
}
- static void
- free_unused_heaps(rb_objspace_t *objspace)
- {
- size_t i, j;
- RVALUE *last = 0, *bmap = 0;
- for (i = j = 1; j < heaps_used; i++) {
- if (heaps[i].limit == 0) {
- if (!last) {
- last = heaps[i].membase;
- bmap = heaps[i].bitmap;
- }
- else {
- free(heaps[i].membase);
- free(heaps[i].bitmap->as.bitmap.map);
- }
- heaps_used--;
- }
- else {
- if (i != j) {
- heaps[j] = heaps[i];
- }
- j++;
- }
- }
- if (last) {
- if (last < heaps_freed) {
- free(heaps_freed);
- free(objspace->ext_heap.freed_bitmap->as.bitmap.map);
- heaps_freed = last;
- heaps_freed = bmap;
- }
- else {
- free(last);
- free(bmap->as.bitmap.map);
- }
- }
- }
/*
 * Sweep phase of the bitmap-marking collector.  Walks every heap page
 * and compares each cell against the page's mark bitmap:
 *   - unmarked cells are either pushed onto the freelist or, when they
 *     need finalization, chained onto final_list as zombies;
 *   - pages that turn out completely dead (once enough memory is free
 *     already) are scheduled for release back to the system;
 *   - each page's mark bitmap is cleared for the next cycle.
 * Afterwards adapts malloc_limit and grows the heap when too little
 * memory was reclaimed.
 */
static void
gc_sweep(rb_objspace_t *objspace)
{
    RVALUE *p, *pend, *final_list;
    size_t freed = 0;
    size_t i;
    size_t live = 0, free_min = 0, do_heap_free = 0;
    /* thresholds scale with heap size: release empty pages only once
     * >65% of all cells are free; demand at least 20% free overall */
    do_heap_free = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.65);
    free_min = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.2);
    if (free_min < FREE_MIN) {
        do_heap_free = heaps_used * HEAP_OBJ_LIMIT;
        free_min = FREE_MIN;
    }
    freelist = 0;
    final_list = deferred_final_list;
    deferred_final_list = 0;
    for (i = 0; i < heaps_used; i++) {
        size_t free_num = 0, final_num = 0;
        RVALUE *free = freelist;    /* freelist head before this page */
        RVALUE *final = final_list; /* final_list head before this page */
        int *map = heaps[i].bitmap->as.bitmap.map;
        int deferred, bmap_index = 0, bmap_offset = 0;
        p = heaps[i].slot; pend = p + heaps[i].limit;
        while (p < pend) {
            if (BUILTIN_TYPE(p) == T_BITMAP) {
                /* bitmap cells are metadata; count them as free so a
                 * fully dead page can still be released */
                free_num++;
            }
            else if(!(MARKED_IN_BITMAP_DIRECT(map, bmap_index, bmap_offset))) {
                /* unmarked: the object is dead */
                if (!(IS_FREE_CELL(p)) &&
                    ((deferred = gc_inner->obj_free(objspace, (VALUE)p)) ||
                     ((FL_TEST(p, FL_FINALIZE)) && need_call_final))) {
                    /* needs finalization: turn into a zombie and queue */
                    if (!deferred) {
                        rb_set_flag_force((VALUE)p, T_ZOMBIE);
                        RDATA(p)->dfree = 0;
                    }
                    p->as.free.next = final_list;
                    final_list = p;
                    final_num++;
                }
                else {
                    /* Do not touch the fields if they don't have to be modified.
                     * This is in order to preserve copy-on-write semantics.
                     */
                    if (!IS_FREE_CELL(p))
                        rb_set_flag_force((VALUE)p, 0);
                    if (p->as.free.next != freelist)
                        p->as.free.next = freelist;
                    freelist = p;
                    free_num++;
                }
            }
            else if (BUILTIN_TYPE(p) == T_ZOMBIE) {
                /* objects to be finalized */
                /* do nothing; remain marked */
            }
            else {
                live++;
            }
            p++;
            /* advance position within the int-granular mark bitmap */
            bmap_offset++;
            if (bmap_offset >= (int)(sizeof(int) * 8)) {
                bmap_index++;
                bmap_offset = 0;
            }
        }
        /* clear this page's mark bits for the next GC cycle */
        MEMZERO(heaps[i].bitmap->as.bitmap.map, int, bmap_index+1);
        if (final_num + free_num == heaps[i].limit && freed > do_heap_free) {
            /* page is entirely dead and plenty is free already: tag its
             * zombies as page-freeing and withdraw its cells from the
             * freelist so the whole page can be released later */
            RVALUE *pp;
            for (pp = final_list; pp != final; pp = pp->as.free.next) {
                RDATA(pp)->dmark = (void *)&heaps[i];
                pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */
            }
            heaps[i].limit = final_num;
            freelist = free; /* cancel this page from freelist */
        }
        else {
            freed += free_num;
        }
    }
    GC_PROF_SET_MALLOC_INFO;
    if (malloc_increase > malloc_limit) {
        /* raise the malloc trigger proportionally to the live ratio */
        malloc_limit += (size_t)((malloc_increase - malloc_limit) * (double)live / (live + freed));
        if (malloc_limit < GC_MALLOC_LIMIT) malloc_limit = GC_MALLOC_LIMIT;
    }
    malloc_increase = 0;
    if (freed < free_min) {
        /* not enough reclaimed: grow the heap now */
        set_heaps_increment(objspace);
        heaps_increment(objspace);
    }
    during_gc = 0;
    /* clear finalization list */
    if (final_list) {
        RVALUE *bmap, *pp;
        /* re-mark queued zombies so the next sweep does not reclaim
         * them before their finalizers have run */
        for (pp = final_list; pp != 0; pp = pp->as.free.next) {
            FIND_BITMAP(bmap, pp);
            MARK_IN_BITMAP(bmap, pp);
        }
        GC_PROF_SET_HEAP_INFO;
        deferred_final_list = final_list;
        gc_inner->ruby_vm_set_finalizer_interrupt();
    }
    else {
        free_unused_heaps(objspace);
        GC_PROF_SET_HEAP_INFO;
    }
}
- static void
- rb_gc_force_recycle_tmp(VALUE p)
- {
- rb_objspace_t *objspace = gc_inner->get_objspace();
- add_freelist(objspace, (RVALUE *)p);
- }
/* Retag cell p as T_ZOMBIE (deferred finalization pending) while
 * preserving all non-type flag bits. */
static inline void
make_deferred(RVALUE *p)
{
    rb_set_flag_force((VALUE)p, (p->as.basic.flags & ~T_MASK) | T_ZOMBIE);
}
/* Turn an IO object into a zombie whose deferred free routine closes
 * the underlying rb_io_t.  fptr is saved before make_deferred()
 * rewrites the flags, then re-attached via the data fields so the
 * finalizer thread can reach it. */
static inline void
make_io_deferred(RVALUE *p)
{
    rb_io_t *fptr = p->as.file.fptr;
    make_deferred(p);
    p->as.data.dfree = (void (*)(void*))rb_io_fptr_finalize;
    p->as.data.data = fptr;
}
- #define GC_NOTIFY 0
- void rb_vm_mark(void *ptr);
/* Compute the machine-stack scan range [start, end) for conservative
 * marking; appendix widens the range on upward-growing stacks.
 * Fix: the runtime-detection branch declared its parameters as
 * stack_start/stack_end but the body used start/end, so the macro's
 * arguments were never substituted -- it silently assigned to caller
 * variables that merely happened to be named start/end. */
#if STACK_GROW_DIRECTION < 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
#elif STACK_GROW_DIRECTION > 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
#else
#define GET_STACK_BOUNDS(start, end, appendix) \
((STACK_END < STACK_START) ? \
((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
#endif
- void rb_gc_mark_encodings(void);
/*
 * Run one full mark-and-sweep cycle on objspace.
 *
 * Returns FALSE when the heap is not yet initialized; TRUE otherwise.
 * If GC is disabled or already running, no collection happens -- the
 * heap is simply grown if the freelist is empty, and TRUE is returned.
 */
static int
garbage_collect(rb_objspace_t *objspace)
{
    struct gc_list *list;
    INIT_GC_PROF_PARAMS;
    if (GC_NOTIFY) printf("start garbage_collect()\n");
    if (!heaps) {
        return FALSE;
    }
    if (dont_gc || during_gc) {
        /* cannot collect now: satisfy the allocation by growing instead */
        if (!freelist) {
            if (!heaps_increment(objspace)) {
                set_heaps_increment(objspace);
                heaps_increment(objspace);
            }
        }
        return TRUE;
    }
    during_gc++;
    objspace->count++;
    GC_PROF_TIMER_START;
    GC_PROF_MARK_TIMER_START;
    /* --- mark phase: trace every root set --- */
    gc_inner->gc_mark_core(objspace);
    if (finalizer_table) {
        mark_tbl(objspace, finalizer_table, 0);
    }
    rb_gc_mark_threads();
    rb_gc_mark_symbols();
    rb_gc_mark_encodings();
    /* mark protected global variables */
    for (list = global_List; list; list = list->next) {
        rb_gc_mark_maybe(*list->varptr);
    }
    rb_mark_end_proc();
    rb_gc_mark_global_tbl();
    mark_tbl(objspace, rb_class_tbl, 0);
    /* mark generic instance variables for special constants */
    rb_mark_generic_ivar_tbl();
    rb_gc_mark_parser();
    /* drain the deferred-mark stack; on overflow fall back to a full
     * rescan of the heap */
    while (!MARK_STACK_EMPTY) {
        if (mark_stack_overflow) {
            gc_mark_all(objspace);
        }
        else {
            gc_mark_rest(objspace);
        }
    }
    GC_PROF_MARK_TIMER_STOP;
    GC_PROF_SWEEP_TIMER_START;
    gc_sweep(objspace);
    GC_PROF_SWEEP_TIMER_STOP;
    GC_PROF_TIMER_STOP;
    if (GC_NOTIFY) printf("end garbage_collect()\n");
    return TRUE;
}
- static int
- rb_garbage_collect_tmp(void)
- {
- return garbage_collect(gc_inner->get_objspace());
- }
- /*
- * Document-class: ObjectSpace
- *
- * The <code>ObjectSpace</code> module contains a number of routines
- * that interact with the garbage collection facility and allow you to
- * traverse all living objects with an iterator.
- *
- * <code>ObjectSpace</code> also provides support for object
- * finalizers, procs that will be called when a specific object is
- * about to be destroyed by garbage collection.
- *
- * include ObjectSpace
- *
- *
- * a = "A"
- * b = "B"
- * c = "C"
- *
- *
- * define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
- * define_finalizer(a, proc {|id| puts "Finalizer two on #{id}" })
- * define_finalizer(b, proc {|id| puts "Finalizer three on #{id}" })
- *
- * <em>produces:</em>
- *
- * Finalizer three on 537763470
- * Finalizer one on 537763480
- * Finalizer two on 537763480
- *
- */
- static void
- Init_heap_tmp(void)
- {
- init_heap(gc_inner->get_objspace());
- }
- /*
 * rb_objspace_each_objects() is a special C API for walking through the
 * Ruby object space.  It is difficult to use correctly; frankly, you
 * should not use it unless you have read the source code of this
 * function and understand exactly what it does.
- *
- * 'callback' will be called several times (the number of heap slot,
- * at current implementation) with:
- * vstart: a pointer to the first living object of the heap_slot.
- * vend: a pointer to next to the valid heap_slot area.
- * stride: a distance to next VALUE.
- *
- * If callback() returns non-zero, the iteration will be stopped.
- *
- * This is a sample callback code to iterate liveness objects:
- *
 * int
 * sample_callback(void *vstart, void *vend, int stride, void *data) {
 *   VALUE v = (VALUE)vstart;
 *   for (; v != (VALUE)vend; v += stride) {
 *     if (RBASIC(v)->flags) { // liveness check
 *       // do something with live object 'v'
 *     }
 *   }
 *   return 0; // continue the iteration
 * }
- *
- * Note: 'vstart' is not a top of heap_slot. This point the first
- * living object to grasp at least one object to avoid GC issue.
- * This means that you can not walk through all Ruby object slot
- * including freed object slot.
- *
- * Note: On this implementation, 'stride' is same as sizeof(RVALUE).
- * However, there are possibilities to pass variable values with
- * 'stride' with some reasons. You must use stride instead of
- * use some constant value in the iteration.
- */
/* Iterate over every heap page, invoking callback(vstart, vend, stride,
 * data) once per page.  Iteration stops early if the callback returns
 * non-zero.  See the long comment above for the exact contract. */
static void
rb_objspace_each_objects_tmp(int (*callback)(void *vstart, void *vend,
                                             size_t stride, void *d),
                             void *data)
{
    size_t i;
    RVALUE *membase = 0;
    RVALUE *pstart, *pend;
    rb_objspace_t *objspace = gc_inner->get_objspace();
    volatile VALUE v;  /* keeps one object visibly live across the callback */
    i = 0;
    while (i < heaps_used) {
        /* the callback may trigger GC, which can reorder/remove pages in
         * heaps[]; re-locate our position by membase (page base address)
         * before touching heaps[i] again */
        while (0 < i && (uintptr_t)membase < (uintptr_t)heaps[i-1].membase)
            i--;
        while (i < heaps_used && (uintptr_t)heaps[i].membase <= (uintptr_t)membase )
            i++;
        if (heaps_used <= i)
            break;
        membase = heaps[i].membase;
        pstart = heaps[i].slot;
        pend = pstart + heaps[i].limit;
        /* skip leading free cells so vstart points at a live object */
        for (; pstart != pend; pstart++) {
            if (pstart->as.basic.flags & ~FL_ALIGNOFF) {
                v = (VALUE)pstart; /* acquire to save this object */
                break;
            }
        }
        if (pstart != pend) {
            if ((*callback)(pstart, pend, sizeof(RVALUE), data)) {
                return;
            }
        }
    }
    return;
}
/* State for ObjectSpace.each_object iteration (os_obj_of_i callback). */
struct os_each_struct {
    size_t num;  /* count of objects yielded so far */
    VALUE of;    /* class/module filter, or 0 for no filter */
};
/* Page callback for os_obj_of(): yield every visible live object in
 * [vstart, vend) that matches oes->of (when given), counting yields.
 * Skips free cells, GC bitmap cells, and hidden objects (klass == 0). */
static int
os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
{
    struct os_each_struct *oes = (struct os_each_struct *)data;
    RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend;
    volatile VALUE v;  /* keeps the yielded object visible to the GC stack scan */
    for (; p != pend; p++) {
        if (!IS_FREE_CELL(p)) {
            if (gc_inner->os_obj_of_check_type(p)) {
                if (BUILTIN_TYPE(p) == T_BITMAP) continue;
                if (!p->as.basic.klass) continue;  /* internal/hidden object */
                v = (VALUE)p;
                if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
                    rb_yield(v);
                    oes->num++;
                }
            }
        }
    }
    return 0;  /* never stop early */
}
- static VALUE
- os_obj_of(VALUE of)
- {
- struct os_each_struct oes;
- oes.num = 0;
- oes.of = of;
- rb_objspace_each_objects(os_obj_of_i, &oes);
- return SIZET2NUM(oes.num);
- }
- /*
- * call-seq:
- * ObjectSpace.each_object([module]) {|obj| ... } => fixnum
- *
- * Calls the block once for each living, nonimmediate object in this
- * Ruby process. If <i>module</i> is specified, calls the block
- * for only those classes or modules that match (or are a subclass of)
- * <i>module</i>. Returns the number of objects found. Immediate
- * objects (<code>Fixnum</code>s, <code>Symbol</code>s
- * <code>true</code>, <code>false</code>, and <code>nil</code>) are
- * never returned. In the example below, <code>each_object</code>
- * returns both the numbers we defined and several constants defined in
- * the <code>Math</code> module.
- *
- * a = 102.7
- * b = 95 # Won't be returned
- * c = 12345678987654321
- * count = ObjectSpace.each_object(Numeric) {|x| p x }
- * puts "Total count: #{count}"
- *
- * <em>produces:</em>
- *
- * 12345678987654321
- * 102.7
- * 2.71828182845905
- * 3.14159265358979
- * 2.22044604925031e-16
- * 1.7976931348623157e+308
- * 2.2250738585072e-308
- * Total count: 7
- *
- */
- static VALUE
- os_each_obj(int argc, VALUE *argv, VALUE os)
- {
- VALUE of;
- rb_secure(4);
- if (argc == 0) {
- of = 0;
- }
- else {
- rb_scan_args(argc, argv, "01", &of);
- }
- RETURN_ENUMERATOR(os, 1, &of);
- return os_obj_of(of);
- }
- /*
- * call-seq:
- * ObjectSpace.undefine_finalizer(obj)
- *
- * Removes all finalizers for <i>obj</i>.
- *
- */
/* ObjectSpace.undefine_finalizer(obj): drop all finalizers registered
 * for obj and clear its FL_FINALIZE flag.  Returns obj. */
static VALUE
undefine_final(VALUE os, VALUE obj)
{
    /* note: the finalizer_table macro below reads this local */
    rb_objspace_t *objspace = gc_inner->get_objspace();
    if (OBJ_FROZEN(obj)) rb_error_frozen("object");
    if (finalizer_table) {
        st_delete(finalizer_table, (st_data_t*)&obj, 0);
    }
    FL_UNSET(obj, FL_FINALIZE);
    return obj;
}
- /*
- * call-seq:
- * ObjectSpace.define_finalizer(obj, aProc=proc())
- *
- * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
- * was destroyed.
- *
- */
/* ObjectSpace.define_finalizer(obj, aProc=proc()): register block (or
 * the given callable) to run after obj is collected.  Returns the
 * frozen [safe_level, proc] pair that was stored. */
static VALUE
define_final(int argc, VALUE *argv, VALUE os)
{
    /* note: the finalizer_table macro below reads this local */
    rb_objspace_t *objspace = gc_inner->get_objspace();
    VALUE obj, block, table;
    rb_scan_args(argc, argv, "11", &obj, &block);
    if (OBJ_FROZEN(obj)) rb_error_frozen("object");
    if (argc == 1) {
        /* no explicit proc given: use the attached block */
        block = rb_block_proc();
    }
    else if (!rb_respond_to(block, rb_intern("call"))) {
        rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
                 rb_obj_classname(block));
    }
    if (!FL_ABLE(obj)) {
        /* immediates cannot carry flags, hence no finalizers */
        rb_raise(rb_eArgError, "cannot define finalizer for %s",
                 rb_obj_classname(obj));
    }
    RBASIC(obj)->flags |= FL_FINALIZE;
    /* store the caller's safe level alongside the proc */
    block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block);
    OBJ_FREEZE(block);
    if (!finalizer_table) {
        finalizer_table = st_init_numtable();
    }
    if (st_lookup(finalizer_table, obj, &table)) {
        rb_ary_push(table, block);
    }
    else {
        table = rb_ary_new3(1, block);
        RBASIC(table)->klass = 0;  /* hide the array from ObjectSpace */
        st_add_direct(finalizer_table, obj, table);
    }
    return block;
}
/* Copy obj's finalizers to dest (used by dup/clone): dest shares the
 * same finalizer table entry and gets FL_FINALIZE set. */
static void
rb_gc_copy_finalizer_tmp(VALUE dest, VALUE obj)
{
    /* note: the finalizer_table macro below reads this local */
    rb_objspace_t *objspace = gc_inner->get_objspace();
    VALUE table;
    if (!finalizer_table) return;
    if (!FL_TEST(obj, FL_FINALIZE)) return;
    if (st_lookup(finalizer_table, obj, &table)) {
        st_insert(finalizer_table, dest, table);
    }
    FL_SET(dest, FL_FINALIZE);
}
/* rb_protect() thunk: arg packs {finalizer proc, argument array, safe
 * level}; evaluates one finalizer under that safe level. */
static VALUE
run_single_final(VALUE arg)
{
    VALUE *args = (VALUE *)arg;
    rb_eval_cmd(args[0], args[1], (int)args[2]);
    return Qnil;
}
- static void
- run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE objid, VALUE table)
- {
- long i;
- int status;
- VALUE args[3];
- args[1] = 0;
- args[2] = (VALUE)rb_safe_level();
- if (!args[1] && RARRAY_LEN(table) > 0) {
- args[1] = rb_obj_freeze(rb_ary_new3(1, objid));
- }
- for (i=0; i<RARRAY_LEN(table); i++) {
- VALUE final = RARRAY_PTR(table)[i];
- args[0] = RARRAY_PTR(final)[1];
- args[2] = FIX2INT(RARRAY_PTR(final)[0]);
- rb_protect(run_single_final, (VALUE)args, &status);
- }
- }
/* Finalize one object: call its C-level dfree (typed or plain Data),
 * then any Ruby-level finalizers registered in finalizer_table.  The
 * object id is captured first, before the object is unusable. */
static void
run_final(rb_objspace_t *objspace, VALUE obj)
{
    VALUE table, objid;
    RUBY_DATA_FUNC free_func = 0;
    objid = rb_obj_id(obj); /* make obj into id */
    RBASIC(obj)->klass = 0; /* hide the carcass from ObjectSpace */
    if (RTYPEDDATA_P(obj)) {
        free_func = RTYPEDDATA_TYPE(obj)->dfree;
    }
    else {
        free_func = RDATA(obj)->dfree;
    }
    if (free_func) {
        (*free_func)(DATA_PTR(obj));
    }
    /* remove and run Ruby finalizers, if any were registered */
    if (finalizer_table &&
        st_delete(finalizer_table, (st_data_t*)&obj, &table)) {
        run_finalizer(objspace, obj, objid, table);
    }
}
- static void
- finalize_deferred(rb_objspace_t *ob…
Large files files are truncated, but you can click here to view the full file