PageRenderTime 354ms CodeModel.GetById 30ms app.highlight 207ms RepoModel.GetById 15ms app.codeStats 20ms

/libs/headers/gc/private/gc_priv.h

http://github.com/nddrylliog/ooc
C++ Header | 1853 lines | 986 code | 179 blank | 688 comment | 128 complexity | 879699c5a094c2b5c5a26f53b2a1ea35 MD5 | raw file

Large files are truncated, but you can click here to view the full file

   1/* 
   2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
   3 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
   4 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
   5 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
   6 *
   7 *
   8 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
   9 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
  10 *
  11 * Permission is hereby granted to use or copy this program
  12 * for any purpose,  provided the above notices are retained on all copies.
  13 * Permission to modify the code and to distribute modified code is granted,
  14 * provided the above notices are retained, and a notice that the code was
  15 * modified is included with the above copyright notice.
  16 */
  17 
  18
  19# ifndef GC_PRIVATE_H
  20# define GC_PRIVATE_H
  21
  22# include <stdlib.h>
  23# if !(defined( sony_news ) )
  24#   include <stddef.h>
  25# endif
  26
  27#ifdef DGUX
  28#   include <sys/types.h>
  29#   include <sys/time.h>
  30#   include <sys/resource.h>
  31#endif /* DGUX */
  32
  33#ifdef BSD_TIME
  34#   include <sys/types.h>
  35#   include <sys/time.h>
  36#   include <sys/resource.h>
  37#endif /* BSD_TIME */
  38
  39#ifdef PARALLEL_MARK
  40#   define AO_REQUIRE_CAS
  41#endif
  42
  43#ifndef _GC_H
  44#   include "../gc.h"
  45#endif
  46
  47#ifndef GC_TINY_FL_H
  48#   include "../gc_tiny_fl.h"
  49#endif
  50
  51#ifndef GC_MARK_H
  52#   include "../gc_mark.h"
  53#endif
  54
  55typedef GC_word word;
  56typedef GC_signed_word signed_word;
  57typedef unsigned int unsigned32;
  58
  59typedef int GC_bool;
  60# define TRUE 1
  61# define FALSE 0
  62
  63typedef char * ptr_t;	/* A generic pointer to which we can add	*/
  64			/* byte displacements and which can be used	*/
  65			/* for address comparisons.			*/
  66
  67# ifndef GCCONFIG_H
  68#   include "gcconfig.h"
  69# endif
  70
  71# ifndef HEADERS_H
  72#   include "gc_hdrs.h"
  73# endif
  74
  75#if __GNUC__ >= 3
  76# define EXPECT(expr, outcome) __builtin_expect(expr,outcome)
  77  /* Equivalent to (expr), but predict that usually (expr)==outcome. */
  78# define INLINE inline
  79#else
  80# define EXPECT(expr, outcome) (expr)
  81# define INLINE
  82#endif /* __GNUC__ */
  83
  84# ifndef GC_LOCKS_H
  85#   include "gc_locks.h"
  86# endif
  87
  88# ifdef STACK_GROWS_DOWN
  89#   define COOLER_THAN >
  90#   define HOTTER_THAN <
  91#   define MAKE_COOLER(x,y) if ((x)+(y) > (x)) {(x) += (y);} \
  92			    else {(x) = (ptr_t)ONES;}
  93#   define MAKE_HOTTER(x,y) (x) -= (y)
  94# else
  95#   define COOLER_THAN <
  96#   define HOTTER_THAN >
  97#   define MAKE_COOLER(x,y) if ((x)-(y) < (x)) {(x) -= (y);} else {(x) = 0;}
  98#   define MAKE_HOTTER(x,y) (x) += (y)
  99# endif
 100
 101#if defined(AMIGA) && defined(__SASC)
 102#   define GC_FAR __far
 103#else
 104#   define GC_FAR
 105#endif
 106
 107
 108/*********************************/
 109/*                               */
 110/* Definitions for conservative  */
 111/* collector                     */
 112/*                               */
 113/*********************************/
 114
 115/*********************************/
 116/*                               */
 117/* Easily changeable parameters  */
 118/*                               */
 119/*********************************/
 120
 121/* #define STUBBORN_ALLOC */
  122		    /* Enable stubborn allocation, and thus a limited	*/
 123		    /* form of incremental collection w/o dirty bits.	*/
 124
 125/* #define ALL_INTERIOR_POINTERS */
 126		    /* Forces all pointers into the interior of an 	*/
 127		    /* object to be considered valid.  Also causes the	*/
 128		    /* sizes of all objects to be inflated by at least 	*/
 129		    /* one byte.  This should suffice to guarantee	*/
 130		    /* that in the presence of a compiler that does	*/
 131		    /* not perform garbage-collector-unsafe		*/
 132		    /* optimizations, all portable, strictly ANSI	*/
 133		    /* conforming C programs should be safely usable	*/
 134		    /* with malloc replaced by GC_malloc and free	*/
 135		    /* calls removed.  There are several disadvantages: */
 136		    /* 1. There are probably no interesting, portable,	*/
 137		    /*    strictly ANSI	conforming C programs.		*/
 138		    /* 2. This option makes it hard for the collector	*/
 139		    /*    to allocate space that is not ``pointed to''  */
 140		    /*    by integers, etc.  Under SunOS 4.X with a 	*/
  141		    /*    statically linked libc, we empirically	*/
 142		    /*    observed that it would be difficult to 	*/
 143		    /*	  allocate individual objects larger than 100K.	*/
 144		    /* 	  Even if only smaller objects are allocated,	*/
 145		    /*    more swap space is likely to be needed.       */
 146		    /*    Fortunately, much of this will never be	*/
 147		    /*    touched.					*/
 148		    /* If you can easily avoid using this option, do.	*/
 149		    /* If not, try to keep individual objects small.	*/
 150		    /* This is now really controlled at startup,	*/
 151		    /* through GC_all_interior_pointers.		*/
 152		    
 153
 154#define GC_INVOKE_FINALIZERS() GC_notify_or_invoke_finalizers()
 155
 156#if !defined(DONT_ADD_BYTE_AT_END)
 157# define EXTRA_BYTES GC_all_interior_pointers
 158# define MAX_EXTRA_BYTES 1
 159#else
 160# define EXTRA_BYTES 0
 161# define MAX_EXTRA_BYTES 0
 162#endif
 163
 164
 165# ifndef LARGE_CONFIG
 166#   define MINHINCR 16	 /* Minimum heap increment, in blocks of HBLKSIZE  */
 167			 /* Must be multiple of largest page size.	   */
 168#   define MAXHINCR 2048 /* Maximum heap increment, in blocks              */
 169# else
 170#   define MINHINCR 64
 171#   define MAXHINCR 4096
 172# endif
 173
 174# define TIME_LIMIT 50	   /* We try to keep pause times from exceeding	 */
 175			   /* this by much. In milliseconds.		 */
 176
 177# define BL_LIMIT GC_black_list_spacing
 178			   /* If we need a block of N bytes, and we have */
 179			   /* a block of N + BL_LIMIT bytes available, 	 */
 180			   /* and N > BL_LIMIT,				 */
 181			   /* but all possible positions in it are 	 */
 182			   /* blacklisted, we just use it anyway (and	 */
 183			   /* print a warning, if warnings are enabled). */
 184			   /* This risks subsequently leaking the block	 */
 185			   /* due to a false reference.  But not using	 */
 186			   /* the block risks unreasonable immediate	 */
 187			   /* heap growth.				 */
 188
 189/*********************************/
 190/*                               */
 191/* Stack saving for debugging	 */
 192/*                               */
 193/*********************************/
 194
 195#ifdef NEED_CALLINFO
    /* One saved call-stack frame: the caller's program counter plus	*/
    /* (optionally) its first NARGS argument words.  Arrays of these	*/
    /* are filled by GC_save_callers and printed by GC_print_callers	*/
    /* (declared below under SAVE_CALL_CHAIN).				*/
    struct callinfo {
	word ci_pc;  	/* Caller, not callee, pc	*/
#	if NARGS > 0
	    word ci_arg[NARGS];	/* bit-wise complement to avoid retention */
#	endif
#	if (NFRAMES * (NARGS + 1)) % 2 == 1
	    /* Pad so a callinfo array occupies an even number of words;	*/
	    /* an odd count would likely cause an alignment problem.	*/
	    word ci_dummy;
#	endif
    };
 206#endif
 207
 208#ifdef SAVE_CALL_CHAIN
 209
 210/* Fill in the pc and argument information for up to NFRAMES of my	*/
 211/* callers.  Ignore my frame and my callers frame.			*/
 212void GC_save_callers(struct callinfo info[NFRAMES]);
 213  
 214void GC_print_callers(struct callinfo info[NFRAMES]);
 215
 216#endif
 217
 218
 219/*********************************/
 220/*                               */
 221/* OS interface routines	 */
 222/*                               */
 223/*********************************/
 224
 225#ifdef BSD_TIME
 226#   undef CLOCK_TYPE
 227#   undef GET_TIME
 228#   undef MS_TIME_DIFF
 229#   define CLOCK_TYPE struct timeval
 230#   define GET_TIME(x) { struct rusage rusage; \
 231			 getrusage (RUSAGE_SELF,  &rusage); \
 232			 x = rusage.ru_utime; }
 233#   define MS_TIME_DIFF(a,b) ((double) (a.tv_sec - b.tv_sec) * 1000.0 \
 234                               + (double) (a.tv_usec - b.tv_usec) / 1000.0)
 235#else /* !BSD_TIME */
 236# if defined(MSWIN32) || defined(MSWINCE)
 237#   include <windows.h>
 238#   include <winbase.h>
 239#   define CLOCK_TYPE DWORD
 240#   define GET_TIME(x) x = GetTickCount()
 241#   define MS_TIME_DIFF(a,b) ((long)((a)-(b)))
 242# else /* !MSWIN32, !MSWINCE, !BSD_TIME */
 243#   include <time.h>
 244#   if !defined(__STDC__) && defined(SPARC) && defined(SUNOS4)
 245      clock_t clock();	/* Not in time.h, where it belongs	*/
 246#   endif
 247#   if defined(FREEBSD) && !defined(CLOCKS_PER_SEC)
 248#     include <machine/limits.h>
 249#     define CLOCKS_PER_SEC CLK_TCK
 250#   endif
 251#   if !defined(CLOCKS_PER_SEC)
 252#     define CLOCKS_PER_SEC 1000000
 253/*
 254 * This is technically a bug in the implementation.  ANSI requires that
 255 * CLOCKS_PER_SEC be defined.  But at least under SunOS4.1.1, it isn't.
 256 * Also note that the combination of ANSI C and POSIX is incredibly gross
 257 * here. The type clock_t is used by both clock() and times().  But on
 258 * some machines these use different notions of a clock tick,  CLOCKS_PER_SEC
 259 * seems to apply only to clock.  Hence we use it here.  On many machines,
 260 * including SunOS, clock actually uses units of microseconds (which are
 261 * not really clock ticks).
 262 */
 263#   endif
 264#   define CLOCK_TYPE clock_t
 265#   define GET_TIME(x) x = clock()
 266#   define MS_TIME_DIFF(a,b) ((unsigned long) \
 267		(1000.0*(double)((a)-(b))/(double)CLOCKS_PER_SEC))
 268# endif /* !MSWIN32 */
 269#endif /* !BSD_TIME */
 270
 271/* We use bzero and bcopy internally.  They may not be available.	*/
 272# if defined(SPARC) && defined(SUNOS4)
 273#   define BCOPY_EXISTS
 274# endif
 275# if defined(M68K) && defined(AMIGA)
 276#   define BCOPY_EXISTS
 277# endif
 278# if defined(M68K) && defined(NEXT)
 279#   define BCOPY_EXISTS
 280# endif
 281# if defined(VAX)
 282#   define BCOPY_EXISTS
 283# endif
 284# if defined(AMIGA)
 285#   include <string.h>
 286#   define BCOPY_EXISTS
 287# endif
 288# if defined(DARWIN)
 289#   include <string.h>
 290#   define BCOPY_EXISTS
 291# endif
 292
 293# ifndef BCOPY_EXISTS
 294#   include <string.h>
 295#   define BCOPY(x,y,n) memcpy(y, x, (size_t)(n))
 296#   define BZERO(x,n)  memset(x, 0, (size_t)(n))
 297# else
 298#   define BCOPY(x,y,n) bcopy((void *)(x),(void *)(y),(size_t)(n))
 299#   define BZERO(x,n) bzero((void *)(x),(size_t)(n))
 300# endif
 301
 302/*
 303 * Stop and restart mutator threads.
 304 */
 305# ifdef PCR
 306#     include "th/PCR_ThCtl.h"
 307#     define STOP_WORLD() \
 308 	PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_stopNormal, \
 309 				   PCR_allSigsBlocked, \
 310 				   PCR_waitForever)
 311#     define START_WORLD() \
 312	PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_null, \
 313 				   PCR_allSigsBlocked, \
 314 				   PCR_waitForever);
 315# else
 316#   if defined(GC_SOLARIS_THREADS) || defined(GC_WIN32_THREADS) \
 317	|| defined(GC_PTHREADS)
 318      void GC_stop_world();
 319      void GC_start_world();
 320#     define STOP_WORLD() GC_stop_world()
 321#     define START_WORLD() GC_start_world()
 322#   else
 323#     define STOP_WORLD()
 324#     define START_WORLD()
 325#   endif
 326# endif
 327
 328/* Abandon ship */
 329# ifdef PCR
 330#   define ABORT(s) PCR_Base_Panic(s)
 331# else
 332#   ifdef SMALL_CONFIG
 333#	define ABORT(msg) abort()
 334#   else
 335	GC_API void GC_abort(const char * msg);
 336#       define ABORT(msg) GC_abort(msg)
 337#   endif
 338# endif
 339
 340/* Exit abnormally, but without making a mess (e.g. out of memory) */
 341# ifdef PCR
 342#   define EXIT() PCR_Base_Exit(1,PCR_waitForever)
 343# else
 344#   define EXIT() (void)exit(1)
 345# endif
 346
 347/* Print warning message, e.g. almost out of memory.	*/
 348# define WARN(msg,arg) (*GC_current_warn_proc)("GC Warning: " msg, (GC_word)(arg))
 349extern GC_warn_proc GC_current_warn_proc;
 350
 351/* Get environment entry */
 352#if !defined(NO_GETENV)
 353#   if defined(EMPTY_GETENV_RESULTS)
	/* Workaround for a reputed Wine bug: getenv() may return an	*/
	/* empty string instead of NULL for an unset variable.		*/
	/* Returns NULL both when the variable is unset and when its	*/
	/* value is empty, so callers can treat the cases uniformly.	*/
	static inline char * fixed_getenv(const char *name)
	{
	  char * tmp = getenv(name);
	  /* Test the first character directly instead of calling	*/
	  /* strlen(): <string.h> is only conditionally included in	*/
	  /* this header, so strlen may not be declared here.		*/
	  if (tmp == 0 || *tmp == '\0')
	    return 0;
	  return tmp;
	}
 362#       define GETENV(name) fixed_getenv(name)
 363#   else
 364#       define GETENV(name) getenv(name)
 365#   endif
 366#else
 367#   define GETENV(name) 0
 368#endif
 369
 370#if defined(DARWIN)
 371#	if defined(POWERPC)
 372#		if CPP_WORDSZ == 32
 373#                 define GC_THREAD_STATE_T ppc_thread_state_t
 374#		  define GC_MACH_THREAD_STATE PPC_THREAD_STATE
 375#		  define GC_MACH_THREAD_STATE_COUNT PPC_THREAD_STATE_COUNT
 376#		  define GC_MACH_HEADER mach_header
 377#		  define GC_MACH_SECTION section
 378#                 define GC_GETSECTBYNAME getsectbynamefromheader
 379#	        else
 380#                 define GC_THREAD_STATE_T ppc_thread_state64_t
 381#		  define GC_MACH_THREAD_STATE PPC_THREAD_STATE64
 382#		  define GC_MACH_THREAD_STATE_COUNT PPC_THREAD_STATE64_COUNT
 383#		  define GC_MACH_HEADER mach_header_64
 384#		  define GC_MACH_SECTION section_64
 385#                 define GC_GETSECTBYNAME getsectbynamefromheader_64
 386#		endif
 387#	elif defined(I386) || defined(X86_64)
 388#               if CPP_WORDSZ == 32
 389#		  define GC_THREAD_STATE_T x86_thread_state32_t
 390#		  define GC_MACH_THREAD_STATE x86_THREAD_STATE32
 391#		  define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE32_COUNT
 392#		  define GC_MACH_HEADER mach_header
 393#		  define GC_MACH_SECTION section
 394#                 define GC_GETSECTBYNAME getsectbynamefromheader
 395#               else
 396#		  define GC_THREAD_STATE_T x86_thread_state64_t
 397#		  define GC_MACH_THREAD_STATE x86_THREAD_STATE64
 398#		  define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE64_COUNT
 399#		  define GC_MACH_HEADER mach_header_64
 400#		  define GC_MACH_SECTION section_64
 401#                 define GC_GETSECTBYNAME getsectbynamefromheader_64
 402#               endif
 403#	else
 404#		error define GC_THREAD_STATE_T
 405#		define GC_MACH_THREAD_STATE MACHINE_THREAD_STATE
 406#		define GC_MACH_THREAD_STATE_COUNT MACHINE_THREAD_STATE_COUNT
 407#	endif
 408/* Try to work out the right way to access thread state structure members.
 409   The structure has changed its definition in different Darwin versions.
 410   This now defaults to the (older) names without __, thus hopefully,
 411   not breaking any existing Makefile.direct builds.  */
 412#       if defined (HAS_PPC_THREAD_STATE___R0) \
 413	  || defined (HAS_PPC_THREAD_STATE64___R0) \
 414	  || defined (HAS_X86_THREAD_STATE32___EAX) \
 415	  || defined (HAS_X86_THREAD_STATE64___RAX)
 416#         define THREAD_FLD(x) __ ## x
 417#       else
 418#         define THREAD_FLD(x) x
 419#       endif
 420#endif
 421
 422/*********************************/
 423/*                               */
 424/* Word-size-dependent defines   */
 425/*                               */
 426/*********************************/
 427
 428#if CPP_WORDSZ == 32
 429#  define WORDS_TO_BYTES(x)   ((x)<<2)
 430#  define BYTES_TO_WORDS(x)   ((x)>>2)
 431#  define LOGWL               ((word)5)    /* log[2] of CPP_WORDSZ */
 432#  define modWORDSZ(n) ((n) & 0x1f)        /* n mod size of word	    */
 433#  if ALIGNMENT != 4
 434#	define UNALIGNED
 435#  endif
 436#endif
 437
 438#if CPP_WORDSZ == 64
 439#  define WORDS_TO_BYTES(x)   ((x)<<3)
 440#  define BYTES_TO_WORDS(x)   ((x)>>3)
 441#  define LOGWL               ((word)6)    /* log[2] of CPP_WORDSZ */
 442#  define modWORDSZ(n) ((n) & 0x3f)        /* n mod size of word	    */
 443#  if ALIGNMENT != 8
 444#	define UNALIGNED
 445#  endif
 446#endif
 447
 448/* The first TINY_FREELISTS free lists correspond to the first	*/
 449/* TINY_FREELISTS multiples of GRANULE_BYTES, i.e. we keep 	*/
 450/* separate free lists for each multiple of GRANULE_BYTES	*/
 451/* up to (TINY_FREELISTS-1) * GRANULE_BYTES.  After that they	*/
 452/* may be spread out further. 					*/
 453#include "../gc_tiny_fl.h"
 454#define GRANULE_BYTES GC_GRANULE_BYTES
 455#define TINY_FREELISTS GC_TINY_FREELISTS
 456
 457#define WORDSZ ((word)CPP_WORDSZ)
 458#define SIGNB  ((word)1 << (WORDSZ-1))
 459#define BYTES_PER_WORD      ((word)(sizeof (word)))
 460#define ONES                ((word)(signed_word)(-1))
 461#define divWORDSZ(n) ((n) >> LOGWL)	   /* divide n by size of word      */
 462
 463#if GRANULE_BYTES == 8
 464# define BYTES_TO_GRANULES(n) ((n)>>3)
 465# define GRANULES_TO_BYTES(n) ((n)<<3)
 466# if CPP_WORDSZ == 64
 467#   define GRANULES_TO_WORDS(n) (n)
 468# elif CPP_WORDSZ == 32
 469#   define GRANULES_TO_WORDS(n) ((n)<<1)
 470# else
 471#   define GRANULES_TO_WORDS(n) BYTES_TO_WORDS(GRANULES_TO_BYTES(n))
 472# endif
 473#elif GRANULE_BYTES == 16
 474# define BYTES_TO_GRANULES(n) ((n)>>4)
 475# define GRANULES_TO_BYTES(n) ((n)<<4)
 476# if CPP_WORDSZ == 64
 477#   define GRANULES_TO_WORDS(n) ((n)<<1)
 478# elif CPP_WORDSZ == 32
 479#   define GRANULES_TO_WORDS(n) ((n)<<2)
 480# else
 481#   define GRANULES_TO_WORDS(n) BYTES_TO_WORDS(GRANULES_TO_BYTES(n))
 482# endif
 483#else
 484# error Bad GRANULE_BYTES value
 485#endif
 486
 487/*********************/
 488/*                   */
 489/*  Size Parameters  */
 490/*                   */
 491/*********************/
 492
 493/*  Heap block size, bytes. Should be power of 2.		*/
 494/* Incremental GC with MPROTECT_VDB currently requires the	*/
 495/* page size to be a multiple of HBLKSIZE.  Since most modern	*/
 496/* architectures support variable page sizes down to 4K, and	*/
 497/* X86 is generally 4K, we now default to 4K, except for	*/
 498/*   Alpha: Seems to be used with 8K pages.			*/
 499/*   SMALL_CONFIG: Want less block-level fragmentation.		*/
 500
 501#ifndef HBLKSIZE
 502# ifdef SMALL_CONFIG
 503#   define CPP_LOG_HBLKSIZE 10
 504# else
 505#   if defined(ALPHA)
 506#     define CPP_LOG_HBLKSIZE 13
 507#   else
 508#     define CPP_LOG_HBLKSIZE 12
 509#   endif
 510# endif
 511#else
 512# if HBLKSIZE == 512
 513#   define CPP_LOG_HBLKSIZE 9
 514# endif
 515# if HBLKSIZE == 1024
 516#   define CPP_LOG_HBLKSIZE 10
 517# endif
 518# if HBLKSIZE == 2048
 519#   define CPP_LOG_HBLKSIZE 11
 520# endif
 521# if HBLKSIZE == 4096
 522#   define CPP_LOG_HBLKSIZE 12
 523# endif
 524# if HBLKSIZE == 8192
 525#   define CPP_LOG_HBLKSIZE 13
 526# endif
 527# if HBLKSIZE == 16384
 528#   define CPP_LOG_HBLKSIZE 14
 529# endif
 530# ifndef CPP_LOG_HBLKSIZE
 531    --> fix HBLKSIZE
 532# endif
 533# undef HBLKSIZE
 534#endif
 535
 536# define CPP_HBLKSIZE (1 << CPP_LOG_HBLKSIZE)
 537# define LOG_HBLKSIZE   ((size_t)CPP_LOG_HBLKSIZE)
 538# define HBLKSIZE ((size_t)CPP_HBLKSIZE)
 539
 540
 541/*  max size objects supported by freelist (larger objects are	*/
 542/*  allocated directly with allchblk(), by rounding to the next */
 543/*  multiple of HBLKSIZE.					*/
 544
 545#define CPP_MAXOBJBYTES (CPP_HBLKSIZE/2)
 546#define MAXOBJBYTES ((size_t)CPP_MAXOBJBYTES)
 547#define CPP_MAXOBJWORDS BYTES_TO_WORDS(CPP_MAXOBJBYTES)
 548#define MAXOBJWORDS ((size_t)CPP_MAXOBJWORDS)
 549#define CPP_MAXOBJGRANULES BYTES_TO_GRANULES(CPP_MAXOBJBYTES)
 550#define MAXOBJGRANULES ((size_t)CPP_MAXOBJGRANULES)
 551		
 552# define divHBLKSZ(n) ((n) >> LOG_HBLKSIZE)
 553
 554# define HBLK_PTR_DIFF(p,q) divHBLKSZ((ptr_t)p - (ptr_t)q)
 555	/* Equivalent to subtracting 2 hblk pointers.	*/
 556	/* We do it this way because a compiler should	*/
 557	/* find it hard to use an integer division	*/
 558	/* instead of a shift.  The bundled SunOS 4.1	*/
 559	/* o.w. sometimes pessimizes the subtraction to	*/
 560	/* involve a call to .div.			*/
 561 
 562# define modHBLKSZ(n) ((n) & (HBLKSIZE-1))
 563 
 564# define HBLKPTR(objptr) ((struct hblk *)(((word) (objptr)) & ~(HBLKSIZE-1)))
 565
 566# define HBLKDISPL(objptr) (((size_t) (objptr)) & (HBLKSIZE-1))
 567
 568/* Round up byte allocation requests to integral number of words, etc. */
 569# define ROUNDED_UP_WORDS(n) \
 570	BYTES_TO_WORDS((n) + (WORDS_TO_BYTES(1) - 1 + EXTRA_BYTES))
 571# define ROUNDED_UP_GRANULES(n) \
 572	BYTES_TO_GRANULES((n) + (GRANULE_BYTES - 1 + EXTRA_BYTES))
 573# if MAX_EXTRA_BYTES == 0
 574#  define SMALL_OBJ(bytes) EXPECT((bytes) <= (MAXOBJBYTES), 1)
 575# else
 576#  define SMALL_OBJ(bytes) \
 577	    (EXPECT((bytes) <= (MAXOBJBYTES - MAX_EXTRA_BYTES), 1) || \
 578	     (bytes) <= (MAXOBJBYTES - EXTRA_BYTES))
 579    	/* This really just tests bytes <= MAXOBJBYTES - EXTRA_BYTES.	*/
 580    	/* But we try to avoid looking up EXTRA_BYTES.			*/
 581# endif
 582# define ADD_SLOP(bytes) ((bytes) + EXTRA_BYTES)
 583# ifndef MIN_WORDS
 584#  define MIN_WORDS 2	/* FIXME: obsolete */
 585# endif
 586
 587
 588/*
 589 * Hash table representation of sets of pages.
 590 * Implements a map from aligned HBLKSIZE chunks of the address space to one
 591 * bit each.
 592 * This assumes it is OK to spuriously set bits, e.g. because multiple
 593 * addresses are represented by a single location.
 594 * Used by black-listing code, and perhaps by dirty bit maintenance code.
 595 */
 596 
 597# ifdef LARGE_CONFIG
 598#   if CPP_WORDSZ == 32
 599#    define LOG_PHT_ENTRIES  20 /* Collisions likely at 1M blocks,	*/
 600				/* which is >= 4GB.  Each table takes	*/
 601				/* 128KB, some of which may never be	*/
 602				/* touched.				*/
 603#   else
 604#    define LOG_PHT_ENTRIES  21 /* Collisions likely at 2M blocks,	*/
 605				/* which is >= 8GB.  Each table takes	*/
 606				/* 256KB, some of which may never be	*/
 607				/* touched.				*/
 608#   endif
 609# else
 610#   ifdef SMALL_CONFIG
 611#     define LOG_PHT_ENTRIES  15 /* Collisions are likely if heap grows	*/
 612				 /* to more than 32K hblks = 128MB.	*/
 613				 /* Each hash table occupies 4K bytes.  */
 614#   else /* default "medium" configuration */
 615#     define LOG_PHT_ENTRIES  18 /* Collisions are likely if heap grows	*/
 616				 /* to more than 256K hblks >= 1GB.	*/
 617				 /* Each hash table occupies 32K bytes. */
 618				 /* Even for somewhat smaller heaps, 	*/
 619				 /* say half that, collisions may be an	*/
 620				 /* issue because we blacklist 		*/
 621				 /* addresses outside the heap.		*/
 622#   endif
 623# endif
 624# define PHT_ENTRIES ((word)1 << LOG_PHT_ENTRIES)
 625# define PHT_SIZE (PHT_ENTRIES >> LOGWL)
 626typedef word page_hash_table[PHT_SIZE];
 627
 628# define PHT_HASH(addr) ((((word)(addr)) >> LOG_HBLKSIZE) & (PHT_ENTRIES - 1))
 629
 630# define get_pht_entry_from_index(bl, index) \
 631		(((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
 632# define set_pht_entry_from_index(bl, index) \
 633		(bl)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index)
 634# define clear_pht_entry_from_index(bl, index) \
 635		(bl)[divWORDSZ(index)] &= ~((word)1 << modWORDSZ(index))
 636/* And a dumb but thread-safe version of set_pht_entry_from_index.	*/
 637/* This sets (many) extra bits.						*/
 638# define set_pht_entry_from_index_safe(bl, index) \
 639		(bl)[divWORDSZ(index)] = ONES
 640	
 641
 642
 643/********************************************/
 644/*                                          */
 645/*    H e a p   B l o c k s                 */
 646/*                                          */
 647/********************************************/
 648
 649/*  heap block header */
 650#define HBLKMASK   (HBLKSIZE-1)
 651
 652#define MARK_BITS_PER_HBLK (HBLKSIZE/GRANULE_BYTES)
 653	   /* upper bound                                    */
 654	   /* We allocate 1 bit per allocation granule.	     */
 655	   /* If MARK_BIT_PER_GRANULE is defined, we use     */
 656	   /* every nth bit, where n is the number of 	     */
 657	   /* allocation granules per object.  If	     */
 658	   /* MARK_BIT_PER_OBJ is defined, we only use the   */
 659   	   /* initial group of mark bits, and it is safe     */
 660	   /* to allocate smaller header for large objects.  */
 661
 662# ifdef USE_MARK_BYTES
 663#   define MARK_BITS_SZ (MARK_BITS_PER_HBLK + 1)
 664	/* Unlike the other case, this is in units of bytes.		*/
 665	/* Since we force doubleword alignment, we need at most one	*/
 666	/* mark bit per 2 words.  But we do allocate and set one	*/
 667	/* extra mark bit to avoid an explicit check for the 		*/
 668	/* partial object at the end of each block.			*/
 669# else
 670#   define MARK_BITS_SZ (MARK_BITS_PER_HBLK/CPP_WORDSZ + 1)
 671# endif
 672
 673#ifdef PARALLEL_MARK
 674# include <atomic_ops.h>
 675  typedef AO_t counter_t;
 676#else
 677  typedef size_t counter_t;
 678#endif
 679
 680/* We maintain layout maps for heap blocks containing objects of a given */
 681/* size.  Each entry in this map describes a byte offset and has the	 */
 682/* following type.							 */
/* Header describing one heap block (struct hblk).  Headers are kept	 */
/* separately from the blocks themselves (hb_block points back at the	 */
/* described block).  A block in use holds objects of a single size	 */
/* (hb_sz); free blocks are linked through hb_next/hb_prev.		 */
struct hblkhdr {
    struct hblk * hb_next; 	/* Link field for hblk free list	 */
    				/* and for lists of chunks waiting to be */
    				/* reclaimed.				 */
    struct hblk * hb_prev;	/* Backwards link for free list.	*/
    struct hblk * hb_block;	/* The corresponding block.		*/
    unsigned char hb_obj_kind;
    			 /* Kind of objects in the block.  Each kind 	*/
    			 /* identifies a mark procedure and a set of 	*/
    			 /* list headers.  Sometimes called regions.	*/
    unsigned char hb_flags;	/* Bit-or of the flags defined below.	*/
#	define IGNORE_OFF_PAGE	1	/* Ignore pointers that do not	*/
					/* point to the first page of 	*/
					/* this object.			*/
#	define WAS_UNMAPPED 2	/* This is a free block, which has	*/
				/* been unmapped from the address 	*/
				/* space.				*/
				/* GC_remap must be invoked on it	*/
				/* before it can be reallocated.	*/
				/* Only set with USE_MUNMAP.		*/
#	define FREE_BLK 4	/* Block is free, i.e. not in use.	*/
    unsigned short hb_last_reclaimed;
    				/* Value of GC_gc_no when block was	*/
    				/* last allocated or swept. May wrap.   */
				/* For a free block, this is maintained */
				/* only for USE_MUNMAP, and indicates	*/
				/* when the header was allocated, or	*/
				/* when the size of the block last	*/
				/* changed.				*/
    size_t hb_sz;  /* If in use, size in bytes, of objects in the block. */
		   /* if free, the size in bytes of the whole block      */
		   /* We assume that this is convertible to signed_word	 */
		   /* without generating a negative result.  We avoid	 */
		   /* generating free blocks larger than that.		 */
    word hb_descr;   		/* object descriptor for marking.  See	*/
    				/* mark.h.				*/
#   ifdef MARK_BIT_PER_OBJ
      unsigned32 hb_inv_sz;	/* A good upper bound for 2**32/hb_sz.	*/
    				/* For large objects, we use		*/
    				/* LARGE_INV_SZ.			*/
#     define LARGE_INV_SZ (1 << 16)
#   else
      unsigned char hb_large_block;
      short * hb_map;		/* Essentially a table of remainders	*/
      				/* mod BYTES_TO_GRANULES(hb_sz), except	*/
      				/* for large blocks.  See GC_obj_map.	*/
#   endif
    counter_t hb_n_marks;	/* Number of set mark bits, excluding 	*/
    				/* the one always set at the end.	*/
    				/* Currently it is concurrently 	*/
    				/* updated and hence only approximate.  */
    				/* But a zero value does guarantee that	*/
    				/* the block contains no marked		*/
    				/* objects.				*/
    				/* Ensuring this property means that we	*/
    				/* never decrement it to zero during a	*/
    				/* collection, and hence the count may 	*/
    				/* be one too high.  Due to concurrent	*/
    				/* updates, an arbitrary number of	*/
    				/* increments, but not all of them (!)	*/
    				/* may be lost, hence it may in theory	*/
    				/* be much too low.			*/
    				/* The count may also be too high if	*/
    				/* multiple mark threads mark the	*/
    				/* same object due to a race.		*/
    				/* Without parallel marking, the count	*/
    				/* is accurate.				*/
#   ifdef USE_MARK_BYTES
      union {
        char _hb_marks[MARK_BITS_SZ];
			    /* The i'th byte is 1 if the object 	*/
			    /* starting at granule i or object i is	*/
			    /* marked, 0 o.w.				*/
			    /* The mark bit for the "one past the	*/
			    /* end" object is always set to avoid a 	*/
			    /* special case test in the marker.		*/
	word dummy;	/* Force word alignment of mark bytes. */
      } _mark_byte_union;
#     define hb_marks _mark_byte_union._hb_marks
#   else
      word hb_marks[MARK_BITS_SZ];
#   endif /* !USE_MARK_BYTES */
};
 766
 767# define ANY_INDEX 23	/* "Random" mark bit index for assertions */
 768
 769/*  heap block body */
 770
 771# define HBLK_WORDS (HBLKSIZE/sizeof(word))
 772# define HBLK_GRANULES (HBLKSIZE/GRANULE_BYTES)
 773
 774/* The number of objects in a block dedicated to a certain size.	*/
 775/* may erroneously yield zero (instead of one) for large objects.	*/
 776# define HBLK_OBJS(sz_in_bytes) (HBLKSIZE/(sz_in_bytes))
 777
/* A heap block: an opaque HBLKSIZE-byte chunk of heap memory.		*/
/* Its metadata lives in a separate struct hblkhdr (whose hb_block	*/
/* field points back at the block).					*/
struct hblk {
    char hb_body[HBLKSIZE];	/* Raw storage; carved into objects.	*/
};
 781
 782# define HBLK_IS_FREE(hdr) (((hdr) -> hb_flags & FREE_BLK) != 0)
 783
 784# define OBJ_SZ_TO_BLOCKS(sz) divHBLKSZ(sz + HBLKSIZE-1)
 785    /* Size of block (in units of HBLKSIZE) needed to hold objects of	*/
 786    /* given sz (in bytes).						*/
 787
 788/* Object free list link */
 789# define obj_link(p) (*(void  **)(p))
 790
 791# define LOG_MAX_MARK_PROCS 6
 792# define MAX_MARK_PROCS (1 << LOG_MAX_MARK_PROCS)
 793
 794/* Root sets.  Logically private to mark_rts.c.  But we don't want the	*/
 795/* tables scanned, so we put them here.					*/
 796/* MAX_ROOT_SETS is the maximum number of ranges that can be 	*/
 797/* registered as static roots. 					*/
 798# ifdef LARGE_CONFIG
 799#   define MAX_ROOT_SETS 8192
 800# else
 801#   ifdef SMALL_CONFIG
 802#     define MAX_ROOT_SETS 512
 803#   else
 804#     define MAX_ROOT_SETS 2048
 805#   endif
 806# endif
 807
 808# define MAX_EXCLUSIONS (MAX_ROOT_SETS/4)
 809/* Maximum number of segments that can be excluded from root sets.	*/
 810
/*
 * Data structure for excluded static roots: an address range that
 * must NOT be scanned for pointers.  At most MAX_EXCLUSIONS of these
 * may be registered.
 */
struct exclusion {
    ptr_t e_start;	/* Start address of the excluded range.	*/
    ptr_t e_end;	/* End address of the excluded range.	*/
};
 818
/* Data structure for list of root sets (one registered static root	*/
/* range per entry).							*/
/* We keep a hash table, so that we can filter out duplicate additions.	*/
/* Under Win32, we need to do a better job of filtering overlaps, so	*/
/* we resort to sequential search, and pay the price.			*/
struct roots {
	ptr_t r_start;		/* Start address of the root range.	*/
	ptr_t r_end;		/* End address of the root range.	*/
#	if !defined(MSWIN32) && !defined(MSWINCE)
	  struct roots * r_next;	/* Hash-chain link (absent on	*/
	  				/* Win32, which searches	*/
	  				/* sequentially instead).	*/
#	endif
	GC_bool r_tmp;
	  	/* Delete before registering new dynamic libraries */
};
 832
 833#if !defined(MSWIN32) && !defined(MSWINCE)
 834    /* Size of hash table index to roots.	*/
 835#   define LOG_RT_SIZE 6
 836#   define RT_SIZE (1 << LOG_RT_SIZE) /* Power of 2, may be != MAX_ROOT_SETS */
 837#endif
 838
/* Lists of all heap blocks and free lists	*/
/* as well as other random data structures	*/
/* that should not be scanned by the		*/
/* collector.					*/
/* These are grouped together in a struct	*/
/* so that they can be easily skipped by the	*/
/* GC_mark routine.				*/
/* The ordering is weird to make GC_malloc	*/
/* faster by keeping the important fields	*/
/* sufficiently close together that a		*/
/* single load of a base register will do.	*/
/* Scalars that could easily appear to		*/
/* be pointers are also put here.		*/
/* The main fields should precede any 		*/
/* conditionally included fields, so that	*/
/* gc_inl.h will work even if a different set	*/
/* of macros is defined when the client is	*/
/* compiled.					*/

struct _GC_arrays {
  word _heapsize;		/* Heap size in bytes.			*/
  word _max_heapsize;		/* Upper bound on heap size.		*/
  word _requested_heapsize;	/* Heap size due to explicit expansion */
  ptr_t _last_heap_addr;
  ptr_t _prev_heap_addr;
  word _large_free_bytes;
	/* Total bytes contained in blocks on large object free */
	/* list.						*/
  word _large_allocd_bytes;
  	/* Total number of bytes in allocated large objects blocks.	*/
  	/* For the purposes of this counter and the next one only, a 	*/
  	/* large object is one that occupies a block of at least	*/
  	/* 2*HBLKSIZE.							*/
  word _max_large_allocd_bytes;
  	/* Maximum number of bytes that were ever allocated in		*/
  	/* large object blocks.  This is used to help decide when it	*/
  	/* is safe to split up a large block.				*/
  word _bytes_allocd_before_gc;
		/* Number of bytes allocated before this	*/
		/* collection cycle.				*/
# ifndef SEPARATE_GLOBALS
    word _bytes_allocd;
  	/* Number of bytes allocated during this collection cycle */
# endif
  word _bytes_dropped;
  	/* Number of black-listed bytes dropped during GC cycle	*/
	/* as a result of repeated scanning during allocation	*/
	/* attempts.  These are treated largely as allocated,	*/
	/* even though they are not useful to the client.	*/
  word _bytes_finalized;
  	/* Approximate number of bytes in objects (and headers)	*/
  	/* that became ready for finalization in the last 	*/
  	/* collection.						*/
  word _non_gc_bytes_at_gc;
  	/* Number of explicitly managed bytes of storage 	*/
  	/* at last collection.					*/
  word _bytes_freed;
  	/* Number of explicitly deallocated bytes of memory	*/
  	/* since last collection.				*/
  word _finalizer_bytes_freed;
  	/* Bytes of memory explicitly deallocated while 	*/
  	/* finalizers were running.  Used to approximate mem.	*/
  	/* explicitly deallocated by finalizers.		*/
  ptr_t _scratch_end_ptr;
  ptr_t _scratch_last_end_ptr;
	/* Used by headers.c, and can easily appear to point to	*/
	/* heap.						*/
  GC_mark_proc _mark_procs[MAX_MARK_PROCS];
  	/* Table of user-defined mark procedures.  There is	*/
	/* a small number of these, which can be referenced	*/
	/* by DS_PROC mark descriptors.  See gc_mark.h.		*/

# ifndef SEPARATE_GLOBALS
    void *_objfreelist[MAXOBJGRANULES+1];
			  /* free list for objects */
    void *_aobjfreelist[MAXOBJGRANULES+1];
			  /* free list for atomic objs 	*/
# endif

  void *_uobjfreelist[MAXOBJGRANULES+1];
			  /* uncollectable but traced objs 	*/
			  /* objects on this and auobjfreelist  */
			  /* are always marked, except during   */
			  /* garbage collections.		*/
# ifdef ATOMIC_UNCOLLECTABLE
    void *_auobjfreelist[MAXOBJGRANULES+1];
# endif
			  /* atomic uncollectable but traced objs 	*/

    word _composite_in_use;
   		/* Number of words in accessible composite	*/
		/* objects.					*/
    word _atomic_in_use;
   		/* Number of words in accessible atomic		*/
		/* objects.					*/
# ifdef USE_MUNMAP
    word _unmapped_bytes;	/* Bytes currently unmapped back to OS.	*/
# endif

    size_t _size_map[MAXOBJBYTES+1];
    	/* Number of words to allocate for a given allocation request in */
    	/* bytes.							 */

# ifdef STUBBORN_ALLOC
    ptr_t _sobjfreelist[MAXOBJGRANULES+1];
# endif
  			  /* free list for immutable objects	*/
# ifdef MARK_BIT_PER_GRANULE
    short * _obj_map[MAXOBJGRANULES+1];
                       /* If not NIL, then a pointer to a map of valid  */
    		       /* object addresses.				*/
		       /* _obj_map[sz_in_granules][i] is 		*/
  		       /* i % sz_in_granules.				*/
  		       /* This is now used purely to replace a 		*/
  		       /* division in the marker by a table lookup.	*/
    		       /* _obj_map[0] is used for large objects and	*/
    		       /* contains all nonzero entries.  This gets us	*/
    		       /* out of the marker fast path without an extra 	*/
    		       /* test.						*/
#   define MAP_LEN BYTES_TO_GRANULES(HBLKSIZE)
# endif
#   define VALID_OFFSET_SZ HBLKSIZE
  char _valid_offsets[VALID_OFFSET_SZ];
				/* GC_valid_offsets[i] == TRUE ==> i 	*/
				/* is registered as a displacement.	*/
  char _modws_valid_offsets[sizeof(word)];
				/* GC_valid_offsets[i] ==>		  */
				/* GC_modws_valid_offsets[i%sizeof(word)] */
# ifdef STUBBORN_ALLOC
    page_hash_table _changed_pages;
        /* Stubborn object pages that were changed since last call to	*/
	/* GC_read_changed.						*/
    page_hash_table _prev_changed_pages;
        /* Stubborn object pages that were changed before last call to	*/
	/* GC_read_changed.						*/
# endif
# if defined(PROC_VDB) || defined(MPROTECT_VDB) || \
     defined(GWW_VDB) || defined(MANUAL_VDB)
    page_hash_table _grungy_pages; /* Pages that were dirty at last 	   */
				     /* GC_read_dirty.			   */
# endif
# if defined(MPROTECT_VDB) || defined(MANUAL_VDB)
    volatile page_hash_table _dirty_pages;	
			/* Pages dirtied since last GC_read_dirty. */
# endif
# if defined(PROC_VDB) || defined(GWW_VDB)
    page_hash_table _written_pages;	/* Pages ever dirtied	*/
# endif
# ifdef LARGE_CONFIG
#   if CPP_WORDSZ > 32
#     define MAX_HEAP_SECTS 4096 	/* overflows at roughly 64 GB	   */
#   else
#     define MAX_HEAP_SECTS 768		/* Separately added heap sections. */
#   endif
# else
#   ifdef SMALL_CONFIG
#     define MAX_HEAP_SECTS 128		/* Roughly 256MB (128*2048*1K)	*/
#   else
#     define MAX_HEAP_SECTS 384		/* Roughly 3GB			*/
#   endif
# endif
  struct HeapSect {
      ptr_t hs_start; size_t hs_bytes;
  } _heap_sects[MAX_HEAP_SECTS];	/* Heap segments potentially 	*/
  					/* containing client objects.	*/
# if defined(USE_PROC_FOR_LIBRARIES)
     struct HeapSect _our_memory[MAX_HEAP_SECTS];
     					/* All GET_MEM allocated	*/
					/* memory.  Includes block 	*/
					/* headers and the like.	*/
# endif
# if defined(MSWIN32) || defined(MSWINCE)
    ptr_t _heap_bases[MAX_HEAP_SECTS];
    		/* Start address of memory regions obtained from kernel. */
# endif
# ifdef MSWINCE
    word _heap_lengths[MAX_HEAP_SECTS];
    		/* Committed lengths of memory regions obtained from kernel. */
# endif
  struct roots _static_roots[MAX_ROOT_SETS];
# if !defined(MSWIN32) && !defined(MSWINCE)
    struct roots * _root_index[RT_SIZE];
# endif
  struct exclusion _excl_table[MAX_EXCLUSIONS];
  /* Block header index; see gc_headers.h */
  bottom_index * _all_nils;
  bottom_index * _top_index [TOP_SZ];
#ifdef ENABLE_TRACE
  ptr_t _trace_addr;
#endif
#ifdef SAVE_CALL_CHAIN
  struct callinfo _last_stack[NFRAMES];	/* Stack at last garbage collection.*/
  					/* Useful for debugging	mysterious  */
  					/* object disappearances.	    */
  					/* In the multithreaded case, we    */
  					/* currently only save the calling  */
  					/* stack.			    */
#endif
};
1038
GC_API GC_FAR struct _GC_arrays GC_arrays; 

/* Convenience aliases: each GC_xxx name below expands to the		*/
/* corresponding _xxx field of the single GC_arrays instance.		*/
# ifndef SEPARATE_GLOBALS
#   define GC_objfreelist GC_arrays._objfreelist
#   define GC_aobjfreelist GC_arrays._aobjfreelist
#   define GC_bytes_allocd GC_arrays._bytes_allocd
# endif
# define GC_uobjfreelist GC_arrays._uobjfreelist
# ifdef ATOMIC_UNCOLLECTABLE
#   define GC_auobjfreelist GC_arrays._auobjfreelist
# endif
# define GC_sobjfreelist GC_arrays._sobjfreelist
# define GC_valid_offsets GC_arrays._valid_offsets
# define GC_modws_valid_offsets GC_arrays._modws_valid_offsets
# ifdef STUBBORN_ALLOC
#    define GC_changed_pages GC_arrays._changed_pages
#    define GC_prev_changed_pages GC_arrays._prev_changed_pages
# endif
# ifdef MARK_BIT_PER_GRANULE
#   define GC_obj_map GC_arrays._obj_map
# endif
# define GC_last_heap_addr GC_arrays._last_heap_addr
# define GC_prev_heap_addr GC_arrays._prev_heap_addr
# define GC_large_free_bytes GC_arrays._large_free_bytes
# define GC_large_allocd_bytes GC_arrays._large_allocd_bytes
# define GC_max_large_allocd_bytes GC_arrays._max_large_allocd_bytes
# define GC_bytes_dropped GC_arrays._bytes_dropped
# define GC_bytes_finalized GC_arrays._bytes_finalized
# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
# define GC_bytes_freed GC_arrays._bytes_freed
# define GC_finalizer_bytes_freed GC_arrays._finalizer_bytes_freed
# define GC_scratch_end_ptr GC_arrays._scratch_end_ptr
# define GC_scratch_last_end_ptr GC_arrays._scratch_last_end_ptr
# define GC_mark_procs GC_arrays._mark_procs
# define GC_heapsize GC_arrays._heapsize
# define GC_max_heapsize GC_arrays._max_heapsize
# define GC_requested_heapsize GC_arrays._requested_heapsize
# define GC_bytes_allocd_before_gc GC_arrays._bytes_allocd_before_gc
# define GC_heap_sects GC_arrays._heap_sects
# ifdef USE_PROC_FOR_LIBRARIES
#   define GC_our_memory GC_arrays._our_memory
# endif
# define GC_last_stack GC_arrays._last_stack
#ifdef ENABLE_TRACE
#define GC_trace_addr GC_arrays._trace_addr
#endif
# ifdef USE_MUNMAP
#   define GC_unmapped_bytes GC_arrays._unmapped_bytes
# endif
# if defined(MSWIN32) || defined(MSWINCE)
#   define GC_heap_bases GC_arrays._heap_bases
# endif
# ifdef MSWINCE
#   define GC_heap_lengths GC_arrays._heap_lengths
# endif
# define GC_static_roots GC_arrays._static_roots
# define GC_root_index GC_arrays._root_index
# define GC_excl_table GC_arrays._excl_table
# define GC_all_nils GC_arrays._all_nils
# define GC_top_index GC_arrays._top_index
# if defined(PROC_VDB) || defined(MPROTECT_VDB) || \
     defined(GWW_VDB) || defined(MANUAL_VDB)
#   define GC_grungy_pages GC_arrays._grungy_pages
# endif
# if defined(MPROTECT_VDB) || defined(MANUAL_VDB)
#   define GC_dirty_pages GC_arrays._dirty_pages
# endif
# if defined(PROC_VDB) || defined(GWW_VDB)
#   define GC_written_pages GC_arrays._written_pages
# endif
# define GC_composite_in_use GC_arrays._composite_in_use
# define GC_atomic_in_use GC_arrays._atomic_in_use
# define GC_size_map GC_arrays._size_map

/* Address range spanned by GC_arrays, used to exclude it from scanning. */
# define beginGC_arrays ((ptr_t)(&GC_arrays))
# define endGC_arrays (((ptr_t)(&GC_arrays)) + (sizeof GC_arrays))

#define USED_HEAP_SIZE (GC_heapsize - GC_large_free_bytes)
1117
/* Object kinds: */
# define MAXOBJKINDS 16

/* Per-kind allocation state; indexed by the kind constants below	*/
/* (PTRFREE, NORMAL, ...).  GC_n_kinds gives the number in use.		*/
extern struct obj_kind {
   void **ok_freelist;	/* Array of free list headers for this kind of object */
   			/* Point either to GC_arrays or to storage allocated */
   			/* with GC_scratch_alloc.			     */
   struct hblk **ok_reclaim_list;
   			/* List headers for lists of blocks waiting to be */
   			/* swept.					  */
   			/* Indexed by object size in granules.		  */
   word ok_descriptor;  /* Descriptor template for objects in this	*/
   			/* block.					*/
   GC_bool ok_relocate_descr;
   			/* Add object size in bytes to descriptor 	*/
   			/* template to obtain descriptor.  Otherwise	*/
   			/* template is used as is.			*/
   GC_bool ok_init;   /* Clear objects before putting them on the free list. */
} GC_obj_kinds[MAXOBJKINDS];

/* Address range spanned by GC_obj_kinds, for exclusion from scanning.	*/
# define beginGC_obj_kinds ((ptr_t)(&GC_obj_kinds))
# define endGC_obj_kinds (beginGC_obj_kinds + (sizeof GC_obj_kinds))
1140
/* Variables that used to be in GC_arrays, but need to be accessed by 	*/
/* inline allocation code.  If they were in GC_arrays, the inlined 	*/
/* allocation code would include GC_arrays offsets (as it did), which	*/
/* introduce maintenance problems.					*/

#ifdef SEPARATE_GLOBALS
  /* NOTE(review): these are tentative definitions in a header, not	*/
  /* extern declarations; presumably SEPARATE_GLOBALS builds rely on	*/
  /* common-symbol linking -- confirm before enabling on strict		*/
  /* toolchains.							*/
  word GC_bytes_allocd;
  	/* Number of bytes allocated during this collection cycle */
  ptr_t GC_objfreelist[MAXOBJGRANULES+1];
			  /* free list for NORMAL objects */
# define beginGC_objfreelist ((ptr_t)(&GC_objfreelist))
# define endGC_objfreelist (beginGC_objfreelist + sizeof(GC_objfreelist))

  ptr_t GC_aobjfreelist[MAXOBJGRANULES+1];
			  /* free list for atomic (PTRFREE) objs 	*/
# define beginGC_aobjfreelist ((ptr_t)(&GC_aobjfreelist))
# define endGC_aobjfreelist (beginGC_aobjfreelist + sizeof(GC_aobjfreelist))
#endif
1159
/* Predefined kinds (indices into GC_obj_kinds): */
# define PTRFREE 0
# define NORMAL  1
# define UNCOLLECTABLE 2
# ifdef ATOMIC_UNCOLLECTABLE
#   define AUNCOLLECTABLE 3
#   define STUBBORN 4
    /* (k) & ~1 maps both 2 (UNCOLLECTABLE) and 3 (AUNCOLLECTABLE)	*/
    /* to 2, so a single compare covers both uncollectable kinds.	*/
#   define IS_UNCOLLECTABLE(k) (((k) & ~1) == UNCOLLECTABLE)
# else
#   define STUBBORN 3
#   define IS_UNCOLLECTABLE(k) ((k) == UNCOLLECTABLE)
# endif
1172
extern unsigned GC_n_kinds;	/* Number of kinds in use in GC_obj_kinds. */

GC_API word GC_fo_entries;

extern word GC_n_heap_sects;	/* Number of separately added heap	*/
				/* sections.				*/

#ifdef USE_PROC_FOR_LIBRARIES
  extern word GC_n_memory;	/* Number of GET_MEM allocated memory	*/
				/* sections.				*/
#endif

extern word GC_page_size;	/* OS page size, in bytes.		*/

# if defined(MSWIN32) || defined(MSWINCE)
  struct _SYSTEM_INFO;		/* Forward decl; avoids windows.h here.	*/
  extern struct _SYSTEM_INFO GC_sysinfo;
  extern word GC_n_heap_bases;	/* See GC_heap_bases.	*/
# endif

extern word GC_total_stack_black_listed;
			/* Number of bytes on stack blacklist. 	*/

extern word GC_black_list_spacing;
			/* Average number of bytes between blacklisted	*/
			/* blocks. Approximate.				*/
			/* Counts only blocks that are 			*/
			/* "stack-blacklisted", i.e. that are 		*/
			/* problematic in the interior of an object.	*/

extern struct hblk * GC_hblkfreelist[];
				/* List of completely empty heap blocks	*/
				/* Linked through hb_next field of 	*/
				/* header structure associated with	*/
				/* block.				*/

extern GC_bool GC_objects_are_marked;	/* There are marked objects in  */
					/* the heap.			*/

#ifndef SMALL_CONFIG
  extern GC_bool GC_incremental;
			/* Using incremental/generational collection. */
# define TRUE_INCREMENTAL \
	(GC_incremental && GC_time_limit != GC_TIME_UNLIMITED)
	/* True incremental, not just generational, mode */
#else
# define GC_incremental FALSE
			/* Hopefully allow optimizer to remove some code. */
# define TRUE_INCREMENTAL FALSE
#endif

extern GC_bool GC_dirty_maintained;
				/* Dirty bits are being maintained, 	*/
				/* either for incremental collection,	*/
				/* or to limit the root set.		*/

extern word GC_root_size;	/* Total size of registered root sections */

extern GC_bool GC_debugging_started;	/* GC_debug_malloc has been called. */ 

extern long GC_large_alloc_warn_interval;
	/* Interval between unsuppressed warnings.	*/

extern long GC_large_alloc_warn_suppressed;
	/* Number of warnings suppressed so far.	*/

#ifdef THREADS
  extern GC_bool GC_world_stopped;	/* All mutator threads stopped.	*/
#endif
1242
/* Operations */
# ifndef abs
    /* WARNING: evaluates x twice; do not pass an expression with	*/
    /* side effects.  Only defined if <stdlib.h> did not supply one.	*/
#   define abs(x)  ((x) < 0? (-(x)) : (x))
# endif
1247
1248
/*  Marks are in a reserved area in                          */
/*  each heap block.  Each word has one mark bit associated  */
/*  with it. Only those corresponding to the beginning of an */
/*  object are used.                                         */

/* Set mark bit correctly, even if mark bits may be concurrently 	*/
/* accessed.								*/
#ifdef PARALLEL_MARK
  /* Atomic or, so concurrent markers cannot lose each other's updates.	*/
  /* (bits) is fully parenthesized so that an argument expression such	*/
  /* as "(word)1 << modWORDSZ(n)" is cast as a whole; a bare (AO_t)bits	*/
  /* would bind the cast only to the first operand.  The		*/
  /* do { } while (0) wrapper makes the expansion a single statement,	*/
  /* so "if (c) OR_WORD(a, b); else ..." parses as intended.		*/
# define OR_WORD(addr, bits) \
	do { AO_or((volatile AO_t *)(addr), (AO_t)(bits)); } while (0)
#else
  /* Single marker thread: a plain read-modify-write suffices.		*/
# define OR_WORD(addr, bits) *(addr) |= (bits)
#endif
1262
/* Mark bit operations */

/*
 * Retrieve, set, clear the nth mark bit in a given heap block.
 *
 * (Recall that bit n corresponds to nth object or allocation granule
 * relative to the beginning of the block, including unused words)
 */

#ifdef USE_MARK_BYTES
  /* One byte per mark bit: simpler and race-free without atomics.	*/
# define mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n])
# define set_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n]) = 1
# define clear_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n]) = 0
#else /* !USE_MARK_BYTES */
  /* Packed bits: setting goes through OR_WORD (atomic under		*/
  /* PARALLEL_MARK).  NOTE(review): clearing uses a plain &=, which is	*/
  /* not atomic -- presumably only done when no marker is running;	*/
  /* confirm at call sites.						*/
# define mark_bit_from_hdr(hhdr,n) (((hhdr)->hb_marks[divWORDSZ(n)] \
			    >> (modWORDSZ(n))) & (word)1)
# define set_mark_bit_from_hdr(hhdr,n) \
			    OR_WORD((hhdr)->hb_marks+divWORDSZ(n), \
				    (word)1 << modWORDSZ(n))
# define clear_mark_bit_from_hdr(hhdr,n) (hhdr)->hb_marks[divWORDSZ(n)] \
				&= ~((word)1 << modWORDSZ(n))
#endif /* !USE_MARK_BYTES */

#ifdef MARK_BIT_PER_OBJ
#  define MARK_BIT_NO(offset, sz) (((unsigned)(offset))/(sz))
	/* Get the mark bit index corresponding to the given byte	*/
	/* offset and size (in bytes). 				        */
#  define MARK_BIT_OFFSET(sz) 1
	/* Spacing between useful mark bits.				*/
#  define IF_PER_OBJ(x) x
#  define FINAL_MARK_BIT(sz) ((sz) > MAXOBJBYTES? 1 : HBLK_OBJS(sz))
	/* Position of final, always set, mark bit.			*/
#else /* MARK_BIT_PER_GRANULE */
#  define MARK_BIT_NO(offset, sz) BYTES_TO_GRANULES((unsigned)(offset))
#  define MARK_BIT_OFFSET(sz) BYTES_TO_GRANULES(sz)
#  define IF_PER_OBJ(x)
#  define FINAL_MARK_BIT(sz) \
	((sz) > MAXOBJBYTES? MARK_BITS_PER_HBLK \
	 		: BYTES_TO_GRANULES(sz * HBLK_OBJS(sz)))
#endif
1303
/* Important internal collector routines */

ptr_t GC_approx_sp(void);
			/* Approximation of the current stack pointer.	*/
  
GC_bool GC_should_collect(void);
  
void GC_apply_to_all_blocks(void (*fn) (struct hblk *h, word client_data),
    			    word client_data);
  			/* Invoke fn(hbp, client_data) for each 	*/
  			/* allocated heap block.			*/
struct hblk * GC_next_used_block(struct hblk * h);
  			/* Return first in-use block >= h	*/
struct hblk * GC_prev_block(struct hblk * h);
  			/* Return last block <= h.  Returned block	*/
  			/* is managed by GC, but may or may not be in	*/
			/* use.						*/
void GC_mark_init(void);
void GC_clear_marks(void);	/* Clear mark bits for all heap objects. */
void GC_invalidate_mark_state(void);
					/* Tell the marker that	marked 	   */
  					/* objects may point to	unmarked   */
  					/* ones, and roots may point to	   */
  					/* unmarked objects.		   */
  					/* Reset mark stack.		   */
GC_bool GC_mark_stack_empty(void);
GC_bool GC_mark_some(ptr_t cold_gc_frame);
  			/* Perform about one page's worth of marking	*/
  			/* work of whatever kind is needed.  Returns	*/
  			/* quickly if no collection is in progress.	*/
  			/* Return TRUE if mark phase finished.		*/
void GC_initiate_gc(void);
				/* initiate collection.			*/
  				/* If the mark state is invalid, this	*/
  				/* becomes full collection.  Otherwise	*/
  				/* it's partial.			*/
void GC_push_all(ptr_t bottom, ptr_t top);
				/* Push everything in a range 		*/
  				/* onto mark stack.			*/
void GC_push_selected(ptr_t bottom, ptr_t top,
		      int (*dirty_fn) (struct hblk *h),
		      void (*push_fn) (ptr_t bottom, ptr_t top) );
				  /* Push all pages h in [b,t) s.t. 	*/
				  /* dirty_fn(h) != 0 onto mark stack,	*/
				  /* using push_fn.			*/
#ifndef SMALL_CONFIG
  void GC_push_conditional (ptr_t b, ptr_t t, GC_bool all);
#else
# define GC_push_conditional(b, t, all) GC_push_all(b, t)
#endif
                                /* Do either of the above, depending	*/
				/* on the third arg.			*/
void GC_push_all_stack (ptr_t b, ptr_t t);
				    /* As above, but consider		*/
				    /*  interior pointers as valid  	*/
void GC_push_all_eager (ptr_t b, ptr_t t);
				    /* Same as GC_push_all_stack, but   */
				    /* ensures that stack is scanned	*/
				    /* immediately, not just scheduled  */
				    /* for scanning.			*/
#ifndef THREADS
  void GC_push_all_stack_partially_eager(ptr_t bottom, ptr_t top,
		  			 ptr_t cold_gc_frame);
			/* Similar to GC_push_all_eager, but only the	*/
			/* part hotter than cold_gc_frame is scanned	*/
			/* immediately.  Needed to ensure that callee-	*/
			/* save registers are not missed.		*/
#else
  /* In the threads case, we push part of the current thread stack	*/
  /* with GC_push_all_eager when we push the registers.  This gets the  */
  /* callee-save registers that may disappear.  The remainder of the	*/
  /* stacks are scheduled for scanning in *GC_push_other_roots, which	*/
  /* is thread-package-specific.					*/
#endif
void GC_push_current_stack(ptr_t cold_gc_frame, void *context);
  			/* Push enough of the current stack eagerly to	*/
  			/* ensure that callee-save registers saved in	*/
  			/* GC frames are scanned.			*/
  			/* In the non-threads case, schedule entire	*/
  			/* stack for scanning.				*/
			/* The second argument is a pointer to the 	*/
			/* (possibly null) thread context, for		*/
			/* (currently hypothetical) more precise	*/
			/* stack scanning.				*/
void GC_push_roots(GC_bool all, ptr_t cold_gc_frame);
  			/* Push all or dirty roots.	*/
extern void (*GC_push_other_roots)(void);
  			/* Push system or application specific roots	*/
  			/* onto the mark stack.  In some environments	*/
  			/* (e.g. threads environments) this is		*/
  			/* predefined to be non-zero.  A client supplied */
  			/* replacement should also call the original	*/
  			/* function.					*/
extern void GC_push_gc_structures(void);
			/* Push GC internal roots.  These are normally	*/
			/* included in the static data segment, and 	*/
			/* thus implicitly pushed.  But we must do this	*/
			/* explicitly if normal root processing is 	*/
			/* disabled.  Calls the following:		*/
	extern void GC_push_finalizer_structures(void);
	extern void GC_push_stubborn_structures (void);
#	ifdef THREADS
	  extern void GC_push_thread_structures (void);
#	endif
	extern void (*GC_push_typed_structures) (void);
			/* A pointer such that we can avoid linking in	*/
			/* the typed allocation support if unused.	*/
extern void (*GC_start_call_back) (void);
  			/* Called at start of full collections.		*/
  			/* Not called if 0.  Called with allocation 	*/
  			/* lock held.					*/
  			/* 0 by default.				*/
void GC_push_regs_and_stack(ptr_t cold_gc_frame);

void GC_push_regs(void);

void GC_with_callee_saves_pushed(void (*fn)(ptr_t, void *),
				 ptr_t arg);

# if defined(SPARC) || defined(IA64)
  /* Cause all stacked registers to be saved in memory.  Return a	*/
  /* pointer to the top of the corresponding memory stack.		*/
  ptr_t GC_save_regs_in_stack(void);
# endif
			/* Push register contents onto mark stack.	*/
  			/* If NURSERY is defined, the default push	*/
  			/* action can be overridden with GC_push_proc	*/

# ifdef NURSERY
    extern void (*GC_push_proc)(ptr_t);
# endif
1433# if defined(MSWIN32) || defined(MSWINCE)
1434  void __cdecl GC_push_one(word p);
1435# else
1436  void GC_push_one(word p);
1437			      /* If p points to an object, mark it    */
1438                              /* and push contents on the mark stack  */
1439  			      /* Pointer recognition test always      */
1440  	

Large files files are truncated, but you can click here to view the full file