/js/lib/Socket.IO-node/support/expresso/deps/jscoverage/js/jsgc.cpp
- /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sw=4 et tw=78:
- *
- * ***** BEGIN LICENSE BLOCK *****
- * Version: MPL 1.1/GPL 2.0/LGPL 2.1
- *
- * The contents of this file are subject to the Mozilla Public License Version
- * 1.1 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- * http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS" basis,
- * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
- * for the specific language governing rights and limitations under the
- * License.
- *
- * The Original Code is Mozilla Communicator client code, released
- * March 31, 1998.
- *
- * The Initial Developer of the Original Code is
- * Netscape Communications Corporation.
- * Portions created by the Initial Developer are Copyright (C) 1998
- * the Initial Developer. All Rights Reserved.
- *
- * Contributor(s):
- *
- * Alternatively, the contents of this file may be used under the terms of
- * either of the GNU General Public License Version 2 or later (the "GPL"),
- * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
- * in which case the provisions of the GPL or the LGPL are applicable instead
- * of those above. If you wish to allow use of your version of this file only
- * under the terms of either the GPL or the LGPL, and not to allow others to
- * use your version of this file under the terms of the MPL, indicate your
- * decision by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL or the LGPL. If you do not delete
- * the provisions above, a recipient may use your version of this file under
- * the terms of any one of the MPL, the GPL or the LGPL.
- *
- * ***** END LICENSE BLOCK ***** */
- /*
- * JS Mark-and-Sweep Garbage Collector.
- *
- * This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see
- * jsgc.h). It allocates from a special GC arena pool with each arena allocated
- * using malloc. It uses an ideally parallel array of flag bytes to hold the
- * mark bit, finalizer type index, etc.
- *
- * XXX swizzle page to freelist for better locality of reference
- */
- #include "jsstddef.h"
- #include <stdlib.h> /* for free */
- #include <math.h>
- #include <string.h> /* for memset used when DEBUG */
- #include "jstypes.h"
- #include "jsutil.h" /* Added by JSIFY */
- #include "jshash.h" /* Added by JSIFY */
- #include "jsbit.h"
- #include "jsclist.h"
- #include "jsprf.h"
- #include "jsapi.h"
- #include "jsatom.h"
- #include "jscntxt.h"
- #include "jsversion.h"
- #include "jsdbgapi.h"
- #include "jsexn.h"
- #include "jsfun.h"
- #include "jsgc.h"
- #include "jsinterp.h"
- #include "jsiter.h"
- #include "jslock.h"
- #include "jsnum.h"
- #include "jsobj.h"
- #include "jsparse.h"
- #include "jsscope.h"
- #include "jsscript.h"
- #include "jsstr.h"
- #include "jstracer.h"
- #if JS_HAS_XML_SUPPORT
- #include "jsxml.h"
- #endif
- /*
- * Check if posix_memalign is available.
- */
- #if _POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600 || MOZ_MEMORY
- # define HAS_POSIX_MEMALIGN 1
- #else
- # define HAS_POSIX_MEMALIGN 0
- #endif
- /*
- * jemalloc provides posix_memalign.
- */
- #ifdef MOZ_MEMORY
- extern "C" {
- #include "../../memory/jemalloc/jemalloc.h"
- }
- #endif
- /*
- * Include the headers for mmap unless we have posix_memalign and do not
- * insist on mmap.
- */
- #if JS_GC_USE_MMAP || (!defined JS_GC_USE_MMAP && !HAS_POSIX_MEMALIGN)
- # if defined(XP_WIN)
- # ifndef JS_GC_USE_MMAP
- # define JS_GC_USE_MMAP 1
- # endif
- # include <windows.h>
- # else
- # if defined(XP_UNIX) || defined(XP_BEOS)
- # include <unistd.h>
- # endif
- # if _POSIX_MAPPED_FILES > 0
- # ifndef JS_GC_USE_MMAP
- # define JS_GC_USE_MMAP 1
- # endif
- # include <sys/mman.h>
- /* On Mac OS X MAP_ANONYMOUS is not defined. */
- # if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
- # define MAP_ANONYMOUS MAP_ANON
- # endif
- # else
- # if JS_GC_USE_MMAP
- # error "JS_GC_USE_MMAP is set when mmap is not available"
- # endif
- # endif
- # endif
- #endif
- /*
- * A GC arena contains a fixed number of flag bits for each thing in its heap,
- * and supports O(1) lookup of a flag given its thing's address.
- *
- * To implement this, we allocate things of the same size from a GC arena
- * containing GC_ARENA_SIZE bytes aligned on GC_ARENA_SIZE boundary. The
- * following picture shows arena's layout:
- *
- * +------------------------------+--------------------+---------------+
- * | allocation area for GC thing | flags of GC things | JSGCArenaInfo |
- * +------------------------------+--------------------+---------------+
- *
- * To find the flag bits for the thing we calculate the thing index counting
- * from arena's start using:
- *
- * thingIndex = (thingAddress & GC_ARENA_MASK) / thingSize
- *
- * The details of the flag lookup depend on the thing's kind. For all GC
- * things except doubles we use one byte of flags, where the low 4 bits
- * determine the thing's type and the rest is used to implement GC marking,
- * finalization and locking. We calculate the address of the flag byte using:
- *
- * flagByteAddress =
- * (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo) - thingIndex
- *
- * where
- *
- * (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo)
- *
- * is the last byte of flags' area.
- *
- * This implies that the things are allocated from the start of their area and
- * flags are allocated from the end. This arrangement avoids a relatively
- * expensive calculation of the location of the boundary separating things and
- * flags. The boundary's offset from the start of the arena is given by:
- *
- * thingsPerArena * thingSize
- *
- * where thingsPerArena is the number of things that the arena can hold:
- *
- * (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / (thingSize + 1).
- *
- * To allocate doubles we use a specialized arena. It can contain only numbers
- * so we do not need the type bits. Moreover, since the doubles do not require
- * a finalizer and very few of them are locked via js_LockGCThing API, we use
- * just one bit of flags per double to denote if it was marked during the
- * marking phase of the GC. The locking is implemented via a hash table. Thus
- * for doubles the flag area becomes a bitmap.
- *
- * JS_GC_USE_MMAP macro governs the choice of the aligned arena allocator.
- * When it is true, a platform-dependent function like mmap is used to get
- * memory aligned on CPU page boundaries. If the macro is false or undefined,
- * posix_memalign is used when available. Otherwise the code uses malloc to
- * over-allocate a chunk with js_gcArenasPerChunk aligned arenas. The
- * approximate space overhead of this is 1/js_gcArenasPerChunk. For details,
- * see NewGCChunk/DestroyGCChunk below.
- *
- * The code also allocates arenas in chunks when JS_GC_USE_MMAP is 1 to
- * minimize the overhead of mmap/munmap. In this case js_gcArenasPerChunk can
- * not be a compile-time constant as the system page size is not known until
- * runtime.
- */
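- /*
- * Worked example of the flag lookup above (illustrative only): with
- * GC_ARENA_SHIFT == 12, GC_ARENA_SIZE == 4096 and GC_ARENA_MASK == 0xFFF,
- * take a 16-byte thing at address 0x20AC0140 in the arena that starts at
- * 0x20AC0000. Then:
- *
- * thingIndex = (0x20AC0140 & 0xFFF) / 16 = 0x140 / 16 = 20
- * flagByteAddress = (0x20AC0140 | 0xFFF) - sizeof(JSGCArenaInfo) - 20
- * = 0x20AC0FFF - 16 - 20 = 0x20AC0FDB
- *
- * assuming the 16-byte JSGCArenaInfo of an unpadded 32-bit build. Things
- * thus grow up from the arena start while their flag bytes grow down from
- * just below JSGCArenaInfo.
- */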
- #if JS_GC_USE_MMAP
- static uint32 js_gcArenasPerChunk = 0;
- static JSBool js_gcUseMmap = JS_FALSE;
- #elif HAS_POSIX_MEMALIGN
- # define js_gcArenasPerChunk 1
- #else
- # define js_gcArenasPerChunk 7
- #endif
- #if defined(js_gcArenasPerChunk) && js_gcArenasPerChunk == 1
- # define CHUNKED_ARENA_ALLOCATION 0
- #else
- # define CHUNKED_ARENA_ALLOCATION 1
- #endif
- #define GC_ARENA_SHIFT 12
- #define GC_ARENA_MASK ((jsuword) JS_BITMASK(GC_ARENA_SHIFT))
- #define GC_ARENA_SIZE JS_BIT(GC_ARENA_SHIFT)
- /*
- * JS_GC_ARENA_PAD defines the number of bytes to pad JSGCArenaInfo structure.
- * It is used to improve allocation efficiency when using posix_memalign. If
- * malloc's implementation uses internal headers, then calling
- *
- * posix_memalign(&p, GC_ARENA_SIZE, GC_ARENA_SIZE * js_gcArenasPerChunk)
- *
- * in a sequence leaves holes between allocations of the size GC_ARENA_SIZE
- * due to the need to fit headers. JS_GC_ARENA_PAD mitigates that so the code
- * calls
- *
- * posix_memalign(&p, GC_ARENA_SIZE,
- * GC_ARENA_SIZE * js_gcArenasPerChunk - JS_GC_ARENA_PAD)
- *
- * When JS_GC_ARENA_PAD is equal to or greater than the size of the system
- * header, the system can pack all allocations together without holes.
- *
- * When posix_memalign is used we want a pad of at least two words unless
- * posix_memalign comes from jemalloc, which does not use any
- * headers/trailers.
- */
- #ifndef JS_GC_ARENA_PAD
- # if HAS_POSIX_MEMALIGN && !MOZ_MEMORY
- # define JS_GC_ARENA_PAD (2 * JS_BYTES_PER_WORD)
- # else
- # define JS_GC_ARENA_PAD 0
- # endif
- #endif
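- /*
- * Illustration (hypothetical numbers): on a 32-bit build where the pad is
- * 2 * JS_BYTES_PER_WORD == 8 and js_gcArenasPerChunk == 1, the code
- * requests posix_memalign(&p, 4096, 4096 - 8), leaving 8 bytes of slack
- * per chunk for the allocator's internal header so that consecutive
- * chunks can stay densely packed.
- */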
- struct JSGCArenaInfo {
- /*
- * Allocation list for the arena or NULL if the arena holds double values.
- */
- JSGCArenaList *list;
- /*
- * Pointer to the previous arena in a linked list. The arena can either
- * belong to one of JSContext.gcArenaList lists or, when it does not have
- * any allocated GC things, to the list of free arenas in the chunk with
- * head stored in JSGCChunkInfo.lastFreeArena.
- */
- JSGCArenaInfo *prev;
- #if !CHUNKED_ARENA_ALLOCATION
- jsuword prevUntracedPage;
- #else
- /*
- * A link field for the list of arenas with marked but not yet traced
- * things. The field is encoded as arena's page to share the space with
- * firstArena and arenaIndex fields.
- */
- jsuword prevUntracedPage : JS_BITS_PER_WORD - GC_ARENA_SHIFT;
- /*
- * When firstArena is false, the index of arena in the chunk. When
- * firstArena is true, the index of a free arena holding JSGCChunkInfo or
- * NO_FREE_ARENAS if there are no free arenas in the chunk.
- *
- * GET_ARENA_INDEX and GET_CHUNK_INFO_INDEX are convenience macros to
- * access either of indexes.
- */
- jsuword arenaIndex : GC_ARENA_SHIFT - 1;
- /* Flag indicating if the arena is the first in the chunk. */
- jsuword firstArena : 1;
- #endif
- union {
- jsuword untracedThings; /* bitset for fast search of marked
- but not yet traced things */
- JSBool hasMarkedDoubles; /* the arena has marked doubles */
- } u;
- #if JS_GC_ARENA_PAD != 0
- uint8 pad[JS_GC_ARENA_PAD];
- #endif
- };
- /*
- * Verify that the bit fields are indeed shared and JSGCArenaInfo is as small
- * as possible. The code does not rely on this check so if on a particular
- * platform this does not compile, then, as a workaround, comment the assert
- * out and submit a bug report.
- */
- JS_STATIC_ASSERT(offsetof(JSGCArenaInfo, u) == 3 * sizeof(jsuword));
- /*
- * Macros to convert between JSGCArenaInfo, the start address of the arena and
- * arena's page defined as (start address) >> GC_ARENA_SHIFT.
- */
- #define ARENA_INFO_OFFSET (GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo))
- #define IS_ARENA_INFO_ADDRESS(arena) \
- (((jsuword) (arena) & GC_ARENA_MASK) == ARENA_INFO_OFFSET)
- #define ARENA_START_TO_INFO(arenaStart) \
- (JS_ASSERT(((arenaStart) & (jsuword) GC_ARENA_MASK) == 0), \
- (JSGCArenaInfo *) ((arenaStart) + (jsuword) ARENA_INFO_OFFSET))
- #define ARENA_INFO_TO_START(arena) \
- (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)), \
- (jsuword) (arena) & ~(jsuword) GC_ARENA_MASK)
- #define ARENA_PAGE_TO_INFO(arenaPage) \
- (JS_ASSERT(arenaPage != 0), \
- JS_ASSERT(!((jsuword)(arenaPage) >> (JS_BITS_PER_WORD-GC_ARENA_SHIFT))), \
- ARENA_START_TO_INFO((arenaPage) << GC_ARENA_SHIFT))
- #define ARENA_INFO_TO_PAGE(arena) \
- (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)), \
- ((jsuword) (arena) >> GC_ARENA_SHIFT))
- #define GET_ARENA_INFO(chunk, index) \
- (JS_ASSERT((index) < js_gcArenasPerChunk), \
- ARENA_START_TO_INFO(chunk + ((index) << GC_ARENA_SHIFT)))
- #if CHUNKED_ARENA_ALLOCATION
- /*
- * Definitions for allocating arenas in chunks.
- *
- * All chunks that have at least one free arena are put on the doubly-linked
- * list with the head stored in JSRuntime.gcChunkList. JSGCChunkInfo contains
- * the head of the chunk's free arena list together with the link fields for
- * gcChunkList.
- *
- * Structure stored in one of chunk's free arenas. GET_CHUNK_INFO_INDEX gives
- * the index of this arena. When all arenas in the chunk are used, it is
- * removed from the list and the index is set to NO_FREE_ARENAS indicating
- * that the chunk is not on gcChunkList and has no JSGCChunkInfo available.
- */
- struct JSGCChunkInfo {
- JSGCChunkInfo **prevp;
- JSGCChunkInfo *next;
- JSGCArenaInfo *lastFreeArena;
- uint32 numFreeArenas;
- };
- #define NO_FREE_ARENAS JS_BITMASK(GC_ARENA_SHIFT - 1)
- #ifdef js_gcArenasPerChunk
- JS_STATIC_ASSERT(1 <= js_gcArenasPerChunk &&
- js_gcArenasPerChunk <= NO_FREE_ARENAS);
- #endif
- #define GET_ARENA_CHUNK(arena, index) \
- (JS_ASSERT(GET_ARENA_INDEX(arena) == index), \
- ARENA_INFO_TO_START(arena) - ((index) << GC_ARENA_SHIFT))
- #define GET_ARENA_INDEX(arena) \
- ((arena)->firstArena ? 0 : (uint32) (arena)->arenaIndex)
- #define GET_CHUNK_INFO_INDEX(chunk) \
- ((uint32) ARENA_START_TO_INFO(chunk)->arenaIndex)
- #define SET_CHUNK_INFO_INDEX(chunk, index) \
- (JS_ASSERT((index) < js_gcArenasPerChunk || (index) == NO_FREE_ARENAS), \
- (void) (ARENA_START_TO_INFO(chunk)->arenaIndex = (jsuword) (index)))
- #define GET_CHUNK_INFO(chunk, infoIndex) \
- (JS_ASSERT(GET_CHUNK_INFO_INDEX(chunk) == (infoIndex)), \
- JS_ASSERT((uint32) (infoIndex) < js_gcArenasPerChunk), \
- (JSGCChunkInfo *) ((chunk) + ((infoIndex) << GC_ARENA_SHIFT)))
- #define CHUNK_INFO_TO_INDEX(ci) \
- GET_ARENA_INDEX(ARENA_START_TO_INFO((jsuword)ci))
- #endif
- /*
- * Macros for GC-thing operations.
- */
- #define THINGS_PER_ARENA(thingSize) \
- ((GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo)) / ((thingSize) + 1U))
- #define THING_TO_ARENA(thing) \
- ((JSGCArenaInfo *)(((jsuword) (thing) | GC_ARENA_MASK) + \
- 1 - sizeof(JSGCArenaInfo)))
- #define THING_TO_INDEX(thing, thingSize) \
- ((uint32) ((jsuword) (thing) & GC_ARENA_MASK) / (uint32) (thingSize))
- #define THING_FLAGS_END(arena) ((uint8 *)(arena))
- #define THING_FLAGP(arena, thingIndex) \
- (JS_ASSERT((jsuword) (thingIndex) \
- < (jsuword) THINGS_PER_ARENA((arena)->list->thingSize)), \
- (uint8 *)(arena) - 1 - (thingIndex))
- #define THING_TO_FLAGP(thing, thingSize) \
- THING_FLAGP(THING_TO_ARENA(thing), THING_TO_INDEX(thing, thingSize))
- #define FLAGP_TO_ARENA(flagp) THING_TO_ARENA(flagp)
- #define FLAGP_TO_INDEX(flagp) \
- (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) < ARENA_INFO_OFFSET), \
- (ARENA_INFO_OFFSET - 1 - (uint32) ((jsuword) (flagp) & GC_ARENA_MASK)))
- #define FLAGP_TO_THING(flagp, thingSize) \
- (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) >= \
- (ARENA_INFO_OFFSET - THINGS_PER_ARENA(thingSize))), \
- (JSGCThing *)(((jsuword) (flagp) & ~GC_ARENA_MASK) + \
- (thingSize) * FLAGP_TO_INDEX(flagp)))
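- /*
- * Round-trip example for the macros above (illustrative, thingSize == 16
- * and thingIndex == 20 as in the layout example): THING_TO_FLAGP stores
- * the flag at offset ARENA_INFO_OFFSET - 1 - 20 within the arena, and
- * FLAGP_TO_INDEX inverts it:
- *
- * FLAGP_TO_INDEX(flagp) = ARENA_INFO_OFFSET - 1 - (ARENA_INFO_OFFSET - 1 - 20)
- * = 20
- * FLAGP_TO_THING(flagp, 16) = arenaStart + 16 * 20 = arenaStart + 0x140
- */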
- /*
- * Macros for the specialized arena for doubles.
- *
- * DOUBLES_PER_ARENA defines the maximum number of doubles that the arena can
- * hold. We find it as follows. Let n be the number of doubles in the
- * arena. Together with the bitmap of flags and JSGCArenaInfo they should fit
- * the arena. Hence DOUBLES_PER_ARENA or n_max is the maximum value of n for
- * which the following holds:
- *
- * n*s + ceil(n/B) <= M (1)
- *
- * where "/" denotes normal real division,
- * ceil(r) gives the least integer not smaller than the number r,
- * s is the number of words in jsdouble,
- * B is number of bits per word or B == JS_BITS_PER_WORD
- * M is the number of words in the arena before JSGCArenaInfo or
- * M == (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / sizeof(jsuword).
- * M == ARENA_INFO_OFFSET / sizeof(jsuword)
- *
- * We rewrite the inequality as
- *
- * n*B*s/B + ceil(n/B) <= M,
- * ceil(n*B*s/B + n/B) <= M,
- * ceil(n*(B*s + 1)/B) <= M (2)
- *
- * We define a helper function e(n, s, B),
- *
- * e(n, s, B) := ceil(n*(B*s + 1)/B) - n*(B*s + 1)/B, 0 <= e(n, s, B) < 1.
- *
- * It gives:
- *
- * n*(B*s + 1)/B + e(n, s, B) <= M,
- * n + e*B/(B*s + 1) <= M*B/(B*s + 1)
- *
- * We apply the floor function to both sides of the last inequality, where
- * floor(r) gives the biggest integer not greater than r. As a consequence we
- * have:
- *
- * floor(n + e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
- * n + floor(e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
- * n <= floor(M*B/(B*s + 1)), (3)
- *
- * where floor(e*B/(B*s + 1)) is zero as e*B/(B*s + 1) < B/(B*s + 1) < 1.
- * Thus any n that satisfies the original constraint (1) or its equivalent (2),
- * must also satisfy (3). That is, we got an upper estimate for the maximum
- * value of n. Let's show that this upper estimate,
- *
- * floor(M*B/(B*s + 1)), (4)
- *
- * also satisfies (1) and, as such, gives the required maximum value.
- * Substituting it into (2) gives:
- *
- * ceil(floor(M*B/(B*s + 1))*(B*s + 1)/B) == ceil(floor(M/X)*X)
- *
- * where X == (B*s + 1)/B > 1. But then floor(M/X)*X <= M/X*X == M and
- *
- * ceil(floor(M/X)*X) <= ceil(M) == M.
- *
- * Thus the value of (4) gives the maximum n satisfying (1).
- *
- * For the final result we observe that in (4)
- *
- * M*B == ARENA_INFO_OFFSET / sizeof(jsuword) * JS_BITS_PER_WORD
- * == ARENA_INFO_OFFSET * JS_BITS_PER_BYTE
- *
- * and
- *
- * B*s == JS_BITS_PER_WORD * sizeof(jsdouble) / sizeof(jsuword)
- * == JS_BITS_PER_DOUBLE.
- */
- #define DOUBLES_PER_ARENA \
- ((ARENA_INFO_OFFSET * JS_BITS_PER_BYTE) / (JS_BITS_PER_DOUBLE + 1))
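- /*
- * Worked instance of (4), assuming an unpadded 32-bit build where
- * sizeof(JSGCArenaInfo) == 16 and hence ARENA_INFO_OFFSET == 4080:
- *
- * DOUBLES_PER_ARENA == 4080 * 8 / (64 + 1) == 32640 / 65 == 502
- *
- * Checking (1): 502 * 8 + ceil(502/32) * 4 == 4016 + 64 == 4080 <= 4080
- * bytes, while 503 doubles would need 4024 + 64 == 4088 > 4080 bytes.
- */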
- /*
- * Check that sizeof(jsuword) divides both ARENA_INFO_OFFSET and sizeof(jsdouble).
- */
- JS_STATIC_ASSERT(ARENA_INFO_OFFSET % sizeof(jsuword) == 0);
- JS_STATIC_ASSERT(sizeof(jsdouble) % sizeof(jsuword) == 0);
- JS_STATIC_ASSERT(sizeof(jsbitmap) == sizeof(jsuword));
- #define DOUBLES_ARENA_BITMAP_WORDS \
- (JS_HOWMANY(DOUBLES_PER_ARENA, JS_BITS_PER_WORD))
- /* Check that DOUBLES_PER_ARENA indeed maximises (1). */
- JS_STATIC_ASSERT(DOUBLES_PER_ARENA * sizeof(jsdouble) +
- DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword) <=
- ARENA_INFO_OFFSET);
- JS_STATIC_ASSERT((DOUBLES_PER_ARENA + 1) * sizeof(jsdouble) +
- sizeof(jsuword) *
- JS_HOWMANY((DOUBLES_PER_ARENA + 1), JS_BITS_PER_WORD) >
- ARENA_INFO_OFFSET);
- /*
- * When DOUBLES_PER_ARENA % JS_BITS_PER_WORD != 0, some bits in the
- * last word of the occupation bitmap are unused.
- */
- #define UNUSED_DOUBLE_BITMAP_BITS \
- (DOUBLES_ARENA_BITMAP_WORDS * JS_BITS_PER_WORD - DOUBLES_PER_ARENA)
- JS_STATIC_ASSERT(UNUSED_DOUBLE_BITMAP_BITS < JS_BITS_PER_WORD);
- #define DOUBLES_ARENA_BITMAP_OFFSET \
- (ARENA_INFO_OFFSET - DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword))
- #define CHECK_DOUBLE_ARENA_INFO(arenaInfo) \
- (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arenaInfo)), \
- JS_ASSERT(!(arenaInfo)->list)) \
- /*
- * Get the start of the bitmap area containing double mark flags in the arena.
- * To access the flag the code uses
- *
- * JS_TEST_BIT(bitmapStart, index)
- *
- * That is, compared with the case of arenas with non-double things, we count
- * flags from the start of the bitmap area, not from the end.
- */
- #define DOUBLE_ARENA_BITMAP(arenaInfo) \
- (CHECK_DOUBLE_ARENA_INFO(arenaInfo), \
- (jsbitmap *) arenaInfo - DOUBLES_ARENA_BITMAP_WORDS)
- #define DOUBLE_THING_TO_INDEX(thing) \
- (CHECK_DOUBLE_ARENA_INFO(THING_TO_ARENA(thing)), \
- JS_ASSERT(((jsuword) (thing) & GC_ARENA_MASK) < \
- DOUBLES_ARENA_BITMAP_OFFSET), \
- ((uint32) (((jsuword) (thing) & GC_ARENA_MASK) / sizeof(jsdouble))))
- static void
- ClearDoubleArenaFlags(JSGCArenaInfo *a)
- {
- jsbitmap *bitmap, mask;
- uintN nused;
- /*
- * When some high bits in the last word of the double occupation bitmap
- * are unused, we must set them. Otherwise RefillDoubleFreeList will
- * assume that they correspond to free cells and will try to allocate
- * them.
- *
- * Note that the code works correctly with UNUSED_DOUBLE_BITMAP_BITS == 0.
- */
- bitmap = DOUBLE_ARENA_BITMAP(a);
- memset(bitmap, 0, (DOUBLES_ARENA_BITMAP_WORDS - 1) * sizeof *bitmap);
- mask = ((jsbitmap) 1 << UNUSED_DOUBLE_BITMAP_BITS) - 1;
- nused = JS_BITS_PER_WORD - UNUSED_DOUBLE_BITMAP_BITS;
- bitmap[DOUBLES_ARENA_BITMAP_WORDS - 1] = mask << nused;
- }
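- /*
- * The mask arithmetic above, illustrated with the unpadded 32-bit numbers
- * from the DOUBLES_PER_ARENA example (DOUBLES_PER_ARENA == 502, 16 bitmap
- * words, UNUSED_DOUBLE_BITMAP_BITS == 16 * 32 - 502 == 10):
- *
- * mask == ((jsbitmap) 1 << 10) - 1 == 0x3FF
- * nused == 32 - 10 == 22
- * bitmap[15] == 0x3FF << 22 == 0xFFC00000
- *
- * so the 10 bitmap bits that correspond to no double cell stay permanently
- * set, i.e. "allocated".
- */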
- static JS_ALWAYS_INLINE JSBool
- IsMarkedDouble(JSGCArenaInfo *a, uint32 index)
- {
- jsbitmap *bitmap;
- JS_ASSERT(a->u.hasMarkedDoubles);
- bitmap = DOUBLE_ARENA_BITMAP(a);
- return JS_TEST_BIT(bitmap, index);
- }
- /*
- * JSRuntime.gcDoubleArenaList.nextDoubleFlags points either to:
- *
- * 1. The next byte in the bitmap area for doubles to check for unmarked
- * (or free) doubles.
- * 2. Or to the end of the bitmap area when all GC cells of the arena are
- * allocated.
- * 3. Or to a special sentinel value indicating that there are no arenas
- * to check for unmarked doubles.
- *
- * We set the sentinel to ARENA_INFO_OFFSET so the single check
- *
- * ((jsuword) nextDoubleFlags & GC_ARENA_MASK) == ARENA_INFO_OFFSET
- *
- * will cover both the second and the third cases.
- */
- #define DOUBLE_BITMAP_SENTINEL ((jsbitmap *) ARENA_INFO_OFFSET)
- #ifdef JS_THREADSAFE
- /*
- * The maximum number of things to put on the local free list by taking
- * several things from the global free list or from the tail of the last
- * allocated arena to amortize the cost of rt->gcLock.
- *
- * The value 8 is based on benchmarks from bug 312238.
- */
- #define MAX_THREAD_LOCAL_THINGS 8
- #endif
- JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval));
- JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString));
- JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble));
- /* We want to use all the available GC thing space for objects' slots. */
- JS_STATIC_ASSERT(sizeof(JSObject) % sizeof(JSGCThing) == 0);
- /*
- * Ensure that JSObject is allocated from a different GC list than jsdouble
- * and JSString, so we can easily finalize JSObject before these two types
- * of GC things. See comments in js_GC.
- */
- JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(JSString)) !=
- GC_FREELIST_INDEX(sizeof(JSObject)));
- JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(jsdouble)) !=
- GC_FREELIST_INDEX(sizeof(JSObject)));
- /*
- * JSPtrTable capacity growth descriptor. The table grows by powers of two
- * starting from capacity JSPtrTableInfo.minCapacity, but switching to linear
- * growth when capacity reaches JSPtrTableInfo.linearGrowthThreshold.
- */
- typedef struct JSPtrTableInfo {
- uint16 minCapacity;
- uint16 linearGrowthThreshold;
- } JSPtrTableInfo;
- #define GC_ITERATOR_TABLE_MIN 4
- #define GC_ITERATOR_TABLE_LINEAR 1024
- static const JSPtrTableInfo iteratorTableInfo = {
- GC_ITERATOR_TABLE_MIN,
- GC_ITERATOR_TABLE_LINEAR
- };
- /* Calculate table capacity based on the current value of JSPtrTable.count. */
- static size_t
- PtrTableCapacity(size_t count, const JSPtrTableInfo *info)
- {
- size_t linear, log, capacity;
- linear = info->linearGrowthThreshold;
- JS_ASSERT(info->minCapacity <= linear);
- if (count == 0) {
- capacity = 0;
- } else if (count < linear) {
- log = JS_CEILING_LOG2W(count);
- JS_ASSERT(log != JS_BITS_PER_WORD);
- capacity = (size_t)1 << log;
- if (capacity < info->minCapacity)
- capacity = info->minCapacity;
- } else {
- capacity = JS_ROUNDUP(count, linear);
- }
- JS_ASSERT(capacity >= count);
- return capacity;
- }
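- /*
- * Example growth sequence using iteratorTableInfo (minCapacity == 4,
- * linearGrowthThreshold == 1024): counts 1..4 map to capacity 4, counts
- * 5..8 to 8, ..., counts 513..1024 to 1024 via the power-of-two branch,
- * and from there capacity grows linearly: counts 1025..2048 map to 2048,
- * counts 2049..3072 to 3072, and so on.
- */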
- static void
- FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info)
- {
- if (table->array) {
- JS_ASSERT(table->count > 0);
- free(table->array);
- table->array = NULL;
- table->count = 0;
- }
- JS_ASSERT(table->count == 0);
- }
- static JSBool
- AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info,
- void *ptr)
- {
- size_t count, capacity;
- void **array;
- count = table->count;
- capacity = PtrTableCapacity(count, info);
- if (count == capacity) {
- if (capacity < info->minCapacity) {
- JS_ASSERT(capacity == 0);
- JS_ASSERT(!table->array);
- capacity = info->minCapacity;
- } else {
- /*
- * Simplify the overflow detection by assuming that a pointer is
- * bigger than a byte.
- */
- JS_STATIC_ASSERT(2 <= sizeof table->array[0]);
- capacity = (capacity < info->linearGrowthThreshold)
- ? 2 * capacity
- : capacity + info->linearGrowthThreshold;
- if (capacity > (size_t)-1 / sizeof table->array[0])
- goto bad;
- }
- array = (void **) realloc(table->array,
- capacity * sizeof table->array[0]);
- if (!array)
- goto bad;
- #ifdef DEBUG
- memset(array + count, JS_FREE_PATTERN,
- (capacity - count) * sizeof table->array[0]);
- #endif
- table->array = array;
- }
- table->array[count] = ptr;
- table->count = count + 1;
- return JS_TRUE;
- bad:
- JS_ReportOutOfMemory(cx);
- return JS_FALSE;
- }
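- /*
- * Hypothetical usage sketch, not part of the original file: registering a
- * pointer in the runtime's iterator table. The function name is made up;
- * the table and growth descriptor are the real ones used by js_FinishGC.
- */
- #if 0
- static JSBool
- RememberIteratorExample(JSContext *cx, JSObject *iterobj)
- {
-     /* Grows rt->gcIteratorTable according to iteratorTableInfo. */
-     return AddToPtrTable(cx, &cx->runtime->gcIteratorTable,
-                          &iteratorTableInfo, iterobj);
- }
- #endif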
- static void
- ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info,
- size_t newCount)
- {
- size_t oldCapacity, capacity;
- void **array;
- JS_ASSERT(newCount <= table->count);
- if (newCount == table->count)
- return;
- oldCapacity = PtrTableCapacity(table->count, info);
- table->count = newCount;
- capacity = PtrTableCapacity(newCount, info);
- if (oldCapacity != capacity) {
- array = table->array;
- JS_ASSERT(array);
- if (capacity == 0) {
- free(array);
- table->array = NULL;
- return;
- }
- array = (void **) realloc(array, capacity * sizeof array[0]);
- if (array)
- table->array = array;
- }
- #ifdef DEBUG
- memset(table->array + newCount, JS_FREE_PATTERN,
- (capacity - newCount) * sizeof table->array[0]);
- #endif
- }
- #ifdef JS_GCMETER
- # define METER(x) ((void) (x))
- # define METER_IF(condition, x) ((void) ((condition) && (x)))
- #else
- # define METER(x) ((void) 0)
- # define METER_IF(condition, x) ((void) 0)
- #endif
- #define METER_UPDATE_MAX(maxLval, rval) \
- METER_IF((maxLval) < (rval), (maxLval) = (rval))
- #if JS_GC_USE_MMAP || !HAS_POSIX_MEMALIGN
- /*
- * For chunks allocated via over-sized malloc, get a pointer to store the gap
- * between the malloc's result and the first arena in the chunk.
- */
- static uint32 *
- GetMallocedChunkGapPtr(jsuword chunk)
- {
- JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
- /* Use the memory after the chunk, see NewGCChunk for details. */
- return (uint32 *) (chunk + (js_gcArenasPerChunk << GC_ARENA_SHIFT));
- }
- #endif
- static jsuword
- NewGCChunk(void)
- {
- void *p;
- #if JS_GC_USE_MMAP
- if (js_gcUseMmap) {
- # if defined(XP_WIN)
- p = VirtualAlloc(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
- MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
- return (jsuword) p;
- # else
- p = mmap(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
- PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- return (p == MAP_FAILED) ? 0 : (jsuword) p;
- # endif
- }
- #endif
- #if HAS_POSIX_MEMALIGN
- if (0 != posix_memalign(&p, GC_ARENA_SIZE,
- GC_ARENA_SIZE * js_gcArenasPerChunk -
- JS_GC_ARENA_PAD)) {
- return 0;
- }
- return (jsuword) p;
- #else
- /*
- * Implement chunk allocation using oversized malloc if mmap and
- * posix_memalign are not available.
- *
- * Since malloc allocates pointers aligned on a word boundary, to get
- * js_gcArenasPerChunk aligned arenas, we need to malloc only
- *
- * ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT) - sizeof(size_t)
- *
- * bytes. But since we store the gap between the malloced pointer and the
- * first arena in the chunk after the chunk, we need to ask for
- *
- * ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT)
- *
- * bytes to ensure that we always have room to store the gap.
- */
- p = malloc((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT);
- if (!p)
- return 0;
- {
- jsuword chunk;
- chunk = ((jsuword) p + GC_ARENA_MASK) & ~GC_ARENA_MASK;
- *GetMallocedChunkGapPtr(chunk) = (uint32) (chunk - (jsuword) p);
- return chunk;
- }
- #endif
- }
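- /*
- * Align-up step of the malloc fallback, illustrated with hypothetical
- * numbers: if malloc returns p == 0x00A01678 and GC_ARENA_MASK == 0xFFF,
- * then
- *
- * chunk == (0x00A01678 + 0xFFF) & ~0xFFF == 0x00A02000
- * gap == chunk - p == 0x988
- *
- * and the gap is stored via GetMallocedChunkGapPtr(chunk) in the extra
- * arena-sized tail that the oversized malloc reserved.
- */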
- static void
- DestroyGCChunk(jsuword chunk)
- {
- JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
- #if JS_GC_USE_MMAP
- if (js_gcUseMmap) {
- # if defined(XP_WIN)
- VirtualFree((void *) chunk, 0, MEM_RELEASE);
- # elif defined(SOLARIS)
- munmap((char *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
- # else
- munmap((void *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
- # endif
- return;
- }
- #endif
- #if HAS_POSIX_MEMALIGN
- free((void *) chunk);
- #else
- /* See comments in NewGCChunk. */
- JS_ASSERT(*GetMallocedChunkGapPtr(chunk) < GC_ARENA_SIZE);
- free((void *) (chunk - *GetMallocedChunkGapPtr(chunk)));
- #endif
- }
- #if CHUNKED_ARENA_ALLOCATION
- static void
- AddChunkToList(JSRuntime *rt, JSGCChunkInfo *ci)
- {
- ci->prevp = &rt->gcChunkList;
- ci->next = rt->gcChunkList;
- if (rt->gcChunkList) {
- JS_ASSERT(rt->gcChunkList->prevp == &rt->gcChunkList);
- rt->gcChunkList->prevp = &ci->next;
- }
- rt->gcChunkList = ci;
- }
- static void
- RemoveChunkFromList(JSRuntime *rt, JSGCChunkInfo *ci)
- {
- *ci->prevp = ci->next;
- if (ci->next) {
- JS_ASSERT(ci->next->prevp == &ci->next);
- ci->next->prevp = ci->prevp;
- }
- }
- #endif
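- /*
- * Note on the list discipline above: prevp points at the previous node's
- * next field, or at rt->gcChunkList itself for the head node, so
- * RemoveChunkFromList can unlink any node with *ci->prevp = ci->next
- * without scanning the list or special-casing the head.
- */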
- static JSGCArenaInfo *
- NewGCArena(JSRuntime *rt)
- {
- jsuword chunk;
- JSGCArenaInfo *a;
- if (rt->gcBytes >= rt->gcMaxBytes)
- return NULL;
- #if CHUNKED_ARENA_ALLOCATION
- if (js_gcArenasPerChunk == 1) {
- #endif
- chunk = NewGCChunk();
- if (chunk == 0)
- return NULL;
- a = ARENA_START_TO_INFO(chunk);
- #if CHUNKED_ARENA_ALLOCATION
- } else {
- JSGCChunkInfo *ci;
- uint32 i;
- JSGCArenaInfo *aprev;
- ci = rt->gcChunkList;
- if (!ci) {
- chunk = NewGCChunk();
- if (chunk == 0)
- return NULL;
- JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
- a = GET_ARENA_INFO(chunk, 0);
- a->firstArena = JS_TRUE;
- a->arenaIndex = 0;
- aprev = NULL;
- i = 0;
- do {
- a->prev = aprev;
- aprev = a;
- ++i;
- a = GET_ARENA_INFO(chunk, i);
- a->firstArena = JS_FALSE;
- a->arenaIndex = i;
- } while (i != js_gcArenasPerChunk - 1);
- ci = GET_CHUNK_INFO(chunk, 0);
- ci->lastFreeArena = aprev;
- ci->numFreeArenas = js_gcArenasPerChunk - 1;
- AddChunkToList(rt, ci);
- } else {
- JS_ASSERT(ci->prevp == &rt->gcChunkList);
- a = ci->lastFreeArena;
- aprev = a->prev;
- if (!aprev) {
- JS_ASSERT(ci->numFreeArenas == 1);
- JS_ASSERT(ARENA_INFO_TO_START(a) == (jsuword) ci);
- RemoveChunkFromList(rt, ci);
- chunk = GET_ARENA_CHUNK(a, GET_ARENA_INDEX(a));
- SET_CHUNK_INFO_INDEX(chunk, NO_FREE_ARENAS);
- } else {
- JS_ASSERT(ci->numFreeArenas >= 2);
- JS_ASSERT(ARENA_INFO_TO_START(a) != (jsuword) ci);
- ci->lastFreeArena = aprev;
- ci->numFreeArenas--;
- }
- }
- }
- #endif
- rt->gcBytes += GC_ARENA_SIZE;
- a->prevUntracedPage = 0;
- memset(&a->u, 0, sizeof(a->u));
- return a;
- }
- static void
- DestroyGCArenas(JSRuntime *rt, JSGCArenaInfo *last)
- {
- JSGCArenaInfo *a;
- while (last) {
- a = last;
- last = last->prev;
- METER(rt->gcStats.afree++);
- JS_ASSERT(rt->gcBytes >= GC_ARENA_SIZE);
- rt->gcBytes -= GC_ARENA_SIZE;
- #if CHUNKED_ARENA_ALLOCATION
- if (js_gcArenasPerChunk == 1) {
- #endif
- DestroyGCChunk(ARENA_INFO_TO_START(a));
- #if CHUNKED_ARENA_ALLOCATION
- } else {
- uint32 arenaIndex;
- jsuword chunk;
- uint32 chunkInfoIndex;
- JSGCChunkInfo *ci;
- # ifdef DEBUG
- jsuword firstArena;
- firstArena = a->firstArena;
- arenaIndex = a->arenaIndex;
- memset((void *) ARENA_INFO_TO_START(a), JS_FREE_PATTERN,
- GC_ARENA_SIZE - JS_GC_ARENA_PAD);
- a->firstArena = firstArena;
- a->arenaIndex = arenaIndex;
- # endif
- arenaIndex = GET_ARENA_INDEX(a);
- chunk = GET_ARENA_CHUNK(a, arenaIndex);
- chunkInfoIndex = GET_CHUNK_INFO_INDEX(chunk);
- if (chunkInfoIndex == NO_FREE_ARENAS) {
- chunkInfoIndex = arenaIndex;
- SET_CHUNK_INFO_INDEX(chunk, arenaIndex);
- ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
- a->prev = NULL;
- ci->lastFreeArena = a;
- ci->numFreeArenas = 1;
- AddChunkToList(rt, ci);
- } else {
- JS_ASSERT(chunkInfoIndex != arenaIndex);
- ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
- JS_ASSERT(ci->numFreeArenas != 0);
- JS_ASSERT(ci->lastFreeArena);
- JS_ASSERT(a != ci->lastFreeArena);
- if (ci->numFreeArenas == js_gcArenasPerChunk - 1) {
- RemoveChunkFromList(rt, ci);
- DestroyGCChunk(chunk);
- } else {
- ++ci->numFreeArenas;
- a->prev = ci->lastFreeArena;
- ci->lastFreeArena = a;
- }
- }
- }
- #endif
- }
- }
- static void
- InitGCArenaLists(JSRuntime *rt)
- {
- uintN i, thingSize;
- JSGCArenaList *arenaList;
- for (i = 0; i < GC_NUM_FREELISTS; i++) {
- arenaList = &rt->gcArenaList[i];
- thingSize = GC_FREELIST_NBYTES(i);
- JS_ASSERT((size_t)(uint16)thingSize == thingSize);
- arenaList->last = NULL;
- arenaList->lastCount = (uint16) THINGS_PER_ARENA(thingSize);
- arenaList->thingSize = (uint16) thingSize;
- arenaList->freeList = NULL;
- }
- rt->gcDoubleArenaList.first = NULL;
- rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL;
- }
- static void
- FinishGCArenaLists(JSRuntime *rt)
- {
- uintN i;
- JSGCArenaList *arenaList;
- for (i = 0; i < GC_NUM_FREELISTS; i++) {
- arenaList = &rt->gcArenaList[i];
- DestroyGCArenas(rt, arenaList->last);
- arenaList->last = NULL;
- arenaList->lastCount = THINGS_PER_ARENA(arenaList->thingSize);
- arenaList->freeList = NULL;
- }
- DestroyGCArenas(rt, rt->gcDoubleArenaList.first);
- rt->gcDoubleArenaList.first = NULL;
- rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL;
- rt->gcBytes = 0;
- JS_ASSERT(rt->gcChunkList == 0);
- }
- /*
- * This function must not be called when the thing is a jsdouble.
- */
- static uint8 *
- GetGCThingFlags(void *thing)
- {
- JSGCArenaInfo *a;
- uint32 index;
- a = THING_TO_ARENA(thing);
- index = THING_TO_INDEX(thing, a->list->thingSize);
- return THING_FLAGP(a, index);
- }
- /*
- * This function returns NULL when the thing is a jsdouble.
- */
- static uint8 *
- GetGCThingFlagsOrNull(void *thing)
- {
- JSGCArenaInfo *a;
- uint32 index;
- a = THING_TO_ARENA(thing);
- if (!a->list)
- return NULL;
- index = THING_TO_INDEX(thing, a->list->thingSize);
- return THING_FLAGP(a, index);
- }
- intN
- js_GetExternalStringGCType(JSString *str)
- {
- uintN type;
- type = (uintN) *GetGCThingFlags(str) & GCF_TYPEMASK;
- JS_ASSERT(type == GCX_STRING || type >= GCX_EXTERNAL_STRING);
- return (type == GCX_STRING) ? -1 : (intN) (type - GCX_EXTERNAL_STRING);
- }
- static uint32
- MapGCFlagsToTraceKind(uintN flags)
- {
- uint32 type;
- type = flags & GCF_TYPEMASK;
- JS_ASSERT(type != GCX_DOUBLE);
- JS_ASSERT(type < GCX_NTYPES);
- return (type < GCX_EXTERNAL_STRING) ? type : JSTRACE_STRING;
- }
- JS_FRIEND_API(uint32)
- js_GetGCThingTraceKind(void *thing)
- {
- JSGCArenaInfo *a;
- uint32 index;
- a = THING_TO_ARENA(thing);
- if (!a->list)
- return JSTRACE_DOUBLE;
- index = THING_TO_INDEX(thing, a->list->thingSize);
- return MapGCFlagsToTraceKind(*THING_FLAGP(a, index));
- }
- JSRuntime*
- js_GetGCStringRuntime(JSString *str)
- {
- JSGCArenaList *list;
- list = THING_TO_ARENA(str)->list;
- JS_ASSERT(list->thingSize == sizeof(JSGCThing));
- JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing)) == 0);
- return (JSRuntime *)((uint8 *)list - offsetof(JSRuntime, gcArenaList));
- }
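- /*
- * The subtraction above is the usual "containing structure" trick: the
- * asserts establish that list points at rt->gcArenaList[0], so stepping
- * back by offsetof(JSRuntime, gcArenaList) recovers the JSRuntime that
- * embeds the array.
- */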
- JSBool
- js_IsAboutToBeFinalized(JSContext *cx, void *thing)
- {
- JSGCArenaInfo *a;
- uint32 index, flags;
- a = THING_TO_ARENA(thing);
- if (!a->list) {
- /*
- * Check if the arena has no marked doubles. In that case the bitmap with
- * the mark flags contains garbage, as it is initialized only when
- * marking the first double in the arena.
- */
- if (!a->u.hasMarkedDoubles)
- return JS_TRUE;
- index = DOUBLE_THING_TO_INDEX(thing);
- return !IsMarkedDouble(a, index);
- }
- index = THING_TO_INDEX(thing, a->list->thingSize);
- flags = *THING_FLAGP(a, index);
- return !(flags & (GCF_MARK | GCF_LOCK | GCF_FINAL));
- }
- /* This is compatible with JSDHashEntryStub. */
- typedef struct JSGCRootHashEntry {
- JSDHashEntryHdr hdr;
- void *root;
- const char *name;
- } JSGCRootHashEntry;
- /* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */
- #define GC_ROOTS_SIZE 256
- #if CHUNKED_ARENA_ALLOCATION
- /*
- * For a CPU with extremely large pages, using them for GC things wastes
- * too much memory.
- */
- # define GC_ARENAS_PER_CPU_PAGE_LIMIT JS_BIT(18 - GC_ARENA_SHIFT)
- JS_STATIC_ASSERT(GC_ARENAS_PER_CPU_PAGE_LIMIT <= NO_FREE_ARENAS);
- #endif
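- /*
- * With GC_ARENA_SHIFT == 12 this limit is JS_BIT(6) == 64 arenas, i.e.
- * CPU pages of up to 256 KiB may back paged chunk allocation; for larger
- * pages js_InitGC falls back to malloc-based chunks (js_gcUseMmap ==
- * JS_FALSE, js_gcArenasPerChunk == 7).
- */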
- JSBool
- js_InitGC(JSRuntime *rt, uint32 maxbytes)
- {
- #if JS_GC_USE_MMAP
- if (js_gcArenasPerChunk == 0) {
- size_t cpuPageSize, arenasPerPage;
- # if defined(XP_WIN)
- SYSTEM_INFO si;
- GetSystemInfo(&si);
- cpuPageSize = si.dwPageSize;
- # elif defined(XP_UNIX) || defined(XP_BEOS)
- cpuPageSize = (size_t) sysconf(_SC_PAGESIZE);
- # else
- # error "Not implemented"
- # endif
- /* cpuPageSize is a power of 2. */
- JS_ASSERT((cpuPageSize & (cpuPageSize - 1)) == 0);
- arenasPerPage = cpuPageSize >> GC_ARENA_SHIFT;
- #ifdef DEBUG
- if (arenasPerPage == 0) {
- fprintf(stderr,
- "JS engine warning: the size of the CPU page, %u bytes, is too low to use\n"
- "paged allocation for the garbage collector. Please report this.\n",
- (unsigned) cpuPageSize);
- }
- #endif
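- /*
- * The single unsigned comparison below checks that
- * 1 <= arenasPerPage <= GC_ARENAS_PER_CPU_PAGE_LIMIT: when arenasPerPage
- * is 0 the subtraction wraps around to the maximal size_t value.
- */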
- if (arenasPerPage - 1 <= (size_t) (GC_ARENAS_PER_CPU_PAGE_LIMIT - 1)) {
- /*
- * Use at least 4 GC arenas per paged allocation chunk to minimize
- * the overhead of mmap/VirtualAlloc.
- */
- js_gcUseMmap = JS_TRUE;
- js_gcArenasPerChunk = JS_MAX((uint32) arenasPerPage, 4);
- } else {
- js_gcUseMmap = JS_FALSE;
- js_gcArenasPerChunk = 7;
- }
- }
- JS_ASSERT(1 <= js_gcArenasPerChunk &&
- js_gcArenasPerChunk <= NO_FREE_ARENAS);
- #endif
- InitGCArenaLists(rt);
- if (!JS_DHashTableInit(&rt->gcRootsHash, JS_DHashGetStubOps(), NULL,
- sizeof(JSGCRootHashEntry), GC_ROOTS_SIZE)) {
- rt->gcRootsHash.ops = NULL;
- return JS_FALSE;
- }
- rt->gcLocksHash = NULL; /* create lazily */
- /*
- * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
- * for default backward API compatibility.
- */
- rt->gcMaxBytes = rt->gcMaxMallocBytes = maxbytes;
- rt->gcEmptyArenaPoolLifespan = 30000;
- METER(memset(&rt->gcStats, 0, sizeof rt->gcStats));
- return JS_TRUE;
- }
- #ifdef JS_GCMETER
- static void
- UpdateArenaStats(JSGCArenaStats *st, uint32 nlivearenas, uint32 nkilledArenas,
- uint32 nthings)
- {
- size_t narenas;
- narenas = nlivearenas + nkilledArenas;
- JS_ASSERT(narenas >= st->livearenas);
- st->newarenas = narenas - st->livearenas;
- st->narenas = narenas;
- st->livearenas = nlivearenas;
- if (st->maxarenas < narenas)
- st->maxarenas = narenas;
- st->totalarenas += narenas;
- st->nthings = nthings;
- if (st->maxthings < nthings)
- st->maxthings = nthings;
- st->totalthings += nthings;
- }
- JS_FRIEND_API(void)
- js_DumpGCStats(JSRuntime *rt, FILE *fp)
- {
- int i;
- size_t sumArenas, sumTotalArenas;
- size_t sumThings, sumMaxThings;
- size_t sumThingSize, sumTotalThingSize;
- size_t sumArenaCapacity, sumTotalArenaCapacity;
- JSGCArenaStats *st;
- size_t thingSize, thingsPerArena;
- size_t sumAlloc, sumLocalAlloc, sumFail, sumRetry;
- fprintf(fp, "\nGC allocation statistics:\n");
- #define UL(x) ((unsigned long)(x))
- #define ULSTAT(x) UL(rt->gcStats.x)
- #define PERCENT(x,y) (100.0 * (double) (x) / (double) (y))
- sumArenas = 0;
- sumTotalArenas = 0;
- sumThings = 0;
- sumMaxThings = 0;
- sumThingSize = 0;
- sumTotalThingSize = 0;
- sumArenaCapacity = 0;
- sumTotalArenaCapacity = 0;
- sumAlloc = 0;
- sumLocalAlloc = 0;
- sumFail = 0;
- sumRetry = 0;
- for (i = -1; i < (int) GC_NUM_FREELISTS; i++) {
- if (i == -1) {
- thingSize = sizeof(jsdouble);
- thingsPerArena = DOUBLES_PER_ARENA;
- st = &rt->gcStats.doubleArenaStats;
- fprintf(fp,
- "Arena list for double values (%lu doubles per arena):",
- UL(thingsPerArena));
- } else {
- thingSize = rt->gcArenaList[i].thingSize;
- thingsPerArena = THINGS_PER_ARENA(thingSize);
- st = &rt->gcStats.arenaStats[i];
- fprintf(fp,
- "Arena list %d (thing size %lu, %lu things per arena):",
- i, UL(GC_FREELIST_NBYTES(i)), UL(thingsPerArena));
- }
- if (st->maxarenas == 0) {
- fputs(" NEVER USED\n", fp);
- continue;
- }
- putc('\n', fp);
- fprintf(fp, " arenas before GC: %lu\n", UL(st->narenas));
- fprintf(fp, " new arenas before GC: %lu (%.1f%%)\n",
- UL(st->newarenas), PERCENT(st->newarenas, st->narenas));
- fprintf(fp, " arenas after GC: %lu (%.1f%%)\n",
- UL(st->livearenas), PERCENT(st->livearenas, st->narenas));
- fprintf(fp, " max arenas: %lu\n", UL(st->maxarenas));
- fprintf(fp, " things: %lu\n", UL(st->nthings));
- fprintf(fp, " GC cell utilization: %.1f%%\n",
- PERCENT(st->nthings, thingsPerArena * st->narenas));
- fprintf(fp, " average cell utilization: %.1f%%\n",
- PERCENT(st->totalthings, thingsPerArena * st->totalarenas));
- fprintf(fp, " max things: %lu\n", UL(st->maxthings));
- fprintf(fp, " alloc attempts: %lu\n", UL(st->alloc));
- fprintf(fp, " alloc without locks: %lu (%.1f%%)\n",
- UL(st->localalloc), PERCENT(st->localalloc, st->alloc));
- sumArenas += st->narenas;
- sumTotalArenas += st->totalarenas;
- sumThings += st->nthings;
- sumMaxThings += st->maxthings;
- sumThingSize += thingSize * st->nthings;
- sumTotalThingSize += thingSize * st->totalthings;
- sumArenaCapacity += thingSize * thingsPerArena * st->narenas;
- sumTotalArenaCapacity += thingSize * thingsPerArena * st->totalarenas;
- sumAlloc += st->alloc;
- sumLocalAlloc += st->localalloc;
- sumFail += st->fail;
- sumRetry += st->retry;
- }
- fprintf(fp, "TOTAL STATS:\n");
- fprintf(fp, " bytes allocated: %lu\n", UL(rt->gcBytes));
- fprintf(fp, " total GC arenas: %lu\n", UL(sumArenas));
- fprintf(fp, " total GC things: %lu\n", UL(sumThings));
- fprintf(fp, " max total GC things: %lu\n", UL(sumMaxThings));
- fprintf(fp, " GC cell utilization: %.1f%%\n",
- PERCENT(sumThingSize, sumArenaCapacity));
- fprintf(fp, " average cell utilization: %.1f%%\n",
- PERCENT(sumTotalThingSize, sumTotalArenaCapacity));
- fprintf(fp, "allocation retries after GC: %lu\n", UL(sumRetry));
- fprintf(fp, " alloc attempts: %lu\n", UL(sumAlloc));
- fprintf(fp, " alloc without locks: %lu (%.1f%%)\n",
- UL(sumLocalAlloc), PERCENT(sumLocalAlloc, sumAlloc));
- fprintf(fp, " allocation failures: %lu\n", UL(sumFail));
- fprintf(fp, " things born locked: %lu\n", ULSTAT(lockborn));
- fprintf(fp, " valid lock calls: %lu\n", ULSTAT(lock));
- fprintf(fp, " valid unlock calls: %lu\n", ULSTAT(unlock));
- fprintf(fp, " mark recursion depth: %lu\n", ULSTAT(depth));
- fprintf(fp, " maximum mark recursion: %lu\n", ULSTAT(maxdepth));
- fprintf(fp, " mark C recursion depth: %lu\n", ULSTAT(cdepth));
- fprintf(fp, " maximum mark C recursion: %lu\n", ULSTAT(maxcdepth));
- fprintf(fp, " delayed tracing calls: %lu\n", ULSTAT(untraced));
- #ifdef DEBUG
- fprintf(fp, " max trace later count: %lu\n", ULSTAT(maxuntraced));
- #endif
- fprintf(fp, " maximum GC nesting level: %lu\n", ULSTAT(maxlevel));
- fprintf(fp, "potentially useful GC calls: %lu\n", ULSTAT(poke));
- fprintf(fp, " thing arenas freed so far: %lu\n", ULSTAT(afree));
- fprintf(fp, " stack segments scanned: %lu\n", ULSTAT(stackseg));
- fprintf(fp, "stack segment slots scanned: %lu\n", ULSTAT(segslots));
- fprintf(fp, "reachable closeable objects: %lu\n", ULSTAT(nclose));
- fprintf(fp, " max reachable closeable: %lu\n", ULSTAT(maxnclose));
- fprintf(fp, " scheduled close hooks: %lu\n", ULSTAT(closelater));
- fprintf(fp, " max scheduled close hooks: %lu\n", ULSTAT(maxcloselater));
- #undef UL
- #undef ULSTAT
- #undef PERCENT
- #ifdef JS_ARENAMETER
- JS_DumpArenaStats(fp);
- #endif
- }
- #endif
- #ifdef DEBUG
- static void
- CheckLeakedRoots(JSRuntime *rt);
- #endif
- #ifdef JS_THREADSAFE
- static void
- TrimGCFreeListsPool(JSRuntime *rt, uintN keepCount);
- #endif
- void
- js_FinishGC(JSRuntime *rt)
- {
- #ifdef JS_ARENAMETER
- JS_DumpArenaStats(stdout);
- #endif
- #ifdef JS_GCMETER
- js_DumpGCStats(rt, stdout);
- #endif
- FreePtrTable(&rt->gcIteratorTable, &iteratorTableInfo);
- #ifdef JS_THREADSAFE
- TrimGCFreeListsPool(rt, 0);
- JS_ASSERT(!rt->gcFreeListsPool);
- #endif
- FinishGCArenaLists(rt);
- if (rt->gcRootsHash.ops) {
- #ifdef DEBUG
- CheckLeakedRoots(rt);
- #endif
- JS_DHashTableFinish(&rt->gcRootsHash);
- rt->gcRootsHash.ops = NULL;
- }
- if (rt->gcLocksHash) {
- JS_DHashTableDestroy(rt->gcLocksHash);
- rt->gcLocksHash = NULL;
- }
- }
- JSBool
- js_AddRoot(JSContext *cx, void *rp, const char *name)
- {
- JSBool ok = js_AddRootRT(cx->runtime, rp, name);
- if (!ok)
- JS_ReportOutOfMemory(cx);
- return ok;
- }
- JSBool
- js_AddRootRT(JSRuntime *rt, void *rp, const char *name)
- {
- JSBool ok;
- JSGCRootHashEntry *rhe;
- /*
- * Due to the long-standing, but now removed, use of rt->gcLock across the
- * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
- * properly with a racing GC, without calling JS_AddRoot from a request.
- * We have to preserve API compatibility here, now that we avoid holding
- * rt->gcLock across the mark phase (including the root hashtable mark).
- *
- * If the GC is running and we're called on another thread, wait for this
- * GC activation to finish. We can safely wait here (in the case where we
- * are called within a request on another thread's context) without fear
- * of deadlock because the GC doesn't set rt->gcRunning until after it has
- * waited for all active requests to end.
- */
- JS_LOCK_GC(rt);
- #ifdef JS_THREADSAFE
- JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
- if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
- do {
- JS_AWAIT_GC_DONE(rt);
- } while (rt->gcLevel > 0);
- }
- #endif
- rhe = (JSGCRootHashEntry *)
- JS_DHashTableOperate(&rt->gcRootsHas…
(remainder of file truncated)