PageRenderTime 341ms CodeModel.GetById 70ms app.highlight 217ms RepoModel.GetById 35ms app.codeStats 1ms

/js/lib/Socket.IO-node/support/expresso/deps/jscoverage/js/jsgc.cpp

http://github.com/onedayitwillmake/RealtimeMultiplayerNodeJs
C++ | 2026 lines | 1351 code | 233 blank | 442 comment | 217 complexity | 14d9b2f3e86efb6165b035660f48be19 MD5 | raw file

Large files are truncated, but you can click here to view the full file

   1/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
   2 * vim: set ts=8 sw=4 et tw=78:
   3 *
   4 * ***** BEGIN LICENSE BLOCK *****
   5 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
   6 *
   7 * The contents of this file are subject to the Mozilla Public License Version
   8 * 1.1 (the "License"); you may not use this file except in compliance with
   9 * the License. You may obtain a copy of the License at
  10 * http://www.mozilla.org/MPL/
  11 *
  12 * Software distributed under the License is distributed on an "AS IS" basis,
  13 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
  14 * for the specific language governing rights and limitations under the
  15 * License.
  16 *
  17 * The Original Code is Mozilla Communicator client code, released
  18 * March 31, 1998.
  19 *
  20 * The Initial Developer of the Original Code is
  21 * Netscape Communications Corporation.
  22 * Portions created by the Initial Developer are Copyright (C) 1998
  23 * the Initial Developer. All Rights Reserved.
  24 *
  25 * Contributor(s):
  26 *
  27 * Alternatively, the contents of this file may be used under the terms of
  28 * either of the GNU General Public License Version 2 or later (the "GPL"),
  29 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
  30 * in which case the provisions of the GPL or the LGPL are applicable instead
  31 * of those above. If you wish to allow use of your version of this file only
  32 * under the terms of either the GPL or the LGPL, and not to allow others to
  33 * use your version of this file under the terms of the MPL, indicate your
  34 * decision by deleting the provisions above and replace them with the notice
  35 * and other provisions required by the GPL or the LGPL. If you do not delete
  36 * the provisions above, a recipient may use your version of this file under
  37 * the terms of any one of the MPL, the GPL or the LGPL.
  38 *
  39 * ***** END LICENSE BLOCK ***** */
  40
  41/*
  42 * JS Mark-and-Sweep Garbage Collector.
  43 *
  44 * This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see
  45 * jsgc.h). It allocates from a special GC arena pool with each arena allocated
  46 * using malloc. It uses an ideally parallel array of flag bytes to hold the
  47 * mark bit, finalizer type index, etc.
  48 *
  49 * XXX swizzle page to freelist for better locality of reference
  50 */
  51#include "jsstddef.h"
  52#include <stdlib.h>     /* for free */
  53#include <math.h>
  54#include <string.h>     /* for memset used when DEBUG */
  55#include "jstypes.h"
  56#include "jsutil.h" /* Added by JSIFY */
  57#include "jshash.h" /* Added by JSIFY */
  58#include "jsbit.h"
  59#include "jsclist.h"
  60#include "jsprf.h"
  61#include "jsapi.h"
  62#include "jsatom.h"
  63#include "jscntxt.h"
  64#include "jsversion.h"
  65#include "jsdbgapi.h"
  66#include "jsexn.h"
  67#include "jsfun.h"
  68#include "jsgc.h"
  69#include "jsinterp.h"
  70#include "jsiter.h"
  71#include "jslock.h"
  72#include "jsnum.h"
  73#include "jsobj.h"
  74#include "jsparse.h"
  75#include "jsscope.h"
  76#include "jsscript.h"
  77#include "jsstr.h"
  78#include "jstracer.h"
  79
  80#if JS_HAS_XML_SUPPORT
  81#include "jsxml.h"
  82#endif
  83
  84/*
  85 * Check if posix_memalign is available.
  86 */
  87#if _POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600 || MOZ_MEMORY
  88# define HAS_POSIX_MEMALIGN 1
  89#else
  90# define HAS_POSIX_MEMALIGN 0
  91#endif
  92
  93/*
  94 * jemalloc provides posix_memalign.
  95 */
  96#ifdef MOZ_MEMORY
  97extern "C" {
  98#include "../../memory/jemalloc/jemalloc.h"
  99}
 100#endif
 101
 102/*
 103 * Include the headers for mmap unless we have posix_memalign and do not
 104 * insist on mmap.
 105 */
 106#if JS_GC_USE_MMAP || (!defined JS_GC_USE_MMAP && !HAS_POSIX_MEMALIGN)
 107# if defined(XP_WIN)
 108#  ifndef JS_GC_USE_MMAP
 109#   define JS_GC_USE_MMAP 1
 110#  endif
 111#  include <windows.h>
 112# else
 113#  if defined(XP_UNIX) || defined(XP_BEOS)
 114#   include <unistd.h>
 115#  endif
 116#  if _POSIX_MAPPED_FILES > 0
 117#   ifndef JS_GC_USE_MMAP
 118#    define JS_GC_USE_MMAP 1
 119#   endif
 120#   include <sys/mman.h>
 121
 122/* On Mac OS X MAP_ANONYMOUS is not defined. */
 123#   if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
 124#    define MAP_ANONYMOUS MAP_ANON
 125#   endif
 126#  else
 127#   if JS_GC_USE_MMAP
 128#    error "JS_GC_USE_MMAP is set when mmap is not available"
 129#   endif
 130#  endif
 131# endif
 132#endif
 133
 134/*
 135 * A GC arena contains a fixed number of flag bits for each thing in its heap,
 136 * and supports O(1) lookup of a flag given its thing's address.
 137 *
 138 * To implement this, we allocate things of the same size from a GC arena
 139 * containing GC_ARENA_SIZE bytes aligned on GC_ARENA_SIZE boundary. The
 140 * following picture shows arena's layout:
 141 *
 142 *  +------------------------------+--------------------+---------------+
 143 *  | allocation area for GC thing | flags of GC things | JSGCArenaInfo |
 144 *  +------------------------------+--------------------+---------------+
 145 *
 146 * To find the flag bits for the thing we calculate the thing index counting
 147 * from arena's start using:
 148 *
 149 *   thingIndex = (thingAddress & GC_ARENA_MASK) / thingSize
 150 *
 151 * The details of flag's lookup depend on thing's kind. For all GC things
 152 * except doubles we use one byte of flags where the 4 bits determine thing's
 153 * type and the rest is used to implement GC marking, finalization and
 154 * locking. We calculate the address of flag's byte using:
 155 *
 156 *   flagByteAddress =
 157 *       (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo) - thingIndex
 158 *
 159 * where
 160 *
 161 *   (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo)
 162 *
 163 * is the last byte of flags' area.
 164 *
 165 * This implies that the things are allocated from the start of their area and
 166 * flags are allocated from the end. This arrangement avoids a relatively
 167 * expensive calculation of the location of the boundary separating things and
 168 * flags. The boundary's offset from the start of the arena is given by:
 169 *
 170 *   thingsPerArena * thingSize
 171 *
 172 * where thingsPerArena is the number of things that the arena can hold:
 173 *
 174 *   (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / (thingSize + 1).
 175 *
 176 * To allocate doubles we use a specialized arena. It can contain only numbers
 177 * so we do not need the type bits. Moreover, since the doubles do not require
 178 * a finalizer and very few of them are locked via js_LockGCThing API, we use
 179 * just one bit of flags per double to denote if it was marked during the
 180 * marking phase of the GC. The locking is implemented via a hash table. Thus
 181 * for doubles the flag area becomes a bitmap.
 182 *
 183 * JS_GC_USE_MMAP macro governs the choice of the aligned arena allocator.
 184 * When it is true, a platform-dependent function like mmap is used to get
 185 * memory aligned on CPU page boundaries. If the macro is false or undefined,
 186 * posix_memalign is used when available. Otherwise the code uses malloc to
 187 * over-allocate a chunk with js_gcArenasPerChunk aligned arenas. The
 188 * approximate space overhead of this is 1/js_gcArenasPerChunk. For details,
 189 * see NewGCChunk/DestroyGCChunk below.
 190 *
 191 * The code also allocates arenas in chunks when JS_GC_USE_MMAP is 1 to
 192 * minimize the overhead of mmap/munmap. In this case js_gcArenasPerChunk can
 193 * not be a compile-time constant as the system page size is not known until
 194 * runtime.
 195 */
 196#if JS_GC_USE_MMAP
 197static uint32 js_gcArenasPerChunk = 0;
 198static JSBool js_gcUseMmap = JS_FALSE;
 199#elif HAS_POSIX_MEMALIGN
 200# define js_gcArenasPerChunk 1
 201#else
 202# define js_gcArenasPerChunk 7
 203#endif
 204
 205#if defined(js_gcArenasPerChunk) && js_gcArenasPerChunk == 1
 206# define CHUNKED_ARENA_ALLOCATION 0
 207#else
 208# define CHUNKED_ARENA_ALLOCATION 1
 209#endif
 210
 211#define GC_ARENA_SHIFT              12
 212#define GC_ARENA_MASK               ((jsuword) JS_BITMASK(GC_ARENA_SHIFT))
 213#define GC_ARENA_SIZE               JS_BIT(GC_ARENA_SHIFT)
 214
 215/*
 216 * JS_GC_ARENA_PAD defines the number of bytes to pad JSGCArenaInfo structure.
 217 * It is used to improve allocation efficiency when using posix_memalign. If
 218 * malloc's implementation uses internal headers, then calling
 219 *
 220 *   posix_memalign(&p, GC_ARENA_SIZE, GC_ARENA_SIZE * js_gcArenasPerChunk)
 221 *
 222 * in a sequence leaves holes between allocations of the size GC_ARENA_SIZE
 223 * due to the need to fit headers. JS_GC_ARENA_PAD mitigates that so the code
 224 * calls
 225 *
 226 *     posix_memalign(&p, GC_ARENA_SIZE,
 227 *                    GC_ARENA_SIZE * js_gcArenasPerChunk - JS_GC_ARENA_PAD)
 228 *
 229 * When JS_GC_ARENA_PAD is equal or greater than the number of words in the
 230 * system header, the system can pack all allocations together without holes.
 231 *
 232 * With JS_GC_USE_MEMALIGN we want at least 2 word pad unless posix_memalign
 233 * comes from jemalloc that does not use any headers/trailers.
 234 */
 235#ifndef JS_GC_ARENA_PAD
 236# if HAS_POSIX_MEMALIGN && !MOZ_MEMORY
 237#  define JS_GC_ARENA_PAD (2 * JS_BYTES_PER_WORD)
 238# else
 239#  define JS_GC_ARENA_PAD 0
 240# endif
 241#endif
 242
/*
 * Per-arena bookkeeping, stored at the tail of every GC arena (see the
 * arena layout picture above: things | flags | JSGCArenaInfo).
 */
struct JSGCArenaInfo {
    /*
     * Allocation list for the arena or NULL if the arena holds double values.
     */
    JSGCArenaList   *list;

    /*
     * Pointer to the previous arena in a linked list. The arena can either
     * belong to one of JSContext.gcArenaList lists or, when it does not have
     * any allocated GC things, to the list of free arenas in the chunk with
     * head stored in JSGCChunkInfo.lastFreeArena.
     */
    JSGCArenaInfo   *prev;

#if !CHUNKED_ARENA_ALLOCATION
    jsuword         prevUntracedPage;
#else
    /*
     * A link field for the list of arenas with marked but not yet traced
     * things. The field is encoded as arena's page to share the space with
     * firstArena and arenaIndex fields.
     */
    jsuword         prevUntracedPage :  JS_BITS_PER_WORD - GC_ARENA_SHIFT;

    /*
     * When firstArena is false, the index of arena in the chunk. When
     * firstArena is true, the index of a free arena holding JSGCChunkInfo or
     * NO_FREE_ARENAS if there are no free arenas in the chunk.
     *
     * GET_ARENA_INDEX and GET_CHUNK_INFO_INDEX are convenience macros to
     * access either of indexes.
     */
    jsuword         arenaIndex :        GC_ARENA_SHIFT - 1;

    /* Flag indicating if the arena is the first in the chunk. */
    jsuword         firstArena :        1;
#endif

    union {
        jsuword     untracedThings;     /* bitset for fast search of marked
                                           but not yet traced things */
        JSBool      hasMarkedDoubles;   /* the arena has marked doubles */
    } u;

#if JS_GC_ARENA_PAD != 0
    uint8           pad[JS_GC_ARENA_PAD];
#endif
};
 291
 292/*
 293 * Verify that the bit fields are indeed shared and JSGCArenaInfo is as small
 294 * as possible. The code does not rely on this check so if on a particular
 295 * platform this does not compile, then, as a workaround, comment the assert
 296 * out and submit a bug report.
 297 */
 298JS_STATIC_ASSERT(offsetof(JSGCArenaInfo, u) == 3 * sizeof(jsuword));
 299
 300/*
 301 * Macros to convert between JSGCArenaInfo, the start address of the arena and
 302 * arena's page defined as (start address) >> GC_ARENA_SHIFT.
 303 */
 304#define ARENA_INFO_OFFSET (GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo))
 305
 306#define IS_ARENA_INFO_ADDRESS(arena)                                          \
 307    (((jsuword) (arena) & GC_ARENA_MASK) == ARENA_INFO_OFFSET)
 308
 309#define ARENA_START_TO_INFO(arenaStart)                                       \
 310    (JS_ASSERT(((arenaStart) & (jsuword) GC_ARENA_MASK) == 0),                \
 311     (JSGCArenaInfo *) ((arenaStart) + (jsuword) ARENA_INFO_OFFSET))
 312
 313#define ARENA_INFO_TO_START(arena)                                            \
 314    (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)),                                 \
 315     (jsuword) (arena) & ~(jsuword) GC_ARENA_MASK)
 316
 317#define ARENA_PAGE_TO_INFO(arenaPage)                                         \
 318    (JS_ASSERT(arenaPage != 0),                                               \
 319     JS_ASSERT(!((jsuword)(arenaPage) >> (JS_BITS_PER_WORD-GC_ARENA_SHIFT))), \
 320     ARENA_START_TO_INFO((arenaPage) << GC_ARENA_SHIFT))
 321
 322#define ARENA_INFO_TO_PAGE(arena)                                             \
 323    (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)),                                 \
 324     ((jsuword) (arena) >> GC_ARENA_SHIFT))
 325
 326#define GET_ARENA_INFO(chunk, index)                                          \
 327    (JS_ASSERT((index) < js_gcArenasPerChunk),                                \
 328     ARENA_START_TO_INFO(chunk + ((index) << GC_ARENA_SHIFT)))
 329
 330#if CHUNKED_ARENA_ALLOCATION
 331/*
 332 * Definitions for allocating arenas in chunks.
 333 *
 334 * All chunks that have at least one free arena are put on the doubly-linked
 335 * list with the head stored in JSRuntime.gcChunkList. JSGCChunkInfo contains
 336 * the head of the chunk's free arena list together with the link fields for
 337 * gcChunkList.
 338 *
 339 * Structure stored in one of chunk's free arenas. GET_CHUNK_INFO_INDEX gives
 340 * the index of this arena. When all arenas in the chunk are used, it is
 341 * removed from the list and the index is set to NO_FREE_ARENAS indicating
 342 * that the chunk is not on gcChunkList and has no JSGCChunkInfo available.
 343 */
 344
/*
 * Bookkeeping for a chunk with at least one free arena, stored inside one
 * of the chunk's free arenas (see the comment above). prevp/next link the
 * chunk into the doubly-linked list headed by JSRuntime.gcChunkList.
 */
struct JSGCChunkInfo {
    JSGCChunkInfo   **prevp;            /* address of the pointer that points
                                           at this node in gcChunkList */
    JSGCChunkInfo   *next;              /* next chunk with free arenas */
    JSGCArenaInfo   *lastFreeArena;     /* head of this chunk's free-arena
                                           list (linked via arena->prev) */
    uint32          numFreeArenas;      /* number of arenas on that list */
};
 351
 352#define NO_FREE_ARENAS              JS_BITMASK(GC_ARENA_SHIFT - 1)
 353
 354#ifdef js_gcArenasPerChunk
 355JS_STATIC_ASSERT(1 <= js_gcArenasPerChunk &&
 356                 js_gcArenasPerChunk <= NO_FREE_ARENAS);
 357#endif
 358
 359#define GET_ARENA_CHUNK(arena, index)                                         \
 360    (JS_ASSERT(GET_ARENA_INDEX(arena) == index),                              \
 361     ARENA_INFO_TO_START(arena) - ((index) << GC_ARENA_SHIFT))
 362
 363#define GET_ARENA_INDEX(arena)                                                \
 364    ((arena)->firstArena ? 0 : (uint32) (arena)->arenaIndex)
 365
 366#define GET_CHUNK_INFO_INDEX(chunk)                                           \
 367    ((uint32) ARENA_START_TO_INFO(chunk)->arenaIndex)
 368
 369#define SET_CHUNK_INFO_INDEX(chunk, index)                                    \
 370    (JS_ASSERT((index) < js_gcArenasPerChunk || (index) == NO_FREE_ARENAS),   \
 371     (void) (ARENA_START_TO_INFO(chunk)->arenaIndex = (jsuword) (index)))
 372
 373#define GET_CHUNK_INFO(chunk, infoIndex)                                      \
 374    (JS_ASSERT(GET_CHUNK_INFO_INDEX(chunk) == (infoIndex)),                   \
 375     JS_ASSERT((uint32) (infoIndex) < js_gcArenasPerChunk),                   \
 376     (JSGCChunkInfo *) ((chunk) + ((infoIndex) << GC_ARENA_SHIFT)))
 377
 378#define CHUNK_INFO_TO_INDEX(ci)                                               \
 379    GET_ARENA_INDEX(ARENA_START_TO_INFO((jsuword)ci))
 380
 381#endif
 382
 383/*
 384 * Macros for GC-thing operations.
 385 */
 386#define THINGS_PER_ARENA(thingSize)                                           \
 387    ((GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo)) / ((thingSize) + 1U))
 388
 389#define THING_TO_ARENA(thing)                                                 \
 390    ((JSGCArenaInfo *)(((jsuword) (thing) | GC_ARENA_MASK) +                  \
 391                       1 - sizeof(JSGCArenaInfo)))
 392
 393#define THING_TO_INDEX(thing, thingSize)                                      \
 394    ((uint32) ((jsuword) (thing) & GC_ARENA_MASK) / (uint32) (thingSize))
 395
 396#define THING_FLAGS_END(arena) ((uint8 *)(arena))
 397
 398#define THING_FLAGP(arena, thingIndex)                                        \
 399    (JS_ASSERT((jsuword) (thingIndex)                                         \
 400               < (jsuword) THINGS_PER_ARENA((arena)->list->thingSize)),       \
 401     (uint8 *)(arena) - 1 - (thingIndex))
 402
 403#define THING_TO_FLAGP(thing, thingSize)                                      \
 404    THING_FLAGP(THING_TO_ARENA(thing), THING_TO_INDEX(thing, thingSize))
 405
 406#define FLAGP_TO_ARENA(flagp) THING_TO_ARENA(flagp)
 407
 408#define FLAGP_TO_INDEX(flagp)                                                 \
 409    (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) < ARENA_INFO_OFFSET),      \
 410     (ARENA_INFO_OFFSET - 1 - (uint32) ((jsuword) (flagp) & GC_ARENA_MASK)))
 411
 412#define FLAGP_TO_THING(flagp, thingSize)                                      \
 413    (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) >=                         \
 414               (ARENA_INFO_OFFSET - THINGS_PER_ARENA(thingSize))),            \
 415     (JSGCThing *)(((jsuword) (flagp) & ~GC_ARENA_MASK) +                     \
 416                   (thingSize) * FLAGP_TO_INDEX(flagp)))
 417
 418/*
 419 * Macros for the specialized arena for doubles.
 420 *
 421 * DOUBLES_PER_ARENA defines the maximum number of doubles that the arena can
 422 * hold. We find it as the following. Let n be the number of doubles in the
 423 * arena. Together with the bitmap of flags and JSGCArenaInfo they should fit
 424 * the arena. Hence DOUBLES_PER_ARENA or n_max is the maximum value of n for
 425 * which the following holds:
 426 *
 427 *   n*s + ceil(n/B) <= M                                               (1)
 428 *
 429 * where "/" denotes normal real division,
 430 *       ceil(r) gives the least integer not smaller than the number r,
 431 *       s is the number of words in jsdouble,
 432 *       B is number of bits per word or B == JS_BITS_PER_WORD
 433 *       M is the number of words in the arena before JSGCArenaInfo or
 434 *       M == (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / sizeof(jsuword).
 435 *       M == ARENA_INFO_OFFSET / sizeof(jsuword)
 436 *
 437 * We rewrite the inequality as
 438 *
 439 *   n*B*s/B + ceil(n/B) <= M,
 440 *   ceil(n*B*s/B + n/B) <= M,
 441 *   ceil(n*(B*s + 1)/B) <= M                                           (2)
 442 *
 443 * We define a helper function e(n, s, B),
 444 *
 445 *   e(n, s, B) := ceil(n*(B*s + 1)/B) - n*(B*s + 1)/B, 0 <= e(n, s, B) < 1.
 446 *
 447 * It gives:
 448 *
 449 *   n*(B*s + 1)/B + e(n, s, B) <= M,
 450 *   n + e*B/(B*s + 1) <= M*B/(B*s + 1)
 451 *
 452 * We apply the floor function to both sides of the last equation, where
 453 * floor(r) gives the biggest integer not greater than r. As a consequence we
 454 * have:
 455 *
 456 *   floor(n + e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
 457 *   n + floor(e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
 458 *   n <= floor(M*B/(B*s + 1)),                                         (3)
 459 *
 460 * where floor(e*B/(B*s + 1)) is zero as e*B/(B*s + 1) < B/(B*s + 1) < 1.
 461 * Thus any n that satisfies the original constraint (1) or its equivalent (2),
 462 * must also satisfy (3). That is, we got an upper estimate for the maximum
 463 * value of n. Lets show that this upper estimate,
 464 *
 465 *   floor(M*B/(B*s + 1)),                                              (4)
 466 *
 467 * also satisfies (1) and, as such, gives the required maximum value.
 468 * Substituting it into (2) gives:
 469 *
 470 *   ceil(floor(M*B/(B*s + 1))*(B*s + 1)/B) == ceil(floor(M/X)*X)
 471 *
 472 * where X == (B*s + 1)/B > 1. But then floor(M/X)*X <= M/X*X == M and
 473 *
 474 *   ceil(floor(M/X)*X) <= ceil(M) == M.
 475 *
 476 * Thus the value of (4) gives the maximum n satisfying (1).
 477 *
 478 * For the final result we observe that in (4)
 479 *
 480 *    M*B == ARENA_INFO_OFFSET / sizeof(jsuword) * JS_BITS_PER_WORD
 481 *        == ARENA_INFO_OFFSET * JS_BITS_PER_BYTE
 482 *
 483 *  and
 484 *
 485 *    B*s == JS_BITS_PER_WORD * sizeof(jsdouble) / sizeof(jsuword)
 486 *        == JS_BITS_PER_DOUBLE.
 487 */
 488#define DOUBLES_PER_ARENA                                                     \
 489    ((ARENA_INFO_OFFSET * JS_BITS_PER_BYTE) / (JS_BITS_PER_DOUBLE + 1))
 490
 491/*
 492 * Check that  ARENA_INFO_OFFSET and sizeof(jsdouble) divides sizeof(jsuword).
 493 */
 494JS_STATIC_ASSERT(ARENA_INFO_OFFSET % sizeof(jsuword) == 0);
 495JS_STATIC_ASSERT(sizeof(jsdouble) % sizeof(jsuword) == 0);
 496JS_STATIC_ASSERT(sizeof(jsbitmap) == sizeof(jsuword));
 497
 498#define DOUBLES_ARENA_BITMAP_WORDS                                            \
 499    (JS_HOWMANY(DOUBLES_PER_ARENA, JS_BITS_PER_WORD))
 500
 501/* Check that DOUBLES_PER_ARENA indeed maximises (1). */
 502JS_STATIC_ASSERT(DOUBLES_PER_ARENA * sizeof(jsdouble) +
 503                 DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword) <=
 504                 ARENA_INFO_OFFSET);
 505
 506JS_STATIC_ASSERT((DOUBLES_PER_ARENA + 1) * sizeof(jsdouble) +
 507                 sizeof(jsuword) *
 508                 JS_HOWMANY((DOUBLES_PER_ARENA + 1), JS_BITS_PER_WORD) >
 509                 ARENA_INFO_OFFSET);
 510
 511/*
 512 * When DOUBLES_PER_ARENA % BITS_PER_DOUBLE_FLAG_UNIT != 0, some bits in the
 513 * last byte of the occupation bitmap are unused.
 514 */
 515#define UNUSED_DOUBLE_BITMAP_BITS                                             \
 516    (DOUBLES_ARENA_BITMAP_WORDS * JS_BITS_PER_WORD - DOUBLES_PER_ARENA)
 517
 518JS_STATIC_ASSERT(UNUSED_DOUBLE_BITMAP_BITS < JS_BITS_PER_WORD);
 519
 520#define DOUBLES_ARENA_BITMAP_OFFSET                                           \
 521    (ARENA_INFO_OFFSET - DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword))
 522
 523#define CHECK_DOUBLE_ARENA_INFO(arenaInfo)                                    \
 524    (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arenaInfo)),                             \
 525     JS_ASSERT(!(arenaInfo)->list))                                           \
 526
 527/*
 528 * Get the start of the bitmap area containing double mark flags in the arena.
 529 * To access the flag the code uses
 530 *
 531 *   JS_TEST_BIT(bitmapStart, index)
 532 *
 533 * That is, compared with the case of arenas with non-double things, we count
 534 * flags from the start of the bitmap area, not from the end.
 535 */
 536#define DOUBLE_ARENA_BITMAP(arenaInfo)                                        \
 537    (CHECK_DOUBLE_ARENA_INFO(arenaInfo),                                      \
 538     (jsbitmap *) arenaInfo - DOUBLES_ARENA_BITMAP_WORDS)
 539
 540#define DOUBLE_THING_TO_INDEX(thing)                                          \
 541    (CHECK_DOUBLE_ARENA_INFO(THING_TO_ARENA(thing)),                          \
 542     JS_ASSERT(((jsuword) (thing) & GC_ARENA_MASK) <                          \
 543               DOUBLES_ARENA_BITMAP_OFFSET),                                  \
 544     ((uint32) (((jsuword) (thing) & GC_ARENA_MASK) / sizeof(jsdouble))))
 545
 546static void
 547ClearDoubleArenaFlags(JSGCArenaInfo *a)
 548{
 549    jsbitmap *bitmap, mask;
 550    uintN nused;
 551
 552    /*
 553     * When some high bits in the last byte of the double occupation bitmap
 554     * are unused, we must set them. Otherwise RefillDoubleFreeList will
 555     * assume that they corresponds to some free cells and tries to allocate
 556     * them.
 557     *
 558     * Note that the code works correctly with UNUSED_DOUBLE_BITMAP_BITS == 0.
 559     */
 560    bitmap = DOUBLE_ARENA_BITMAP(a);
 561    memset(bitmap, 0, (DOUBLES_ARENA_BITMAP_WORDS - 1) * sizeof *bitmap);
 562    mask = ((jsbitmap) 1 << UNUSED_DOUBLE_BITMAP_BITS) - 1;
 563    nused = JS_BITS_PER_WORD - UNUSED_DOUBLE_BITMAP_BITS;
 564    bitmap[DOUBLES_ARENA_BITMAP_WORDS - 1] = mask << nused;
 565}
 566
 567static JS_ALWAYS_INLINE JSBool
 568IsMarkedDouble(JSGCArenaInfo *a, uint32 index)
 569{
 570    jsbitmap *bitmap;
 571
 572    JS_ASSERT(a->u.hasMarkedDoubles);
 573    bitmap = DOUBLE_ARENA_BITMAP(a);
 574    return JS_TEST_BIT(bitmap, index);
 575}
 576
 577/*
 578 * JSRuntime.gcDoubleArenaList.nextDoubleFlags points either to:
 579 *
 580 *   1. The next byte in the bitmap area for doubles to check for unmarked
 581 *      (or free) doubles.
 582 *   2. Or to the end of the bitmap area when all GC cells of the arena are
 583 *      allocated.
 584 *   3. Or to a special sentinel value indicating that there are no arenas
 585 *      to check for unmarked doubles.
 586 *
 587 * We set the sentinel to ARENA_INFO_OFFSET so the single check
 588 *
 589 *   ((jsuword) nextDoubleFlags & GC_ARENA_MASK) == ARENA_INFO_OFFSET
 590 *
 591 * will cover both the second and the third cases.
 592 */
 593#define DOUBLE_BITMAP_SENTINEL  ((jsbitmap *) ARENA_INFO_OFFSET)
 594
 595#ifdef JS_THREADSAFE
 596/*
 597 * The maximum number of things to put on the local free list by taking
 598 * several things from the global free list or from the tail of the last
 599 * allocated arena to amortize the cost of rt->gcLock.
 600 *
 601 * We use number 8 based on benchmarks from bug 312238.
 602 */
 603#define MAX_THREAD_LOCAL_THINGS 8
 604
 605#endif
 606
 607JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval));
 608
 609JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString));
 610JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble));
 611
 612/* We want to use all the available GC thing space for object's slots. */
 613JS_STATIC_ASSERT(sizeof(JSObject) % sizeof(JSGCThing) == 0);
 614
 615/*
 616 * Ensure that JSObject is allocated from a different GC-list rather than
 617 * jsdouble and JSString so we can easily finalize JSObject before these 2
 618 * types of GC things. See comments in js_GC.
 619 */
 620JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(JSString)) !=
 621                 GC_FREELIST_INDEX(sizeof(JSObject)));
 622JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(jsdouble)) !=
 623                 GC_FREELIST_INDEX(sizeof(JSObject)));
 624
 625/*
 626 * JSPtrTable capacity growth descriptor. The table grows by powers of two
 627 * starting from capacity JSPtrTableInfo.minCapacity, but switching to linear
 628 * growth when capacity reaches JSPtrTableInfo.linearGrowthThreshold.
 629 */
typedef struct JSPtrTableInfo {
    uint16      minCapacity;            /* lower bound on a non-empty
                                           table's capacity */
    uint16      linearGrowthThreshold;  /* capacity at which power-of-two
                                           doubling switches to linear
                                           growth in steps of this size */
} JSPtrTableInfo;
 634
 635#define GC_ITERATOR_TABLE_MIN     4
 636#define GC_ITERATOR_TABLE_LINEAR  1024
 637
 638static const JSPtrTableInfo iteratorTableInfo = {
 639    GC_ITERATOR_TABLE_MIN,
 640    GC_ITERATOR_TABLE_LINEAR
 641};
 642
 643/* Calculate table capacity based on the current value of JSPtrTable.count. */
 644static size_t
 645PtrTableCapacity(size_t count, const JSPtrTableInfo *info)
 646{
 647    size_t linear, log, capacity;
 648
 649    linear = info->linearGrowthThreshold;
 650    JS_ASSERT(info->minCapacity <= linear);
 651
 652    if (count == 0) {
 653        capacity = 0;
 654    } else if (count < linear) {
 655        log = JS_CEILING_LOG2W(count);
 656        JS_ASSERT(log != JS_BITS_PER_WORD);
 657        capacity = (size_t)1 << log;
 658        if (capacity < info->minCapacity)
 659            capacity = info->minCapacity;
 660    } else {
 661        capacity = JS_ROUNDUP(count, linear);
 662    }
 663
 664    JS_ASSERT(capacity >= count);
 665    return capacity;
 666}
 667
 668static void
 669FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info)
 670{
 671    if (table->array) {
 672        JS_ASSERT(table->count > 0);
 673        free(table->array);
 674        table->array = NULL;
 675        table->count = 0;
 676    }
 677    JS_ASSERT(table->count == 0);
 678}
 679
/*
 * Append ptr to the table, growing the backing array when count has
 * reached the capacity implied by PtrTableCapacity. Returns JS_FALSE and
 * reports out-of-memory on allocation failure or size overflow; the table
 * is left unchanged in that case.
 */
static JSBool
AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info,
              void *ptr)
{
    size_t count, capacity;
    void **array;

    count = table->count;
    capacity = PtrTableCapacity(count, info);

    if (count == capacity) {
        if (capacity < info->minCapacity) {
            /* First insertion: jump straight to the minimum capacity. */
            JS_ASSERT(capacity == 0);
            JS_ASSERT(!table->array);
            capacity = info->minCapacity;
        } else {
            /*
             * Simplify the overflow detection assuming pointer is bigger
             * than byte.
             */
            JS_STATIC_ASSERT(2 <= sizeof table->array[0]);
            capacity = (capacity < info->linearGrowthThreshold)
                       ? 2 * capacity
                       : capacity + info->linearGrowthThreshold;
            /* Refuse growth that would overflow the realloc size below. */
            if (capacity > (size_t)-1 / sizeof table->array[0])
                goto bad;
        }
        array = (void **) realloc(table->array,
                                  capacity * sizeof table->array[0]);
        if (!array)
            goto bad;
#ifdef DEBUG
        /* Poison the still-unused tail of the freshly grown array. */
        memset(array + count, JS_FREE_PATTERN,
               (capacity - count) * sizeof table->array[0]);
#endif
        table->array = array;
    }

    table->array[count] = ptr;
    table->count = count + 1;

    return JS_TRUE;

  bad:
    JS_ReportOutOfMemory(cx);
    return JS_FALSE;
}
 727
/*
 * Truncate the table to newCount elements, shrinking the backing array
 * when the capacity implied by newCount differs from the current one. A
 * failed shrinking realloc is tolerated: the old, larger array keeps
 * serving the table.
 */
static void
ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info,
               size_t newCount)
{
    size_t oldCapacity, capacity;
    void **array;

    JS_ASSERT(newCount <= table->count);
    if (newCount == table->count)
        return;

    oldCapacity = PtrTableCapacity(table->count, info);
    table->count = newCount;
    capacity = PtrTableCapacity(newCount, info);

    if (oldCapacity != capacity) {
        array = table->array;
        JS_ASSERT(array);
        if (capacity == 0) {
            free(array);
            table->array = NULL;
            return;
        }
        /* On realloc failure keep the old array: it is still big enough. */
        array = (void **) realloc(array, capacity * sizeof array[0]);
        if (array)
            table->array = array;
    }
#ifdef DEBUG
    /*
     * Poison the now-unused tail. Writing capacity - newCount entries is
     * in bounds even when the shrinking realloc above failed, since the
     * retained array holds at least oldCapacity > capacity entries.
     */
    memset(table->array + newCount, JS_FREE_PATTERN,
           (capacity - newCount) * sizeof table->array[0]);
#endif
}
 760
 761#ifdef JS_GCMETER
 762# define METER(x)               ((void) (x))
 763# define METER_IF(condition, x) ((void) ((condition) && (x)))
 764#else
 765# define METER(x)               ((void) 0)
 766# define METER_IF(condition, x) ((void) 0)
 767#endif
 768
 769#define METER_UPDATE_MAX(maxLval, rval)                                       \
 770    METER_IF((maxLval) < (rval), (maxLval) = (rval))
 771
 772#if JS_GC_USE_MMAP || !HAS_POSIX_MEMALIGN
 773
 774/*
 775 * For chunks allocated via over-sized malloc, get a pointer to store the gap
 776 * between the malloc's result and the first arena in the chunk.
 777 */
 778static uint32 *
 779GetMallocedChunkGapPtr(jsuword chunk)
 780{
 781    JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
 782
 783    /* Use the memory after the chunk, see NewGCChunk for details. */
 784    return (uint32 *) (chunk + (js_gcArenasPerChunk << GC_ARENA_SHIFT));
 785}
 786
 787#endif
 788
/*
 * Allocate a chunk of js_gcArenasPerChunk GC arenas aligned on the arena
 * boundary.  Returns the chunk address or 0 on failure.  One of three
 * strategies is used depending on platform support: mmap/VirtualAlloc,
 * posix_memalign, or an over-sized plain malloc.
 */
static jsuword
NewGCChunk(void)
{
    void *p;

#if JS_GC_USE_MMAP
    if (js_gcUseMmap) {
# if defined(XP_WIN)
        p = VirtualAlloc(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
                         MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
        return (jsuword) p;
# else
        p = mmap(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
                 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        /* Normalize mmap's MAP_FAILED sentinel to 0. */
        return (p == MAP_FAILED) ? 0 : (jsuword) p;
# endif
    }
#endif

#if HAS_POSIX_MEMALIGN
    if (0 != posix_memalign(&p, GC_ARENA_SIZE,
                            GC_ARENA_SIZE * js_gcArenasPerChunk -
                            JS_GC_ARENA_PAD)) {
        return 0;
    }
    return (jsuword) p;
#else
    /*
     * Implement chunk allocation using oversized malloc if mmap and
     * posix_memalign are not available.
     *
     * Since malloc allocates pointers aligned on the word boundary, to get
     * js_gcArenasPerChunk aligned arenas, we need to malloc only
     *
     *   ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT) - sizeof(size_t)
     *
     * bytes. But since we store the gap between the malloced pointer and the
     * first arena in the chunk after the chunk, we need to ask for
     *
     *   ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT)
     *
     * bytes to ensure that we always have room to store the gap.
     */
    p = malloc((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT);
    if (!p)
        return 0;

    {
        jsuword chunk;

        /* Round up to the next arena boundary and record the gap so
           DestroyGCChunk can recover the original malloc pointer. */
        chunk = ((jsuword) p + GC_ARENA_MASK) & ~GC_ARENA_MASK;
        *GetMallocedChunkGapPtr(chunk) = (uint32) (chunk - (jsuword) p);
        return chunk;
    }
#endif
}
 845
/*
 * Return a chunk allocated by NewGCChunk to the system, using the
 * deallocation primitive matching the strategy that allocated it.
 */
static void
DestroyGCChunk(jsuword chunk)
{
    JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
#if JS_GC_USE_MMAP
    if (js_gcUseMmap) {
# if defined(XP_WIN)
        VirtualFree((void *) chunk, 0, MEM_RELEASE);
# elif defined(SOLARIS)
        /* Solaris declares munmap with a char * argument. */
        munmap((char *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
# else
        munmap((void *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
# endif
        return;
    }
#endif

#if HAS_POSIX_MEMALIGN
    free((void *) chunk);
#else
    /* See comments in NewGCChunk: subtract the recorded gap to recover the
       pointer that malloc originally returned. */
    JS_ASSERT(*GetMallocedChunkGapPtr(chunk) < GC_ARENA_SIZE);
    free((void *) (chunk - *GetMallocedChunkGapPtr(chunk)));
#endif
}
 871
 872#if CHUNKED_ARENA_ALLOCATION
 873
 874static void
 875AddChunkToList(JSRuntime *rt, JSGCChunkInfo *ci)
 876{
 877    ci->prevp = &rt->gcChunkList;
 878    ci->next = rt->gcChunkList;
 879    if (rt->gcChunkList) {
 880        JS_ASSERT(rt->gcChunkList->prevp == &rt->gcChunkList);
 881        rt->gcChunkList->prevp = &ci->next;
 882    }
 883    rt->gcChunkList = ci;
 884}
 885
 886static void
 887RemoveChunkFromList(JSRuntime *rt, JSGCChunkInfo *ci)
 888{
 889    *ci->prevp = ci->next;
 890    if (ci->next) {
 891        JS_ASSERT(ci->next->prevp == &ci->next);
 892        ci->next->prevp = ci->prevp;
 893    }
 894}
 895
 896#endif
 897
/*
 * Allocate one GC arena, charging GC_ARENA_SIZE against rt->gcBytes.  When
 * js_gcArenasPerChunk is 1 the arena is simply a fresh chunk; otherwise an
 * arena is taken from a chunk on rt->gcChunkList, allocating a new chunk
 * when no chunk has free arenas.  Returns null when the gcBytes quota is
 * reached or the system allocation fails.
 */
static JSGCArenaInfo *
NewGCArena(JSRuntime *rt)
{
    jsuword chunk;
    JSGCArenaInfo *a;

    /* Enforce the GC heap quota before touching the allocator. */
    if (rt->gcBytes >= rt->gcMaxBytes)
        return NULL;

#if CHUNKED_ARENA_ALLOCATION
    if (js_gcArenasPerChunk == 1) {
#endif
        chunk = NewGCChunk();
        if (chunk == 0)
            return NULL;
        a = ARENA_START_TO_INFO(chunk);
#if CHUNKED_ARENA_ALLOCATION
    } else {
        JSGCChunkInfo *ci;
        uint32 i;
        JSGCArenaInfo *aprev;

        ci = rt->gcChunkList;
        if (!ci) {
            /*
             * No chunk has free arenas: allocate a fresh chunk, thread
             * arenas 0..N-2 into its free list, and hand out the last
             * arena (index N-1), which is left in `a` by the loop below.
             */
            chunk = NewGCChunk();
            if (chunk == 0)
                return NULL;
            JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
            a = GET_ARENA_INFO(chunk, 0);
            a->firstArena = JS_TRUE;
            a->arenaIndex = 0;
            aprev = NULL;
            i = 0;
            do {
                a->prev = aprev;
                aprev = a;
                ++i;
                a = GET_ARENA_INFO(chunk, i);
                a->firstArena = JS_FALSE;
                a->arenaIndex = i;
            } while (i != js_gcArenasPerChunk - 1);
            ci = GET_CHUNK_INFO(chunk, 0);
            ci->lastFreeArena = aprev;
            ci->numFreeArenas = js_gcArenasPerChunk - 1;
            AddChunkToList(rt, ci);
        } else {
            /* Take the most recently freed arena from the head chunk. */
            JS_ASSERT(ci->prevp == &rt->gcChunkList);
            a = ci->lastFreeArena;
            aprev = a->prev;
            if (!aprev) {
                /* Chunk just became full: drop its chunk-info record. */
                JS_ASSERT(ci->numFreeArenas == 1);
                JS_ASSERT(ARENA_INFO_TO_START(a) == (jsuword) ci);
                RemoveChunkFromList(rt, ci);
                chunk = GET_ARENA_CHUNK(a, GET_ARENA_INDEX(a));
                SET_CHUNK_INFO_INDEX(chunk, NO_FREE_ARENAS);
            } else {
                JS_ASSERT(ci->numFreeArenas >= 2);
                JS_ASSERT(ARENA_INFO_TO_START(a) != (jsuword) ci);
                ci->lastFreeArena = aprev;
                ci->numFreeArenas--;
            }
        }
    }
#endif

    rt->gcBytes += GC_ARENA_SIZE;
    a->prevUntracedPage = 0;
    memset(&a->u, 0, sizeof(a->u));

    return a;
}
 969
/*
 * Free a list of arenas linked through a->prev, ending the list at `last`.
 * Single-arena chunks are released outright; arenas from multi-arena chunks
 * are returned to their chunk's free list, and the chunk itself is
 * destroyed once all of its arenas are free.
 */
static void
DestroyGCArenas(JSRuntime *rt, JSGCArenaInfo *last)
{
    JSGCArenaInfo *a;

    while (last) {
        a = last;
        last = last->prev;

        METER(rt->gcStats.afree++);
        JS_ASSERT(rt->gcBytes >= GC_ARENA_SIZE);
        rt->gcBytes -= GC_ARENA_SIZE;

#if CHUNKED_ARENA_ALLOCATION
        if (js_gcArenasPerChunk == 1) {
#endif
            DestroyGCChunk(ARENA_INFO_TO_START(a));
#if CHUNKED_ARENA_ALLOCATION
        } else {
            uint32 arenaIndex;
            jsuword chunk;
            uint32 chunkInfoIndex;
            JSGCChunkInfo *ci;
# ifdef DEBUG
            jsuword firstArena;

            /* Poison the arena body, preserving the header fields that are
               still needed below to locate the enclosing chunk. */
            firstArena = a->firstArena;
            arenaIndex = a->arenaIndex;
            memset((void *) ARENA_INFO_TO_START(a), JS_FREE_PATTERN,
                   GC_ARENA_SIZE - JS_GC_ARENA_PAD);
            a->firstArena = firstArena;
            a->arenaIndex = arenaIndex;
# endif
            arenaIndex = GET_ARENA_INDEX(a);
            chunk = GET_ARENA_CHUNK(a, arenaIndex);
            chunkInfoIndex = GET_CHUNK_INFO_INDEX(chunk);
            if (chunkInfoIndex == NO_FREE_ARENAS) {
                /* First arena freed in this chunk: it hosts the chunk info
                   and starts the chunk's free-arena list. */
                chunkInfoIndex = arenaIndex;
                SET_CHUNK_INFO_INDEX(chunk, arenaIndex);
                ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
                a->prev = NULL;
                ci->lastFreeArena = a;
                ci->numFreeArenas = 1;
                AddChunkToList(rt, ci);
            } else {
                JS_ASSERT(chunkInfoIndex != arenaIndex);
                ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
                JS_ASSERT(ci->numFreeArenas != 0);
                JS_ASSERT(ci->lastFreeArena);
                JS_ASSERT(a != ci->lastFreeArena);
                if (ci->numFreeArenas == js_gcArenasPerChunk - 1) {
                    /* Every arena in the chunk is now free: release it. */
                    RemoveChunkFromList(rt, ci);
                    DestroyGCChunk(chunk);
                } else {
                    ++ci->numFreeArenas;
                    a->prev = ci->lastFreeArena;
                    ci->lastFreeArena = a;
                }
            }
        }
# endif
    }
}
1033
1034static void
1035InitGCArenaLists(JSRuntime *rt)
1036{
1037    uintN i, thingSize;
1038    JSGCArenaList *arenaList;
1039
1040    for (i = 0; i < GC_NUM_FREELISTS; i++) {
1041        arenaList = &rt->gcArenaList[i];
1042        thingSize = GC_FREELIST_NBYTES(i);
1043        JS_ASSERT((size_t)(uint16)thingSize == thingSize);
1044        arenaList->last = NULL;
1045        arenaList->lastCount = (uint16) THINGS_PER_ARENA(thingSize);
1046        arenaList->thingSize = (uint16) thingSize;
1047        arenaList->freeList = NULL;
1048    }
1049    rt->gcDoubleArenaList.first = NULL;
1050    rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL;
1051}
1052
1053static void
1054FinishGCArenaLists(JSRuntime *rt)
1055{
1056    uintN i;
1057    JSGCArenaList *arenaList;
1058
1059    for (i = 0; i < GC_NUM_FREELISTS; i++) {
1060        arenaList = &rt->gcArenaList[i];
1061        DestroyGCArenas(rt, arenaList->last);
1062        arenaList->last = NULL;
1063        arenaList->lastCount = THINGS_PER_ARENA(arenaList->thingSize);
1064        arenaList->freeList = NULL;
1065    }
1066    DestroyGCArenas(rt, rt->gcDoubleArenaList.first);
1067    rt->gcDoubleArenaList.first = NULL;
1068    rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL;
1069
1070    rt->gcBytes = 0;
1071    JS_ASSERT(rt->gcChunkList == 0);
1072}
1073
1074/*
1075 * This function must not be called when thing is jsdouble.
1076 */
1077static uint8 *
1078GetGCThingFlags(void *thing)
1079{
1080    JSGCArenaInfo *a;
1081    uint32 index;
1082
1083    a = THING_TO_ARENA(thing);
1084    index = THING_TO_INDEX(thing, a->list->thingSize);
1085    return THING_FLAGP(a, index);
1086}
1087
1088/*
1089 * This function returns null when thing is jsdouble.
1090 */
1091static uint8 *
1092GetGCThingFlagsOrNull(void *thing)
1093{
1094    JSGCArenaInfo *a;
1095    uint32 index;
1096
1097    a = THING_TO_ARENA(thing);
1098    if (!a->list)
1099        return NULL;
1100    index = THING_TO_INDEX(thing, a->list->thingSize);
1101    return THING_FLAGP(a, index);
1102}
1103
1104intN
1105js_GetExternalStringGCType(JSString *str)
1106{
1107    uintN type;
1108
1109    type = (uintN) *GetGCThingFlags(str) & GCF_TYPEMASK;
1110    JS_ASSERT(type == GCX_STRING || type >= GCX_EXTERNAL_STRING);
1111    return (type == GCX_STRING) ? -1 : (intN) (type - GCX_EXTERNAL_STRING);
1112}
1113
1114static uint32
1115MapGCFlagsToTraceKind(uintN flags)
1116{
1117    uint32 type;
1118
1119    type = flags & GCF_TYPEMASK;
1120    JS_ASSERT(type != GCX_DOUBLE);
1121    JS_ASSERT(type < GCX_NTYPES);
1122    return (type < GCX_EXTERNAL_STRING) ? type : JSTRACE_STRING;
1123}
1124
1125JS_FRIEND_API(uint32)
1126js_GetGCThingTraceKind(void *thing)
1127{
1128    JSGCArenaInfo *a;
1129    uint32 index;
1130
1131    a = THING_TO_ARENA(thing);
1132    if (!a->list)
1133        return JSTRACE_DOUBLE;
1134
1135    index = THING_TO_INDEX(thing, a->list->thingSize);
1136    return MapGCFlagsToTraceKind(*THING_FLAGP(a, index));
1137}
1138
1139JSRuntime*
1140js_GetGCStringRuntime(JSString *str)
1141{
1142    JSGCArenaList *list;
1143
1144    list = THING_TO_ARENA(str)->list;
1145
1146    JS_ASSERT(list->thingSize == sizeof(JSGCThing));
1147    JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing)) == 0);
1148
1149    return (JSRuntime *)((uint8 *)list - offsetof(JSRuntime, gcArenaList));
1150}
1151
/*
 * Return true when the GC thing carries no mark, lock, or final flag and
 * will therefore be collected by the current GC cycle.  Doubles, which have
 * only a lazily initialized mark bitmap, are handled separately.  The cx
 * parameter is unused here.
 */
JSBool
js_IsAboutToBeFinalized(JSContext *cx, void *thing)
{
    JSGCArenaInfo *a;
    uint32 index, flags;

    a = THING_TO_ARENA(thing);
    if (!a->list) {
        /*
         * Check if arena has no marked doubles. In that case the bitmap with
         * the mark flags contains all garbage as it is initialized only when
         * marking the first double in the arena.
         */
        if (!a->u.hasMarkedDoubles)
            return JS_TRUE;
        index = DOUBLE_THING_TO_INDEX(thing);
        return !IsMarkedDouble(a, index);
    }
    index = THING_TO_INDEX(thing, a->list->thingSize);
    flags = *THING_FLAGP(a, index);
    return !(flags & (GCF_MARK | GCF_LOCK | GCF_FINAL));
}
1174
/*
 * Entry of rt->gcRootsHash mapping a rooted address to its debug name.
 * This is compatible with JSDHashEntryStub.
 */
typedef struct JSGCRootHashEntry {
    JSDHashEntryHdr hdr;
    void            *root;      /* address of the rooted location */
    const char      *name;      /* diagnostic label supplied by the caller */
} JSGCRootHashEntry;

/* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */
#define GC_ROOTS_SIZE   256
1184
#if CHUNKED_ARENA_ALLOCATION

/*
 * For a CPU with extremely large pages using them for GC things wastes
 * too much memory.
 */
# define GC_ARENAS_PER_CPU_PAGE_LIMIT JS_BIT(18 - GC_ARENA_SHIFT)

/* The per-chunk arena count must stay representable by the chunk-info
   index encoding, whose sentinel is NO_FREE_ARENAS. */
JS_STATIC_ASSERT(GC_ARENAS_PER_CPU_PAGE_LIMIT <= NO_FREE_ARENAS);

#endif
1196
/*
 * Initialize runtime GC state: choose the chunk allocation strategy (the
 * arenas-per-chunk count is derived from the CPU page size when mmap is
 * compiled in), reset the arena lists, and create the roots hash table.
 * maxbytes bounds both GC heap growth and, initially, malloc growth.
 * Returns JS_FALSE on out-of-memory.
 */
JSBool
js_InitGC(JSRuntime *rt, uint32 maxbytes)
{
#if JS_GC_USE_MMAP
    if (js_gcArenasPerChunk == 0) {
        size_t cpuPageSize, arenasPerPage;
# if defined(XP_WIN)
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        cpuPageSize = si.dwPageSize;

# elif defined(XP_UNIX) || defined(XP_BEOS)
        cpuPageSize = (size_t) sysconf(_SC_PAGESIZE);
# else
#  error "Not implemented"
# endif
        /* cpuPageSize is a power of 2. */
        JS_ASSERT((cpuPageSize & (cpuPageSize - 1)) == 0);
        arenasPerPage = cpuPageSize >> GC_ARENA_SHIFT;
#ifdef DEBUG
        if (arenasPerPage == 0) {
            fprintf(stderr,
"JS engine warning: the size of the CPU page, %u bytes, is too low to use\n"
"paged allocation for the garbage collector. Please report this.\n",
                    (unsigned) cpuPageSize);
        }
#endif
        /* Unsigned wraparound makes this test also reject
           arenasPerPage == 0 (pages smaller than an arena). */
        if (arenasPerPage - 1 <= (size_t) (GC_ARENAS_PER_CPU_PAGE_LIMIT - 1)) {
            /*
             * Use at least 4 GC arenas per paged allocation chunk to minimize
             * the overhead of mmap/VirtualAlloc.
             */
            js_gcUseMmap = JS_TRUE;
            js_gcArenasPerChunk = JS_MAX((uint32) arenasPerPage, 4);
        } else {
            js_gcUseMmap = JS_FALSE;
            js_gcArenasPerChunk = 7;
        }
    }
    JS_ASSERT(1 <= js_gcArenasPerChunk &&
              js_gcArenasPerChunk <= NO_FREE_ARENAS);
#endif

    InitGCArenaLists(rt);
    if (!JS_DHashTableInit(&rt->gcRootsHash, JS_DHashGetStubOps(), NULL,
                           sizeof(JSGCRootHashEntry), GC_ROOTS_SIZE)) {
        /* Null ops marks the table as uninitialized for js_FinishGC. */
        rt->gcRootsHash.ops = NULL;
        return JS_FALSE;
    }
    rt->gcLocksHash = NULL;     /* create lazily */

    /*
     * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
     * for default backward API compatibility.
     */
    rt->gcMaxBytes = rt->gcMaxMallocBytes = maxbytes;
    rt->gcEmptyArenaPoolLifespan = 30000;

    METER(memset(&rt->gcStats, 0, sizeof rt->gcStats));
    return JS_TRUE;
}
1259
1260#ifdef JS_GCMETER
1261
1262static void
1263UpdateArenaStats(JSGCArenaStats *st, uint32 nlivearenas, uint32 nkilledArenas,
1264                 uint32 nthings)
1265{
1266    size_t narenas;
1267
1268    narenas = nlivearenas + nkilledArenas;
1269    JS_ASSERT(narenas >= st->livearenas);
1270
1271    st->newarenas = narenas - st->livearenas;
1272    st->narenas = narenas;
1273    st->livearenas = nlivearenas;
1274    if (st->maxarenas < narenas)
1275        st->maxarenas = narenas;
1276    st->totalarenas += narenas;
1277
1278    st->nthings = nthings;
1279    if (st->maxthings < nthings)
1280        st->maxthings = nthings;
1281    st->totalthings += nthings;
1282}
1283
1284JS_FRIEND_API(void)
1285js_DumpGCStats(JSRuntime *rt, FILE *fp)
1286{
1287    int i;
1288    size_t sumArenas, sumTotalArenas;
1289    size_t sumThings, sumMaxThings;
1290    size_t sumThingSize, sumTotalThingSize;
1291    size_t sumArenaCapacity, sumTotalArenaCapacity;
1292    JSGCArenaStats *st;
1293    size_t thingSize, thingsPerArena;
1294    size_t sumAlloc, sumLocalAlloc, sumFail, sumRetry;
1295
1296    fprintf(fp, "\nGC allocation statistics:\n");
1297
1298#define UL(x)       ((unsigned long)(x))
1299#define ULSTAT(x)   UL(rt->gcStats.x)
1300#define PERCENT(x,y)  (100.0 * (double) (x) / (double) (y))
1301
1302    sumArenas = 0;
1303    sumTotalArenas = 0;
1304    sumThings = 0;
1305    sumMaxThings = 0;
1306    sumThingSize = 0;
1307    sumTotalThingSize = 0;
1308    sumArenaCapacity = 0;
1309    sumTotalArenaCapacity = 0;
1310    sumAlloc = 0;
1311    sumLocalAlloc = 0;
1312    sumFail = 0;
1313    sumRetry = 0;
1314    for (i = -1; i < (int) GC_NUM_FREELISTS; i++) {
1315        if (i == -1) {
1316            thingSize = sizeof(jsdouble);
1317            thingsPerArena = DOUBLES_PER_ARENA;
1318            st = &rt->gcStats.doubleArenaStats;
1319            fprintf(fp,
1320                    "Arena list for double values (%lu doubles per arena):",
1321                    UL(thingsPerArena));
1322        } else {
1323            thingSize = rt->gcArenaList[i].thingSize;
1324            thingsPerArena = THINGS_PER_ARENA(thingSize);
1325            st = &rt->gcStats.arenaStats[i];
1326            fprintf(fp,
1327                    "Arena list %d (thing size %lu, %lu things per arena):",
1328                    i, UL(GC_FREELIST_NBYTES(i)), UL(thingsPerArena));
1329        }
1330        if (st->maxarenas == 0) {
1331            fputs(" NEVER USED\n", fp);
1332            continue;
1333        }
1334        putc('\n', fp);
1335        fprintf(fp, "           arenas before GC: %lu\n", UL(st->narenas));
1336        fprintf(fp, "       new arenas before GC: %lu (%.1f%%)\n",
1337                UL(st->newarenas), PERCENT(st->newarenas, st->narenas));
1338        fprintf(fp, "            arenas after GC: %lu (%.1f%%)\n",
1339                UL(st->livearenas), PERCENT(st->livearenas, st->narenas));
1340        fprintf(fp, "                 max arenas: %lu\n", UL(st->maxarenas));
1341        fprintf(fp, "                     things: %lu\n", UL(st->nthings));
1342        fprintf(fp, "        GC cell utilization: %.1f%%\n",
1343                PERCENT(st->nthings, thingsPerArena * st->narenas));
1344        fprintf(fp, "   average cell utilization: %.1f%%\n",
1345                PERCENT(st->totalthings, thingsPerArena * st->totalarenas));
1346        fprintf(fp, "                 max things: %lu\n", UL(st->maxthings));
1347        fprintf(fp, "             alloc attempts: %lu\n", UL(st->alloc));
1348        fprintf(fp, "        alloc without locks: %1u  (%.1f%%)\n",
1349                UL(st->localalloc), PERCENT(st->localalloc, st->alloc));
1350        sumArenas += st->narenas;
1351        sumTotalArenas += st->totalarenas;
1352        sumThings += st->nthings;
1353        sumMaxThings += st->maxthings;
1354        sumThingSize += thingSize * st->nthings;
1355        sumTotalThingSize += thingSize * st->totalthings;
1356        sumArenaCapacity += thingSize * thingsPerArena * st->narenas;
1357        sumTotalArenaCapacity += thingSize * thingsPerArena * st->totalarenas;
1358        sumAlloc += st->alloc;
1359        sumLocalAlloc += st->localalloc;
1360        sumFail += st->fail;
1361        sumRetry += st->retry;
1362    }
1363    fprintf(fp, "TOTAL STATS:\n");
1364    fprintf(fp, "            bytes allocated: %lu\n", UL(rt->gcBytes));
1365    fprintf(fp, "            total GC arenas: %lu\n", UL(sumArenas));
1366    fprintf(fp, "            total GC things: %lu\n", UL(sumThings));
1367    fprintf(fp, "        max total GC things: %lu\n", UL(sumMaxThings));
1368    fprintf(fp, "        GC cell utilization: %.1f%%\n",
1369            PERCENT(sumThingSize, sumArenaCapacity));
1370    fprintf(fp, "   average cell utilization: %.1f%%\n",
1371            PERCENT(sumTotalThingSize, sumTotalArenaCapacity));
1372    fprintf(fp, "allocation retries after GC: %lu\n", UL(sumRetry));
1373    fprintf(fp, "             alloc attempts: %lu\n", UL(sumAlloc));
1374    fprintf(fp, "        alloc without locks: %1u  (%.1f%%)\n",
1375            UL(sumLocalAlloc), PERCENT(sumLocalAlloc, sumAlloc));
1376    fprintf(fp, "        allocation failures: %lu\n", UL(sumFail));
1377    fprintf(fp, "         things born locked: %lu\n", ULSTAT(lockborn));
1378    fprintf(fp, "           valid lock calls: %lu\n", ULSTAT(lock));
1379    fprintf(fp, "         valid unlock calls: %lu\n", ULSTAT(unlock));
1380    fprintf(fp, "       mark recursion depth: %lu\n", ULSTAT(depth));
1381    fprintf(fp, "     maximum mark recursion: %lu\n", ULSTAT(maxdepth));
1382    fprintf(fp, "     mark C recursion depth: %lu\n", ULSTAT(cdepth));
1383    fprintf(fp, "   maximum mark C recursion: %lu\n", ULSTAT(maxcdepth));
1384    fprintf(fp, "      delayed tracing calls: %lu\n", ULSTAT(untraced));
1385#ifdef DEBUG
1386    fprintf(fp, "      max trace later count: %lu\n", ULSTAT(maxuntraced));
1387#endif
1388    fprintf(fp, "   maximum GC nesting level: %lu\n", ULSTAT(maxlevel));
1389    fprintf(fp, "potentially useful GC calls: %lu\n", ULSTAT(poke));
1390    fprintf(fp, "  thing arenas freed so far: %lu\n", ULSTAT(afree));
1391    fprintf(fp, "     stack segments scanned: %lu\n", ULSTAT(stackseg));
1392    fprintf(fp, "stack segment slots scanned: %lu\n", ULSTAT(segslots));
1393    fprintf(fp, "reachable closeable objects: %lu\n", ULSTAT(nclose));
1394    fprintf(fp, "    max reachable closeable: %lu\n", ULSTAT(maxnclose));
1395    fprintf(fp, "      scheduled close hooks: %lu\n", ULSTAT(closelater));
1396    fprintf(fp, "  max scheduled close hooks: %lu\n", ULSTAT(maxcloselater));
1397
1398#undef UL
1399#undef ULSTAT
1400#undef PERCENT
1401
1402#ifdef JS_ARENAMETER
1403    JS_DumpArenaStats(fp);
1404#endif
1405}
1406#endif
1407
1408#ifdef DEBUG
1409static void
1410CheckLeakedRoots(JSRuntime *rt);
1411#endif
1412
1413#ifdef JS_THREADSAFE
1414static void
1415TrimGCFreeListsPool(JSRuntime *rt, uintN keepCount);
1416#endif
1417
/*
 * Release all GC resources owned by the runtime: metering dumps (when
 * enabled), the iterator table, the pooled per-thread free lists, the arena
 * lists, and the root/lock hash tables.
 */
void
js_FinishGC(JSRuntime *rt)
{
#ifdef JS_ARENAMETER
    JS_DumpArenaStats(stdout);
#endif
#ifdef JS_GCMETER
    js_DumpGCStats(rt, stdout);
#endif

    FreePtrTable(&rt->gcIteratorTable, &iteratorTableInfo);
#ifdef JS_THREADSAFE
    /* keepCount of 0 releases every pooled free list. */
    TrimGCFreeListsPool(rt, 0);
    JS_ASSERT(!rt->gcFreeListsPool);
#endif
    FinishGCArenaLists(rt);

    /* Non-null ops means js_InitGC successfully created the table. */
    if (rt->gcRootsHash.ops) {
#ifdef DEBUG
        CheckLeakedRoots(rt);
#endif
        JS_DHashTableFinish(&rt->gcRootsHash);
        rt->gcRootsHash.ops = NULL;
    }
    if (rt->gcLocksHash) {
        JS_DHashTableDestroy(rt->gcLocksHash);
        rt->gcLocksHash = NULL;
    }
}
1447
1448JSBool
1449js_AddRoot(JSContext *cx, void *rp, const char *name)
1450{
1451    JSBool ok = js_AddRootRT(cx->runtime, rp, name);
1452    if (!ok)
1453        JS_ReportOutOfMemory(cx);
1454    return ok;
1455}
1456
1457JSBool
1458js_AddRootRT(JSRuntime *rt, void *rp, const char *name)
1459{
1460    JSBool ok;
1461    JSGCRootHashEntry *rhe;
1462
1463    /*
1464     * Due to the long-standing, but now removed, use of rt->gcLock across the
1465     * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
1466     * properly with a racing GC, without calling JS_AddRoot from a request.
1467     * We have to preserve API compatibility here, now that we avoid holding
1468     * rt->gcLock across the mark phase (including the root hashtable mark).
1469     *
1470     * If the GC is running and we're called on another thread, wait for this
1471     * GC activation to finish.  We can safely wait here (in the case where we
1472     * are called within a request on another thread's context) without fear
1473     * of deadlock because the GC doesn't set rt->gcRunning until after it has
1474     * waited for all active requests to end.
1475     */
1476    JS_LOCK_GC(rt);
1477#ifdef JS_THREADSAFE
1478    JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
1479    if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
1480        do {
1481            JS_AWAIT_GC_DONE(rt);
1482        } while (rt->gcLevel > 0);
1483    }
1484#endif
1485    rhe = (JSGCRootHashEntry *)
1486          JS_DHashTableOperate(&rt->gcRootsHas

Large files files are truncated, but you can click here to view the full file