
/js/lib/Socket.IO-node/support/expresso/deps/jscoverage/js/jsgc.cpp

http://github.com/onedayitwillmake/RealtimeMultiplayerNodeJs
  1. /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  2. * vim: set ts=8 sw=4 et tw=78:
  3. *
  4. * ***** BEGIN LICENSE BLOCK *****
  5. * Version: MPL 1.1/GPL 2.0/LGPL 2.1
  6. *
  7. * The contents of this file are subject to the Mozilla Public License Version
  8. * 1.1 (the "License"); you may not use this file except in compliance with
  9. * the License. You may obtain a copy of the License at
  10. * http://www.mozilla.org/MPL/
  11. *
  12. * Software distributed under the License is distributed on an "AS IS" basis,
  13. * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
  14. * for the specific language governing rights and limitations under the
  15. * License.
  16. *
  17. * The Original Code is Mozilla Communicator client code, released
  18. * March 31, 1998.
  19. *
  20. * The Initial Developer of the Original Code is
  21. * Netscape Communications Corporation.
  22. * Portions created by the Initial Developer are Copyright (C) 1998
  23. * the Initial Developer. All Rights Reserved.
  24. *
  25. * Contributor(s):
  26. *
  27. * Alternatively, the contents of this file may be used under the terms of
  28. * either of the GNU General Public License Version 2 or later (the "GPL"),
  29. * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
  30. * in which case the provisions of the GPL or the LGPL are applicable instead
  31. * of those above. If you wish to allow use of your version of this file only
  32. * under the terms of either the GPL or the LGPL, and not to allow others to
  33. * use your version of this file under the terms of the MPL, indicate your
  34. * decision by deleting the provisions above and replace them with the notice
  35. * and other provisions required by the GPL or the LGPL. If you do not delete
  36. * the provisions above, a recipient may use your version of this file under
  37. * the terms of any one of the MPL, the GPL or the LGPL.
  38. *
  39. * ***** END LICENSE BLOCK ***** */
  40. /*
  41. * JS Mark-and-Sweep Garbage Collector.
  42. *
  43. * This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see
  44. * jsgc.h). It allocates from a special GC arena pool with each arena allocated
  45. * using malloc. It uses an ideally parallel array of flag bytes to hold the
  46. * mark bit, finalizer type index, etc.
  47. *
  48. * XXX swizzle page to freelist for better locality of reference
  49. */
  50. #include "jsstddef.h"
  51. #include <stdlib.h> /* for free */
  52. #include <math.h>
  53. #include <string.h> /* for memset used when DEBUG */
  54. #include "jstypes.h"
  55. #include "jsutil.h" /* Added by JSIFY */
  56. #include "jshash.h" /* Added by JSIFY */
  57. #include "jsbit.h"
  58. #include "jsclist.h"
  59. #include "jsprf.h"
  60. #include "jsapi.h"
  61. #include "jsatom.h"
  62. #include "jscntxt.h"
  63. #include "jsversion.h"
  64. #include "jsdbgapi.h"
  65. #include "jsexn.h"
  66. #include "jsfun.h"
  67. #include "jsgc.h"
  68. #include "jsinterp.h"
  69. #include "jsiter.h"
  70. #include "jslock.h"
  71. #include "jsnum.h"
  72. #include "jsobj.h"
  73. #include "jsparse.h"
  74. #include "jsscope.h"
  75. #include "jsscript.h"
  76. #include "jsstr.h"
  77. #include "jstracer.h"
  78. #if JS_HAS_XML_SUPPORT
  79. #include "jsxml.h"
  80. #endif
  81. /*
  82. * Check if posix_memalign is available.
  83. */
  84. #if _POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600 || MOZ_MEMORY
  85. # define HAS_POSIX_MEMALIGN 1
  86. #else
  87. # define HAS_POSIX_MEMALIGN 0
  88. #endif
  89. /*
  90. * jemalloc provides posix_memalign.
  91. */
  92. #ifdef MOZ_MEMORY
  93. extern "C" {
  94. #include "../../memory/jemalloc/jemalloc.h"
  95. }
  96. #endif
  97. /*
  98. * Include the headers for mmap unless we have posix_memalign and do not
  99. * insist on mmap.
  100. */
  101. #if JS_GC_USE_MMAP || (!defined JS_GC_USE_MMAP && !HAS_POSIX_MEMALIGN)
  102. # if defined(XP_WIN)
  103. # ifndef JS_GC_USE_MMAP
  104. # define JS_GC_USE_MMAP 1
  105. # endif
  106. # include <windows.h>
  107. # else
  108. # if defined(XP_UNIX) || defined(XP_BEOS)
  109. # include <unistd.h>
  110. # endif
  111. # if _POSIX_MAPPED_FILES > 0
  112. # ifndef JS_GC_USE_MMAP
  113. # define JS_GC_USE_MMAP 1
  114. # endif
  115. # include <sys/mman.h>
  116. /* On Mac OS X MAP_ANONYMOUS is not defined. */
  117. # if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
  118. # define MAP_ANONYMOUS MAP_ANON
  119. # endif
  120. # else
  121. # if JS_GC_USE_MMAP
  122. # error "JS_GC_USE_MMAP is set when mmap is not available"
  123. # endif
  124. # endif
  125. # endif
  126. #endif
  127. /*
  128. * A GC arena contains a fixed number of flag bits for each thing in its heap,
  129. * and supports O(1) lookup of a flag given its thing's address.
  130. *
  131. * To implement this, we allocate things of the same size from a GC arena
  132. * containing GC_ARENA_SIZE bytes aligned on GC_ARENA_SIZE boundary. The
  133. * following picture shows arena's layout:
  134. *
  135. * +------------------------------+--------------------+---------------+
  136. * | allocation area for GC thing | flags of GC things | JSGCArenaInfo |
  137. * +------------------------------+--------------------+---------------+
  138. *
  139. * To find the flag bits for the thing we calculate the thing index counting
  140. * from arena's start using:
  141. *
  142. * thingIndex = (thingAddress & GC_ARENA_MASK) / thingSize
  143. *
  144. * The details of the flag lookup depend on the thing's kind. For all GC things
  145. * except doubles we use one byte of flags, where 4 bits determine the thing's
  146. * type and the rest is used to implement GC marking, finalization and
  147. * locking. We calculate the address of flag's byte using:
  148. *
  149. * flagByteAddress =
  150. * (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo) - thingIndex
  151. *
  152. * where
  153. *
  154. * (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo)
  155. *
  156. * is the last byte of flags' area.
  157. *
  158. * This implies that the things are allocated from the start of their area and
  159. * flags are allocated from the end. This arrangement avoids a relatively
  160. * expensive calculation of the location of the boundary separating things and
  161. * flags. The boundary's offset from the start of the arena is given by:
  162. *
  163. * thingsPerArena * thingSize
  164. *
  165. * where thingsPerArena is the number of things that the arena can hold:
  166. *
  167. * (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / (thingSize + 1).
  168. *
  169. * To allocate doubles we use a specialized arena. It can contain only numbers
  170. * so we do not need the type bits. Moreover, since the doubles do not require
  171. * a finalizer and very few of them are locked via js_LockGCThing API, we use
  172. * just one bit of flags per double to denote if it was marked during the
  173. * marking phase of the GC. The locking is implemented via a hash table. Thus
  174. * for doubles the flag area becomes a bitmap.
  175. *
  176. * JS_GC_USE_MMAP macro governs the choice of the aligned arena allocator.
  177. * When it is true, a platform-dependent function like mmap is used to get
  178. * memory aligned on CPU page boundaries. If the macro is false or undefined,
  179. * posix_memalign is used when available. Otherwise the code uses malloc to
  180. * over-allocate a chunk with js_gcArenasPerChunk aligned arenas. The
  181. * approximate space overhead of this is 1/js_gcArenasPerChunk. For details,
  182. * see NewGCChunk/DestroyGCChunk below.
  183. *
  184. * The code also allocates arenas in chunks when JS_GC_USE_MMAP is 1 to
  185. * minimize the overhead of mmap/munmap. In this case js_gcArenasPerChunk can
  186. * not be a compile-time constant as the system page size is not known until
  187. * runtime.
  188. */
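/*
 * Illustrative sketch (not part of the original sources): assuming a 32-bit
 * build with JS_GC_ARENA_PAD == 0, sizeof(JSGCArenaInfo) == 16 and a
 * hypothetical thingSize of 16 bytes, the layout above works out to:
 *
 *   thingsPerArena = (4096 - 16) / (16 + 1) = 240
 *   things occupy arena offsets [0, 3840), flags occupy [3840, 4080),
 *   and JSGCArenaInfo occupies [4080, 4096).
 *
 * For a thing at arena offset 0x320:
 *
 *   thingIndex      = 0x320 / 16 = 50
 *   flagByteAddress = (thingAddress | 0xFFF) - 16 - 50 = arenaStart + 4029
 */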
  189. #if JS_GC_USE_MMAP
  190. static uint32 js_gcArenasPerChunk = 0;
  191. static JSBool js_gcUseMmap = JS_FALSE;
  192. #elif HAS_POSIX_MEMALIGN
  193. # define js_gcArenasPerChunk 1
  194. #else
  195. # define js_gcArenasPerChunk 7
  196. #endif
  197. #if defined(js_gcArenasPerChunk) && js_gcArenasPerChunk == 1
  198. # define CHUNKED_ARENA_ALLOCATION 0
  199. #else
  200. # define CHUNKED_ARENA_ALLOCATION 1
  201. #endif
  202. #define GC_ARENA_SHIFT 12
  203. #define GC_ARENA_MASK ((jsuword) JS_BITMASK(GC_ARENA_SHIFT))
  204. #define GC_ARENA_SIZE JS_BIT(GC_ARENA_SHIFT)
  205. /*
  206. * JS_GC_ARENA_PAD defines the number of bytes to pad JSGCArenaInfo structure.
  207. * It is used to improve allocation efficiency when using posix_memalign. If
  208. * malloc's implementation uses internal headers, then calling
  209. *
  210. * posix_memalign(&p, GC_ARENA_SIZE, GC_ARENA_SIZE * js_gcArenasPerChunk)
  211. *
  212. * in a sequence leaves holes between allocations of the size GC_ARENA_SIZE
  213. * due to the need to fit headers. JS_GC_ARENA_PAD mitigates that so the code
  214. * calls
  215. *
  216. * posix_memalign(&p, GC_ARENA_SIZE,
  217. * GC_ARENA_SIZE * js_gcArenasPerChunk - JS_GC_ARENA_PAD)
  218. *
  219. * When JS_GC_ARENA_PAD is equal to or greater than the size of the system
  220. * header, malloc can pack all allocations together without holes.
  221. *
  222. * With HAS_POSIX_MEMALIGN we want a pad of at least 2 words unless posix_memalign
  223. * comes from jemalloc, which does not use any headers/trailers.
  224. */
  225. #ifndef JS_GC_ARENA_PAD
  226. # if HAS_POSIX_MEMALIGN && !MOZ_MEMORY
  227. # define JS_GC_ARENA_PAD (2 * JS_BYTES_PER_WORD)
  228. # else
  229. # define JS_GC_ARENA_PAD 0
  230. # endif
  231. #endif
  232. struct JSGCArenaInfo {
  233. /*
  234. * Allocation list for the arena or NULL if the arena holds double values.
  235. */
  236. JSGCArenaList *list;
  237. /*
  238. * Pointer to the previous arena in a linked list. The arena can either
  239. * belong to one of JSContext.gcArenaList lists or, when it does not have
  240. * any allocated GC things, to the list of free arenas in the chunk with
  241. * head stored in JSGCChunkInfo.lastFreeArena.
  242. */
  243. JSGCArenaInfo *prev;
  244. #if !CHUNKED_ARENA_ALLOCATION
  245. jsuword prevUntracedPage;
  246. #else
  247. /*
  248. * A link field for the list of arenas with marked but not yet traced
  249. * things. The field is encoded as arena's page to share the space with
  250. * firstArena and arenaIndex fields.
  251. */
  252. jsuword prevUntracedPage : JS_BITS_PER_WORD - GC_ARENA_SHIFT;
  253. /*
  254. * When firstArena is false, the index of arena in the chunk. When
  255. * firstArena is true, the index of a free arena holding JSGCChunkInfo or
  256. * NO_FREE_ARENAS if there are no free arenas in the chunk.
  257. *
  258. * GET_ARENA_INDEX and GET_CHUNK_INFO_INDEX are convenience macros to
  259. * access either of indexes.
  260. */
  261. jsuword arenaIndex : GC_ARENA_SHIFT - 1;
  262. /* Flag indicating if the arena is the first in the chunk. */
  263. jsuword firstArena : 1;
  264. #endif
  265. union {
  266. jsuword untracedThings; /* bitset for fast search of marked
  267. but not yet traced things */
  268. JSBool hasMarkedDoubles; /* the arena has marked doubles */
  269. } u;
  270. #if JS_GC_ARENA_PAD != 0
  271. uint8 pad[JS_GC_ARENA_PAD];
  272. #endif
  273. };
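/*
 * A sketch of the bit-field packing above, assuming a 32-bit build: the three
 * fields take (32 - GC_ARENA_SHIFT) + (GC_ARENA_SHIFT - 1) + 1 =
 * 20 + 11 + 1 = 32 bits, i.e. exactly one jsuword, which is what the static
 * assert below relies on.
 */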
  274. /*
  275. * Verify that the bit fields are indeed shared and JSGCArenaInfo is as small
  276. * as possible. The code does not rely on this check so if on a particular
  277. * platform this does not compile, then, as a workaround, comment the assert
  278. * out and submit a bug report.
  279. */
  280. JS_STATIC_ASSERT(offsetof(JSGCArenaInfo, u) == 3 * sizeof(jsuword));
  281. /*
  282. * Macros to convert between JSGCArenaInfo, the start address of the arena and
  283. * arena's page defined as (start address) >> GC_ARENA_SHIFT.
  284. */
  285. #define ARENA_INFO_OFFSET (GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo))
  286. #define IS_ARENA_INFO_ADDRESS(arena) \
  287. (((jsuword) (arena) & GC_ARENA_MASK) == ARENA_INFO_OFFSET)
  288. #define ARENA_START_TO_INFO(arenaStart) \
  289. (JS_ASSERT(((arenaStart) & (jsuword) GC_ARENA_MASK) == 0), \
  290. (JSGCArenaInfo *) ((arenaStart) + (jsuword) ARENA_INFO_OFFSET))
  291. #define ARENA_INFO_TO_START(arena) \
  292. (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)), \
  293. (jsuword) (arena) & ~(jsuword) GC_ARENA_MASK)
  294. #define ARENA_PAGE_TO_INFO(arenaPage) \
  295. (JS_ASSERT(arenaPage != 0), \
  296. JS_ASSERT(!((jsuword)(arenaPage) >> (JS_BITS_PER_WORD-GC_ARENA_SHIFT))), \
  297. ARENA_START_TO_INFO((arenaPage) << GC_ARENA_SHIFT))
  298. #define ARENA_INFO_TO_PAGE(arena) \
  299. (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)), \
  300. ((jsuword) (arena) >> GC_ARENA_SHIFT))
  301. #define GET_ARENA_INFO(chunk, index) \
  302. (JS_ASSERT((index) < js_gcArenasPerChunk), \
  303. ARENA_START_TO_INFO(chunk + ((index) << GC_ARENA_SHIFT)))
  304. #if CHUNKED_ARENA_ALLOCATION
  305. /*
  306. * Definitions for allocating arenas in chunks.
  307. *
  308. * All chunks that have at least one free arena are put on the doubly-linked
  309. * list with the head stored in JSRuntime.gcChunkList. JSGCChunkInfo contains
  310. * the head of the chunk's free arena list together with the link fields for
  311. * gcChunkList.
  312. *
  313. * Structure stored in one of chunk's free arenas. GET_CHUNK_INFO_INDEX gives
  314. * the index of this arena. When all arenas in the chunk are used, it is
  315. * removed from the list and the index is set to NO_FREE_ARENAS indicating
  316. * that the chunk is not on gcChunkList and has no JSGCChunkInfo available.
  317. */
  318. struct JSGCChunkInfo {
  319. JSGCChunkInfo **prevp;
  320. JSGCChunkInfo *next;
  321. JSGCArenaInfo *lastFreeArena;
  322. uint32 numFreeArenas;
  323. };
  324. #define NO_FREE_ARENAS JS_BITMASK(GC_ARENA_SHIFT - 1)
  325. #ifdef js_gcArenasPerChunk
  326. JS_STATIC_ASSERT(1 <= js_gcArenasPerChunk &&
  327. js_gcArenasPerChunk <= NO_FREE_ARENAS);
  328. #endif
  329. #define GET_ARENA_CHUNK(arena, index) \
  330. (JS_ASSERT(GET_ARENA_INDEX(arena) == index), \
  331. ARENA_INFO_TO_START(arena) - ((index) << GC_ARENA_SHIFT))
  332. #define GET_ARENA_INDEX(arena) \
  333. ((arena)->firstArena ? 0 : (uint32) (arena)->arenaIndex)
  334. #define GET_CHUNK_INFO_INDEX(chunk) \
  335. ((uint32) ARENA_START_TO_INFO(chunk)->arenaIndex)
  336. #define SET_CHUNK_INFO_INDEX(chunk, index) \
  337. (JS_ASSERT((index) < js_gcArenasPerChunk || (index) == NO_FREE_ARENAS), \
  338. (void) (ARENA_START_TO_INFO(chunk)->arenaIndex = (jsuword) (index)))
  339. #define GET_CHUNK_INFO(chunk, infoIndex) \
  340. (JS_ASSERT(GET_CHUNK_INFO_INDEX(chunk) == (infoIndex)), \
  341. JS_ASSERT((uint32) (infoIndex) < js_gcArenasPerChunk), \
  342. (JSGCChunkInfo *) ((chunk) + ((infoIndex) << GC_ARENA_SHIFT)))
  343. #define CHUNK_INFO_TO_INDEX(ci) \
  344. GET_ARENA_INDEX(ARENA_START_TO_INFO((jsuword)ci))
  345. #endif
  346. /*
  347. * Macros for GC-thing operations.
  348. */
  349. #define THINGS_PER_ARENA(thingSize) \
  350. ((GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo)) / ((thingSize) + 1U))
  351. #define THING_TO_ARENA(thing) \
  352. ((JSGCArenaInfo *)(((jsuword) (thing) | GC_ARENA_MASK) + \
  353. 1 - sizeof(JSGCArenaInfo)))
  354. #define THING_TO_INDEX(thing, thingSize) \
  355. ((uint32) ((jsuword) (thing) & GC_ARENA_MASK) / (uint32) (thingSize))
  356. #define THING_FLAGS_END(arena) ((uint8 *)(arena))
  357. #define THING_FLAGP(arena, thingIndex) \
  358. (JS_ASSERT((jsuword) (thingIndex) \
  359. < (jsuword) THINGS_PER_ARENA((arena)->list->thingSize)), \
  360. (uint8 *)(arena) - 1 - (thingIndex))
  361. #define THING_TO_FLAGP(thing, thingSize) \
  362. THING_FLAGP(THING_TO_ARENA(thing), THING_TO_INDEX(thing, thingSize))
  363. #define FLAGP_TO_ARENA(flagp) THING_TO_ARENA(flagp)
  364. #define FLAGP_TO_INDEX(flagp) \
  365. (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) < ARENA_INFO_OFFSET), \
  366. (ARENA_INFO_OFFSET - 1 - (uint32) ((jsuword) (flagp) & GC_ARENA_MASK)))
  367. #define FLAGP_TO_THING(flagp, thingSize) \
  368. (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) >= \
  369. (ARENA_INFO_OFFSET - THINGS_PER_ARENA(thingSize))), \
  370. (JSGCThing *)(((jsuword) (flagp) & ~GC_ARENA_MASK) + \
  371. (thingSize) * FLAGP_TO_INDEX(flagp)))
  372. /*
  373. * Macros for the specialized arena for doubles.
  374. *
  375. * DOUBLES_PER_ARENA defines the maximum number of doubles that the arena can
  376. * hold. We find it as the following. Let n be the number of doubles in the
  377. * arena. Together with the bitmap of flags and JSGCArenaInfo they should fit
  378. * the arena. Hence DOUBLES_PER_ARENA or n_max is the maximum value of n for
  379. * which the following holds:
  380. *
  381. * n*s + ceil(n/B) <= M (1)
  382. *
  383. * where "/" denotes normal real division,
  384. * ceil(r) gives the least integer not smaller than the number r,
  385. * s is the number of words in jsdouble,
  386. * B is number of bits per word or B == JS_BITS_PER_WORD
  387. * M is the number of words in the arena before JSGCArenaInfo or
  388. * M == (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / sizeof(jsuword).
  389. * M == ARENA_INFO_OFFSET / sizeof(jsuword)
  390. *
  391. * We rewrite the inequality as
  392. *
  393. * n*B*s/B + ceil(n/B) <= M,
  394. * ceil(n*B*s/B + n/B) <= M,
  395. * ceil(n*(B*s + 1)/B) <= M (2)
  396. *
  397. * We define a helper function e(n, s, B),
  398. *
  399. * e(n, s, B) := ceil(n*(B*s + 1)/B) - n*(B*s + 1)/B, 0 <= e(n, s, B) < 1.
  400. *
  401. * It gives:
  402. *
  403. * n*(B*s + 1)/B + e(n, s, B) <= M,
  404. * n + e*B/(B*s + 1) <= M*B/(B*s + 1)
  405. *
  406. * We apply the floor function to both sides of the last equation, where
  407. * floor(r) gives the biggest integer not greater than r. As a consequence we
  408. * have:
  409. *
  410. * floor(n + e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
  411. * n + floor(e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
  412. * n <= floor(M*B/(B*s + 1)), (3)
  413. *
  414. * where floor(e*B/(B*s + 1)) is zero as e*B/(B*s + 1) < B/(B*s + 1) < 1.
  415. * Thus any n that satisfies the original constraint (1) or its equivalent (2),
  416. * must also satisfy (3). That is, we got an upper estimate for the maximum
  417. * value of n. Lets show that this upper estimate,
  418. *
  419. * floor(M*B/(B*s + 1)), (4)
  420. *
  421. * also satisfies (1) and, as such, gives the required maximum value.
  422. * Substituting it into (2) gives:
  423. *
  424. * ceil(floor(M*B/(B*s + 1))*(B*s + 1)/B) == ceil(floor(M/X)*X)
  425. *
  426. * where X == (B*s + 1)/B > 1. But then floor(M/X)*X <= M/X*X == M and
  427. *
  428. * ceil(floor(M/X)*X) <= ceil(M) == M.
  429. *
  430. * Thus the value of (4) gives the maximum n satisfying (1).
  431. *
  432. * For the final result we observe that in (4)
  433. *
  434. * M*B == ARENA_INFO_OFFSET / sizeof(jsuword) * JS_BITS_PER_WORD
  435. * == ARENA_INFO_OFFSET * JS_BITS_PER_BYTE
  436. *
  437. * and
  438. *
  439. * B*s == JS_BITS_PER_WORD * sizeof(jsdouble) / sizeof(jsuword)
  440. * == JS_BITS_PER_DOUBLE.
  441. */
  442. #define DOUBLES_PER_ARENA \
  443. ((ARENA_INFO_OFFSET * JS_BITS_PER_BYTE) / (JS_BITS_PER_DOUBLE + 1))
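/*
 * A worked example of (4), assuming a 32-bit build where ARENA_INFO_OFFSET
 * is 4096 - 16 = 4080:
 *
 *   DOUBLES_PER_ARENA = floor(4080 * 8 / (64 + 1)) = 502
 *
 * 502 doubles take 4016 bytes and their bitmap takes 16 words (64 bytes);
 * 4016 + 64 = 4080 <= ARENA_INFO_OFFSET, while 503 doubles would already
 * need 4024 + 64 = 4088 bytes and would not fit.
 */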
  444. /*
  445. * Check that sizeof(jsuword) divides both ARENA_INFO_OFFSET and sizeof(jsdouble).
  446. */
  447. JS_STATIC_ASSERT(ARENA_INFO_OFFSET % sizeof(jsuword) == 0);
  448. JS_STATIC_ASSERT(sizeof(jsdouble) % sizeof(jsuword) == 0);
  449. JS_STATIC_ASSERT(sizeof(jsbitmap) == sizeof(jsuword));
  450. #define DOUBLES_ARENA_BITMAP_WORDS \
  451. (JS_HOWMANY(DOUBLES_PER_ARENA, JS_BITS_PER_WORD))
  452. /* Check that DOUBLES_PER_ARENA indeed maximises (1). */
  453. JS_STATIC_ASSERT(DOUBLES_PER_ARENA * sizeof(jsdouble) +
  454. DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword) <=
  455. ARENA_INFO_OFFSET);
  456. JS_STATIC_ASSERT((DOUBLES_PER_ARENA + 1) * sizeof(jsdouble) +
  457. sizeof(jsuword) *
  458. JS_HOWMANY((DOUBLES_PER_ARENA + 1), JS_BITS_PER_WORD) >
  459. ARENA_INFO_OFFSET);
  460. /*
  461. * When DOUBLES_PER_ARENA % JS_BITS_PER_WORD != 0, some bits in the
  462. * last word of the occupation bitmap are unused.
  463. */
  464. #define UNUSED_DOUBLE_BITMAP_BITS \
  465. (DOUBLES_ARENA_BITMAP_WORDS * JS_BITS_PER_WORD - DOUBLES_PER_ARENA)
  466. JS_STATIC_ASSERT(UNUSED_DOUBLE_BITMAP_BITS < JS_BITS_PER_WORD);
  467. #define DOUBLES_ARENA_BITMAP_OFFSET \
  468. (ARENA_INFO_OFFSET - DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword))
  469. #define CHECK_DOUBLE_ARENA_INFO(arenaInfo) \
  470. (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arenaInfo)), \
  471. JS_ASSERT(!(arenaInfo)->list)) \
  472. /*
  473. * Get the start of the bitmap area containing double mark flags in the arena.
  474. * To access the flag the code uses
  475. *
  476. * JS_TEST_BIT(bitmapStart, index)
  477. *
  478. * That is, compared with the case of arenas with non-double things, we count
  479. * flags from the start of the bitmap area, not from the end.
  480. */
  481. #define DOUBLE_ARENA_BITMAP(arenaInfo) \
  482. (CHECK_DOUBLE_ARENA_INFO(arenaInfo), \
  483. (jsbitmap *) arenaInfo - DOUBLES_ARENA_BITMAP_WORDS)
  484. #define DOUBLE_THING_TO_INDEX(thing) \
  485. (CHECK_DOUBLE_ARENA_INFO(THING_TO_ARENA(thing)), \
  486. JS_ASSERT(((jsuword) (thing) & GC_ARENA_MASK) < \
  487. DOUBLES_ARENA_BITMAP_OFFSET), \
  488. ((uint32) (((jsuword) (thing) & GC_ARENA_MASK) / sizeof(jsdouble))))
  489. static void
  490. ClearDoubleArenaFlags(JSGCArenaInfo *a)
  491. {
  492. jsbitmap *bitmap, mask;
  493. uintN nused;
  494. /*
  495. * When some high bits in the last word of the double occupation bitmap
  496. * are unused, we must set them. Otherwise RefillDoubleFreeList will
  497. * assume that they correspond to free cells and try to allocate
  498. * them.
  499. *
  500. * Note that the code works correctly with UNUSED_DOUBLE_BITMAP_BITS == 0.
  501. */
  502. bitmap = DOUBLE_ARENA_BITMAP(a);
  503. memset(bitmap, 0, (DOUBLES_ARENA_BITMAP_WORDS - 1) * sizeof *bitmap);
  504. mask = ((jsbitmap) 1 << UNUSED_DOUBLE_BITMAP_BITS) - 1;
  505. nused = JS_BITS_PER_WORD - UNUSED_DOUBLE_BITMAP_BITS;
  506. bitmap[DOUBLES_ARENA_BITMAP_WORDS - 1] = mask << nused;
  507. }
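/*
 * Continuing the illustrative 32-bit numbers from above: with
 * DOUBLES_PER_ARENA == 502 and 16 bitmap words,
 * UNUSED_DOUBLE_BITMAP_BITS == 16 * 32 - 502 == 10, so mask == 0x3FF,
 * nused == 22 and the last bitmap word becomes 0xFFC00000, marking the 10
 * nonexistent cells as "allocated".
 */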
  508. static JS_ALWAYS_INLINE JSBool
  509. IsMarkedDouble(JSGCArenaInfo *a, uint32 index)
  510. {
  511. jsbitmap *bitmap;
  512. JS_ASSERT(a->u.hasMarkedDoubles);
  513. bitmap = DOUBLE_ARENA_BITMAP(a);
  514. return JS_TEST_BIT(bitmap, index);
  515. }
  516. /*
  517. * JSRuntime.gcDoubleArenaList.nextDoubleFlags points either to:
  518. *
  519. * 1. The next byte in the bitmap area for doubles to check for unmarked
  520. * (or free) doubles.
  521. * 2. Or to the end of the bitmap area when all GC cells of the arena are
  522. * allocated.
  523. * 3. Or to a special sentinel value indicating that there are no arenas
  524. * to check for unmarked doubles.
  525. *
  526. * We set the sentinel to ARENA_INFO_OFFSET so the single check
  527. *
  528. * ((jsuword) nextDoubleFlags & GC_ARENA_MASK) == ARENA_INFO_OFFSET
  529. *
  530. * will cover both the second and the third cases.
  531. */
  532. #define DOUBLE_BITMAP_SENTINEL ((jsbitmap *) ARENA_INFO_OFFSET)
  533. #ifdef JS_THREADSAFE
  534. /*
  535. * The maximum number of things to put on the local free list by taking
  536. * several things from the global free list or from the tail of the last
  537. * allocated arena to amortize the cost of rt->gcLock.
  538. *
  539. * We use number 8 based on benchmarks from bug 312238.
  540. */
  541. #define MAX_THREAD_LOCAL_THINGS 8
  542. #endif
  543. JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval));
  544. JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString));
  545. JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble));
  546. /* We want to use all the available GC thing space for object's slots. */
  547. JS_STATIC_ASSERT(sizeof(JSObject) % sizeof(JSGCThing) == 0);
  548. /*
  549. * Ensure that JSObject is allocated from a different GC list than
  550. * jsdouble and JSString so we can easily finalize JSObject before these two
  551. * types of GC things. See comments in js_GC.
  552. */
  553. JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(JSString)) !=
  554. GC_FREELIST_INDEX(sizeof(JSObject)));
  555. JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(jsdouble)) !=
  556. GC_FREELIST_INDEX(sizeof(JSObject)));
  557. /*
  558. * JSPtrTable capacity growth descriptor. The table grows by powers of two
  559. * starting from capacity JSPtrTableInfo.minCapacity, but switching to linear
  560. * growth when capacity reaches JSPtrTableInfo.linearGrowthThreshold.
  561. */
  562. typedef struct JSPtrTableInfo {
  563. uint16 minCapacity;
  564. uint16 linearGrowthThreshold;
  565. } JSPtrTableInfo;
  566. #define GC_ITERATOR_TABLE_MIN 4
  567. #define GC_ITERATOR_TABLE_LINEAR 1024
  568. static const JSPtrTableInfo iteratorTableInfo = {
  569. GC_ITERATOR_TABLE_MIN,
  570. GC_ITERATOR_TABLE_LINEAR
  571. };
  572. /* Calculate table capacity based on the current value of JSPtrTable.count. */
  573. static size_t
  574. PtrTableCapacity(size_t count, const JSPtrTableInfo *info)
  575. {
  576. size_t linear, log, capacity;
  577. linear = info->linearGrowthThreshold;
  578. JS_ASSERT(info->minCapacity <= linear);
  579. if (count == 0) {
  580. capacity = 0;
  581. } else if (count < linear) {
  582. log = JS_CEILING_LOG2W(count);
  583. JS_ASSERT(log != JS_BITS_PER_WORD);
  584. capacity = (size_t)1 << log;
  585. if (capacity < info->minCapacity)
  586. capacity = info->minCapacity;
  587. } else {
  588. capacity = JS_ROUNDUP(count, linear);
  589. }
  590. JS_ASSERT(capacity >= count);
  591. return capacity;
  592. }
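/*
 * A sketch of the resulting growth policy for iteratorTableInfo
 * (minCapacity 4, linearGrowthThreshold 1024), following the math above:
 *
 *   count:    0   1   5   600    1500
 *   capacity: 0   4   8   1024   2048
 */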
  593. static void
  594. FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info)
  595. {
  596. if (table->array) {
  597. JS_ASSERT(table->count > 0);
  598. free(table->array);
  599. table->array = NULL;
  600. table->count = 0;
  601. }
  602. JS_ASSERT(table->count == 0);
  603. }
  604. static JSBool
  605. AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info,
  606. void *ptr)
  607. {
  608. size_t count, capacity;
  609. void **array;
  610. count = table->count;
  611. capacity = PtrTableCapacity(count, info);
  612. if (count == capacity) {
  613. if (capacity < info->minCapacity) {
  614. JS_ASSERT(capacity == 0);
  615. JS_ASSERT(!table->array);
  616. capacity = info->minCapacity;
  617. } else {
  618. /*
  619. * Simplify the overflow detection by assuming a pointer is bigger
  620. * than a byte.
  621. */
  622. JS_STATIC_ASSERT(2 <= sizeof table->array[0]);
  623. capacity = (capacity < info->linearGrowthThreshold)
  624. ? 2 * capacity
  625. : capacity + info->linearGrowthThreshold;
  626. if (capacity > (size_t)-1 / sizeof table->array[0])
  627. goto bad;
  628. }
  629. array = (void **) realloc(table->array,
  630. capacity * sizeof table->array[0]);
  631. if (!array)
  632. goto bad;
  633. #ifdef DEBUG
  634. memset(array + count, JS_FREE_PATTERN,
  635. (capacity - count) * sizeof table->array[0]);
  636. #endif
  637. table->array = array;
  638. }
  639. table->array[count] = ptr;
  640. table->count = count + 1;
  641. return JS_TRUE;
  642. bad:
  643. JS_ReportOutOfMemory(cx);
  644. return JS_FALSE;
  645. }
  646. static void
  647. ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info,
  648. size_t newCount)
  649. {
  650. size_t oldCapacity, capacity;
  651. void **array;
  652. JS_ASSERT(newCount <= table->count);
  653. if (newCount == table->count)
  654. return;
  655. oldCapacity = PtrTableCapacity(table->count, info);
  656. table->count = newCount;
  657. capacity = PtrTableCapacity(newCount, info);
  658. if (oldCapacity != capacity) {
  659. array = table->array;
  660. JS_ASSERT(array);
  661. if (capacity == 0) {
  662. free(array);
  663. table->array = NULL;
  664. return;
  665. }
  666. array = (void **) realloc(array, capacity * sizeof array[0]);
  667. if (array)
  668. table->array = array;
  669. }
  670. #ifdef DEBUG
  671. memset(table->array + newCount, JS_FREE_PATTERN,
  672. (capacity - newCount) * sizeof table->array[0]);
  673. #endif
  674. }
  675. #ifdef JS_GCMETER
  676. # define METER(x) ((void) (x))
  677. # define METER_IF(condition, x) ((void) ((condition) && (x)))
  678. #else
  679. # define METER(x) ((void) 0)
  680. # define METER_IF(condition, x) ((void) 0)
  681. #endif
  682. #define METER_UPDATE_MAX(maxLval, rval) \
  683. METER_IF((maxLval) < (rval), (maxLval) = (rval))
  684. #if JS_GC_USE_MMAP || !HAS_POSIX_MEMALIGN
  685. /*
  686. * For chunks allocated via over-sized malloc, get a pointer to store the gap
  687. * between the malloc's result and the first arena in the chunk.
  688. */
  689. static uint32 *
  690. GetMallocedChunkGapPtr(jsuword chunk)
  691. {
  692. JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
  693. /* Use the memory after the chunk, see NewGCChunk for details. */
  694. return (uint32 *) (chunk + (js_gcArenasPerChunk << GC_ARENA_SHIFT));
  695. }
  696. #endif
  697. static jsuword
  698. NewGCChunk(void)
  699. {
  700. void *p;
  701. #if JS_GC_USE_MMAP
  702. if (js_gcUseMmap) {
  703. # if defined(XP_WIN)
  704. p = VirtualAlloc(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
  705. MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
  706. return (jsuword) p;
  707. # else
  708. p = mmap(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
  709. PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  710. return (p == MAP_FAILED) ? 0 : (jsuword) p;
  711. # endif
  712. }
  713. #endif
  714. #if HAS_POSIX_MEMALIGN
  715. if (0 != posix_memalign(&p, GC_ARENA_SIZE,
  716. GC_ARENA_SIZE * js_gcArenasPerChunk -
  717. JS_GC_ARENA_PAD)) {
  718. return 0;
  719. }
  720. return (jsuword) p;
  721. #else
  722. /*
  723. * Implement chunk allocation using oversized malloc if mmap and
  724. * posix_memalign are not available.
  725. *
  726. * Since malloc allocates pointers aligned on the word boundary, to get
  727. * js_gcArenasPerChunk aligned arenas, we need to malloc only
  728. *
  729. * ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT) - sizeof(size_t)
  730. *
  731. * bytes. But since we store the gap between the malloced pointer and the
  732. * first arena in the chunk after the chunk, we need to ask for
  733. *
  734. * ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT)
  735. *
  736. * bytes to ensure that we always have room to store the gap.
  737. */
  738. p = malloc((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT);
  739. if (!p)
  740. return 0;
  741. {
  742. jsuword chunk;
  743. chunk = ((jsuword) p + GC_ARENA_MASK) & ~GC_ARENA_MASK;
  744. *GetMallocedChunkGapPtr(chunk) = (uint32) (chunk - (jsuword) p);
  745. return chunk;
  746. }
  747. #endif
  748. }
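/*
 * A hypothetical run of the malloc fallback above with the default
 * js_gcArenasPerChunk of 7: malloc asks for 8 << 12 = 32768 bytes; if it
 * returns 0x10010008, then chunk = 0x10011000 and the gap 0xFF8 is stored
 * at chunk + (7 << 12), which still lies inside the 32768-byte allocation.
 */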
  749. static void
  750. DestroyGCChunk(jsuword chunk)
  751. {
  752. JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
  753. #if JS_GC_USE_MMAP
  754. if (js_gcUseMmap) {
  755. # if defined(XP_WIN)
  756. VirtualFree((void *) chunk, 0, MEM_RELEASE);
  757. # elif defined(SOLARIS)
  758. munmap((char *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
  759. # else
  760. munmap((void *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
  761. # endif
  762. return;
  763. }
  764. #endif
  765. #if HAS_POSIX_MEMALIGN
  766. free((void *) chunk);
  767. #else
  768. /* See comments in NewGCChunk. */
  769. JS_ASSERT(*GetMallocedChunkGapPtr(chunk) < GC_ARENA_SIZE);
  770. free((void *) (chunk - *GetMallocedChunkGapPtr(chunk)));
  771. #endif
  772. }
  773. #if CHUNKED_ARENA_ALLOCATION
  774. static void
  775. AddChunkToList(JSRuntime *rt, JSGCChunkInfo *ci)
  776. {
  777. ci->prevp = &rt->gcChunkList;
  778. ci->next = rt->gcChunkList;
  779. if (rt->gcChunkList) {
  780. JS_ASSERT(rt->gcChunkList->prevp == &rt->gcChunkList);
  781. rt->gcChunkList->prevp = &ci->next;
  782. }
  783. rt->gcChunkList = ci;
  784. }
  785. static void
  786. RemoveChunkFromList(JSRuntime *rt, JSGCChunkInfo *ci)
  787. {
  788. *ci->prevp = ci->next;
  789. if (ci->next) {
  790. JS_ASSERT(ci->next->prevp == &ci->next);
  791. ci->next->prevp = ci->prevp;
  792. }
  793. }
  794. #endif
  795. static JSGCArenaInfo *
  796. NewGCArena(JSRuntime *rt)
  797. {
  798. jsuword chunk;
  799. JSGCArenaInfo *a;
  800. if (rt->gcBytes >= rt->gcMaxBytes)
  801. return NULL;
  802. #if CHUNKED_ARENA_ALLOCATION
  803. if (js_gcArenasPerChunk == 1) {
  804. #endif
  805. chunk = NewGCChunk();
  806. if (chunk == 0)
  807. return NULL;
  808. a = ARENA_START_TO_INFO(chunk);
  809. #if CHUNKED_ARENA_ALLOCATION
  810. } else {
  811. JSGCChunkInfo *ci;
  812. uint32 i;
  813. JSGCArenaInfo *aprev;
  814. ci = rt->gcChunkList;
  815. if (!ci) {
  816. chunk = NewGCChunk();
  817. if (chunk == 0)
  818. return NULL;
  819. JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
  820. a = GET_ARENA_INFO(chunk, 0);
  821. a->firstArena = JS_TRUE;
  822. a->arenaIndex = 0;
  823. aprev = NULL;
  824. i = 0;
  825. do {
  826. a->prev = aprev;
  827. aprev = a;
  828. ++i;
  829. a = GET_ARENA_INFO(chunk, i);
  830. a->firstArena = JS_FALSE;
  831. a->arenaIndex = i;
  832. } while (i != js_gcArenasPerChunk - 1);
  833. ci = GET_CHUNK_INFO(chunk, 0);
  834. ci->lastFreeArena = aprev;
  835. ci->numFreeArenas = js_gcArenasPerChunk - 1;
  836. AddChunkToList(rt, ci);
  837. } else {
  838. JS_ASSERT(ci->prevp == &rt->gcChunkList);
  839. a = ci->lastFreeArena;
  840. aprev = a->prev;
  841. if (!aprev) {
  842. JS_ASSERT(ci->numFreeArenas == 1);
  843. JS_ASSERT(ARENA_INFO_TO_START(a) == (jsuword) ci);
  844. RemoveChunkFromList(rt, ci);
  845. chunk = GET_ARENA_CHUNK(a, GET_ARENA_INDEX(a));
  846. SET_CHUNK_INFO_INDEX(chunk, NO_FREE_ARENAS);
  847. } else {
  848. JS_ASSERT(ci->numFreeArenas >= 2);
  849. JS_ASSERT(ARENA_INFO_TO_START(a) != (jsuword) ci);
  850. ci->lastFreeArena = aprev;
  851. ci->numFreeArenas--;
  852. }
  853. }
  854. }
  855. #endif
  856. rt->gcBytes += GC_ARENA_SIZE;
  857. a->prevUntracedPage = 0;
  858. memset(&a->u, 0, sizeof(a->u));
  859. return a;
  860. }
  861. static void
  862. DestroyGCArenas(JSRuntime *rt, JSGCArenaInfo *last)
  863. {
  864. JSGCArenaInfo *a;
  865. while (last) {
  866. a = last;
  867. last = last->prev;
  868. METER(rt->gcStats.afree++);
  869. JS_ASSERT(rt->gcBytes >= GC_ARENA_SIZE);
  870. rt->gcBytes -= GC_ARENA_SIZE;
  871. #if CHUNKED_ARENA_ALLOCATION
  872. if (js_gcArenasPerChunk == 1) {
  873. #endif
  874. DestroyGCChunk(ARENA_INFO_TO_START(a));
  875. #if CHUNKED_ARENA_ALLOCATION
  876. } else {
  877. uint32 arenaIndex;
  878. jsuword chunk;
  879. uint32 chunkInfoIndex;
  880. JSGCChunkInfo *ci;
  881. # ifdef DEBUG
  882. jsuword firstArena;
  883. firstArena = a->firstArena;
  884. arenaIndex = a->arenaIndex;
  885. memset((void *) ARENA_INFO_TO_START(a), JS_FREE_PATTERN,
  886. GC_ARENA_SIZE - JS_GC_ARENA_PAD);
  887. a->firstArena = firstArena;
  888. a->arenaIndex = arenaIndex;
  889. # endif
  890. arenaIndex = GET_ARENA_INDEX(a);
  891. chunk = GET_ARENA_CHUNK(a, arenaIndex);
  892. chunkInfoIndex = GET_CHUNK_INFO_INDEX(chunk);
  893. if (chunkInfoIndex == NO_FREE_ARENAS) {
  894. chunkInfoIndex = arenaIndex;
  895. SET_CHUNK_INFO_INDEX(chunk, arenaIndex);
  896. ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
  897. a->prev = NULL;
  898. ci->lastFreeArena = a;
  899. ci->numFreeArenas = 1;
  900. AddChunkToList(rt, ci);
  901. } else {
  902. JS_ASSERT(chunkInfoIndex != arenaIndex);
  903. ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
  904. JS_ASSERT(ci->numFreeArenas != 0);
  905. JS_ASSERT(ci->lastFreeArena);
  906. JS_ASSERT(a != ci->lastFreeArena);
  907. if (ci->numFreeArenas == js_gcArenasPerChunk - 1) {
  908. RemoveChunkFromList(rt, ci);
  909. DestroyGCChunk(chunk);
  910. } else {
  911. ++ci->numFreeArenas;
  912. a->prev = ci->lastFreeArena;
  913. ci->lastFreeArena = a;
  914. }
  915. }
  916. }
  917. #endif
  918. }
  919. }
  920. static void
  921. InitGCArenaLists(JSRuntime *rt)
  922. {
  923. uintN i, thingSize;
  924. JSGCArenaList *arenaList;
  925. for (i = 0; i < GC_NUM_FREELISTS; i++) {
  926. arenaList = &rt->gcArenaList[i];
  927. thingSize = GC_FREELIST_NBYTES(i);
  928. JS_ASSERT((size_t)(uint16)thingSize == thingSize);
  929. arenaList->last = NULL;
  930. arenaList->lastCount = (uint16) THINGS_PER_ARENA(thingSize);
  931. arenaList->thingSize = (uint16) thingSize;
  932. arenaList->freeList = NULL;
  933. }
  934. rt->gcDoubleArenaList.first = NULL;
  935. rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL;
  936. }
  937. static void
  938. FinishGCArenaLists(JSRuntime *rt)
  939. {
  940. uintN i;
  941. JSGCArenaList *arenaList;
  942. for (i = 0; i < GC_NUM_FREELISTS; i++) {
  943. arenaList = &rt->gcArenaList[i];
  944. DestroyGCArenas(rt, arenaList->last);
  945. arenaList->last = NULL;
  946. arenaList->lastCount = THINGS_PER_ARENA(arenaList->thingSize);
  947. arenaList->freeList = NULL;
  948. }
  949. DestroyGCArenas(rt, rt->gcDoubleArenaList.first);
  950. rt->gcDoubleArenaList.first = NULL;
  951. rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL;
  952. rt->gcBytes = 0;
  953. JS_ASSERT(rt->gcChunkList == 0);
  954. }
  955. /*
  956. * This function must not be called when thing is jsdouble.
  957. */
  958. static uint8 *
  959. GetGCThingFlags(void *thing)
  960. {
  961. JSGCArenaInfo *a;
  962. uint32 index;
  963. a = THING_TO_ARENA(thing);
  964. index = THING_TO_INDEX(thing, a->list->thingSize);
  965. return THING_FLAGP(a, index);
  966. }
  967. /*
  968. * This function returns null when thing is jsdouble.
  969. */
  970. static uint8 *
  971. GetGCThingFlagsOrNull(void *thing)
  972. {
  973. JSGCArenaInfo *a;
  974. uint32 index;
  975. a = THING_TO_ARENA(thing);
  976. if (!a->list)
  977. return NULL;
  978. index = THING_TO_INDEX(thing, a->list->thingSize);
  979. return THING_FLAGP(a, index);
  980. }
  981. intN
  982. js_GetExternalStringGCType(JSString *str)
  983. {
  984. uintN type;
  985. type = (uintN) *GetGCThingFlags(str) & GCF_TYPEMASK;
  986. JS_ASSERT(type == GCX_STRING || type >= GCX_EXTERNAL_STRING);
  987. return (type == GCX_STRING) ? -1 : (intN) (type - GCX_EXTERNAL_STRING);
  988. }
  989. static uint32
  990. MapGCFlagsToTraceKind(uintN flags)
  991. {
  992. uint32 type;
  993. type = flags & GCF_TYPEMASK;
  994. JS_ASSERT(type != GCX_DOUBLE);
  995. JS_ASSERT(type < GCX_NTYPES);
  996. return (type < GCX_EXTERNAL_STRING) ? type : JSTRACE_STRING;
  997. }
  998. JS_FRIEND_API(uint32)
  999. js_GetGCThingTraceKind(void *thing)
  1000. {
  1001. JSGCArenaInfo *a;
  1002. uint32 index;
  1003. a = THING_TO_ARENA(thing);
  1004. if (!a->list)
  1005. return JSTRACE_DOUBLE;
  1006. index = THING_TO_INDEX(thing, a->list->thingSize);
  1007. return MapGCFlagsToTraceKind(*THING_FLAGP(a, index));
  1008. }
  1009. JSRuntime*
  1010. js_GetGCStringRuntime(JSString *str)
  1011. {
  1012. JSGCArenaList *list;
  1013. list = THING_TO_ARENA(str)->list;
  1014. JS_ASSERT(list->thingSize == sizeof(JSGCThing));
  1015. JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing)) == 0);
  1016. return (JSRuntime *)((uint8 *)list - offsetof(JSRuntime, gcArenaList));
  1017. }
  1018. JSBool
  1019. js_IsAboutToBeFinalized(JSContext *cx, void *thing)
  1020. {
  1021. JSGCArenaInfo *a;
  1022. uint32 index, flags;
  1023. a = THING_TO_ARENA(thing);
  1024. if (!a->list) {
  1025. /*
  1026. * Check if the arena has no marked doubles. In that case the mark
  1027. * bitmap contains garbage, as it is initialized only when the first
  1028. * double in the arena is marked.
  1029. */
  1030. if (!a->u.hasMarkedDoubles)
  1031. return JS_TRUE;
  1032. index = DOUBLE_THING_TO_INDEX(thing);
  1033. return !IsMarkedDouble(a, index);
  1034. }
  1035. index = THING_TO_INDEX(thing, a->list->thingSize);
  1036. flags = *THING_FLAGP(a, index);
  1037. return !(flags & (GCF_MARK | GCF_LOCK | GCF_FINAL));
  1038. }
  1039. /* This is compatible with JSDHashEntryStub. */
  1040. typedef struct JSGCRootHashEntry {
  1041. JSDHashEntryHdr hdr;
  1042. void *root;
  1043. const char *name;
  1044. } JSGCRootHashEntry;
  1045. /* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */
  1046. #define GC_ROOTS_SIZE 256
  1047. #if CHUNKED_ARENA_ALLOCATION
  1048. /*
  1049. * For a CPU with extremely large pages, using them for GC things wastes
  1050. * too much memory.
  1051. */
  1052. # define GC_ARENAS_PER_CPU_PAGE_LIMIT JS_BIT(18 - GC_ARENA_SHIFT)
  1053. JS_STATIC_ASSERT(GC_ARENAS_PER_CPU_PAGE_LIMIT <= NO_FREE_ARENAS);
  1054. #endif
  1055. JSBool
  1056. js_InitGC(JSRuntime *rt, uint32 maxbytes)
  1057. {
  1058. #if JS_GC_USE_MMAP
  1059. if (js_gcArenasPerChunk == 0) {
  1060. size_t cpuPageSize, arenasPerPage;
  1061. # if defined(XP_WIN)
  1062. SYSTEM_INFO si;
  1063. GetSystemInfo(&si);
  1064. cpuPageSize = si.dwPageSize;
  1065. # elif defined(XP_UNIX) || defined(XP_BEOS)
  1066. cpuPageSize = (size_t) sysconf(_SC_PAGESIZE);
  1067. # else
  1068. # error "Not implemented"
  1069. # endif
  1070. /* cpuPageSize is a power of 2. */
  1071. JS_ASSERT((cpuPageSize & (cpuPageSize - 1)) == 0);
  1072. arenasPerPage = cpuPageSize >> GC_ARENA_SHIFT;
  1073. #ifdef DEBUG
  1074. if (arenasPerPage == 0) {
  1075. fprintf(stderr,
  1076. "JS engine warning: the size of the CPU page, %u bytes, is too low to use\n"
  1077. "paged allocation for the garbage collector. Please report this.\n",
  1078. (unsigned) cpuPageSize);
  1079. }
  1080. #endif
  1081. if (arenasPerPage - 1 <= (size_t) (GC_ARENAS_PER_CPU_PAGE_LIMIT - 1)) {
  1082. /*
  1083. * Use at least 4 GC arenas per paged allocation chunk to minimize
  1084. * the overhead of mmap/VirtualAlloc.
  1085. */
  1086. js_gcUseMmap = JS_TRUE;
  1087. js_gcArenasPerChunk = JS_MAX((uint32) arenasPerPage, 4);
  1088. } else {
  1089. js_gcUseMmap = JS_FALSE;
  1090. js_gcArenasPerChunk = 7;
  1091. }
  1092. }
  1093. JS_ASSERT(1 <= js_gcArenasPerChunk &&
  1094. js_gcArenasPerChunk <= NO_FREE_ARENAS);
  1095. #endif
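/*
 * For illustration, assuming the sizing logic above: with 4 KB CPU pages
 * arenasPerPage is 1, so mmap is used with js_gcArenasPerChunk = 4; with
 * pages larger than 64 arenas (256 KB) mmap is rejected and the code falls
 * back to malloc'ed chunks of 7 arenas.
 */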
  1096. InitGCArenaLists(rt);
  1097. if (!JS_DHashTableInit(&rt->gcRootsHash, JS_DHashGetStubOps(), NULL,
  1098. sizeof(JSGCRootHashEntry), GC_ROOTS_SIZE)) {
  1099. rt->gcRootsHash.ops = NULL;
  1100. return JS_FALSE;
  1101. }
  1102. rt->gcLocksHash = NULL; /* create lazily */
  1103. /*
  1104. * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
  1105. * for default backward API compatibility.
  1106. */
  1107. rt->gcMaxBytes = rt->gcMaxMallocBytes = maxbytes;
  1108. rt->gcEmptyArenaPoolLifespan = 30000;
  1109. METER(memset(&rt->gcStats, 0, sizeof rt->gcStats));
  1110. return JS_TRUE;
  1111. }
  1112. #ifdef JS_GCMETER
  1113. static void
  1114. UpdateArenaStats(JSGCArenaStats *st, uint32 nlivearenas, uint32 nkilledArenas,
  1115. uint32 nthings)
  1116. {
  1117. size_t narenas;
  1118. narenas = nlivearenas + nkilledArenas;
  1119. JS_ASSERT(narenas >= st->livearenas);
  1120. st->newarenas = narenas - st->livearenas;
  1121. st->narenas = narenas;
  1122. st->livearenas = nlivearenas;
  1123. if (st->maxarenas < narenas)
  1124. st->maxarenas = narenas;
  1125. st->totalarenas += narenas;
  1126. st->nthings = nthings;
  1127. if (st->maxthings < nthings)
  1128. st->maxthings = nthings;
  1129. st->totalthings += nthings;
  1130. }
  1131. JS_FRIEND_API(void)
  1132. js_DumpGCStats(JSRuntime *rt, FILE *fp)
  1133. {
  1134. int i;
  1135. size_t sumArenas, sumTotalArenas;
  1136. size_t sumThings, sumMaxThings;
  1137. size_t sumThingSize, sumTotalThingSize;
  1138. size_t sumArenaCapacity, sumTotalArenaCapacity;
  1139. JSGCArenaStats *st;
  1140. size_t thingSize, thingsPerArena;
  1141. size_t sumAlloc, sumLocalAlloc, sumFail, sumRetry;
  1142. fprintf(fp, "\nGC allocation statistics:\n");
  1143. #define UL(x) ((unsigned long)(x))
  1144. #define ULSTAT(x) UL(rt->gcStats.x)
  1145. #define PERCENT(x,y) (100.0 * (double) (x) / (double) (y))
  1146. sumArenas = 0;
  1147. sumTotalArenas = 0;
  1148. sumThings = 0;
  1149. sumMaxThings = 0;
  1150. sumThingSize = 0;
  1151. sumTotalThingSize = 0;
  1152. sumArenaCapacity = 0;
  1153. sumTotalArenaCapacity = 0;
  1154. sumAlloc = 0;
  1155. sumLocalAlloc = 0;
  1156. sumFail = 0;
  1157. sumRetry = 0;
  1158. for (i = -1; i < (int) GC_NUM_FREELISTS; i++) {
  1159. if (i == -1) {
  1160. thingSize = sizeof(jsdouble);
  1161. thingsPerArena = DOUBLES_PER_ARENA;
  1162. st = &rt->gcStats.doubleArenaStats;
  1163. fprintf(fp,
  1164. "Arena list for double values (%lu doubles per arena):",
  1165. UL(thingsPerArena));
  1166. } else {
  1167. thingSize = rt->gcArenaList[i].thingSize;
  1168. thingsPerArena = THINGS_PER_ARENA(thingSize);
  1169. st = &rt->gcStats.arenaStats[i];
  1170. fprintf(fp,
  1171. "Arena list %d (thing size %lu, %lu things per arena):",
  1172. i, UL(GC_FREELIST_NBYTES(i)), UL(thingsPerArena));
  1173. }
  1174. if (st->maxarenas == 0) {
  1175. fputs(" NEVER USED\n", fp);
  1176. continue;
  1177. }
  1178. putc('\n', fp);
  1179. fprintf(fp, " arenas before GC: %lu\n", UL(st->narenas));
  1180. fprintf(fp, " new arenas before GC: %lu (%.1f%%)\n",
  1181. UL(st->newarenas), PERCENT(st->newarenas, st->narenas));
  1182. fprintf(fp, " arenas after GC: %lu (%.1f%%)\n",
  1183. UL(st->livearenas), PERCENT(st->livearenas, st->narenas));
  1184. fprintf(fp, " max arenas: %lu\n", UL(st->maxarenas));
  1185. fprintf(fp, " things: %lu\n", UL(st->nthings));
  1186. fprintf(fp, " GC cell utilization: %.1f%%\n",
  1187. PERCENT(st->nthings, thingsPerArena * st->narenas));
  1188. fprintf(fp, " average cell utilization: %.1f%%\n",
  1189. PERCENT(st->totalthings, thingsPerArena * st->totalarenas));
  1190. fprintf(fp, " max things: %lu\n", UL(st->maxthings));
  1191. fprintf(fp, " alloc attempts: %lu\n", UL(st->alloc));
  1192. fprintf(fp, " alloc without locks: %1u (%.1f%%)\n",
  1193. UL(st->localalloc), PERCENT(st->localalloc, st->alloc));
  1194. sumArenas += st->narenas;
  1195. sumTotalArenas += st->totalarenas;
  1196. sumThings += st->nthings;
  1197. sumMaxThings += st->maxthings;
  1198. sumThingSize += thingSize * st->nthings;
  1199. sumTotalThingSize += thingSize * st->totalthings;
  1200. sumArenaCapacity += thingSize * thingsPerArena * st->narenas;
  1201. sumTotalArenaCapacity += thingSize * thingsPerArena * st->totalarenas;
  1202. sumAlloc += st->alloc;
  1203. sumLocalAlloc += st->localalloc;
  1204. sumFail += st->fail;
  1205. sumRetry += st->retry;
  1206. }
  1207. fprintf(fp, "TOTAL STATS:\n");
  1208. fprintf(fp, " bytes allocated: %lu\n", UL(rt->gcBytes));
  1209. fprintf(fp, " total GC arenas: %lu\n", UL(sumArenas));
  1210. fprintf(fp, " total GC things: %lu\n", UL(sumThings));
  1211. fprintf(fp, " max total GC things: %lu\n", UL(sumMaxThings));
  1212. fprintf(fp, " GC cell utilization: %.1f%%\n",
  1213. PERCENT(sumThingSize, sumArenaCapacity));
  1214. fprintf(fp, " average cell utilization: %.1f%%\n",
  1215. PERCENT(sumTotalThingSize, sumTotalArenaCapacity));
  1216. fprintf(fp, "allocation retries after GC: %lu\n", UL(sumRetry));
  1217. fprintf(fp, " alloc attempts: %lu\n", UL(sumAlloc));
  1218. fprintf(fp, " alloc without locks: %1u (%.1f%%)\n",
  1219. UL(sumLocalAlloc), PERCENT(sumLocalAlloc, sumAlloc));
  1220. fprintf(fp, " allocation failures: %lu\n", UL(sumFail));
  1221. fprintf(fp, " things born locked: %lu\n", ULSTAT(lockborn));
  1222. fprintf(fp, " valid lock calls: %lu\n", ULSTAT(lock));
  1223. fprintf(fp, " valid unlock calls: %lu\n", ULSTAT(unlock));
  1224. fprintf(fp, " mark recursion depth: %lu\n", ULSTAT(depth));
  1225. fprintf(fp, " maximum mark recursion: %lu\n", ULSTAT(maxdepth));
  1226. fprintf(fp, " mark C recursion depth: %lu\n", ULSTAT(cdepth));
  1227. fprintf(fp, " maximum mark C recursion: %lu\n", ULSTAT(maxcdepth));
  1228. fprintf(fp, " delayed tracing calls: %lu\n", ULSTAT(untraced));
  1229. #ifdef DEBUG
  1230. fprintf(fp, " max trace later count: %lu\n", ULSTAT(maxuntraced));
  1231. #endif
  1232. fprintf(fp, " maximum GC nesting level: %lu\n", ULSTAT(maxlevel));
  1233. fprintf(fp, "potentially useful GC calls: %lu\n", ULSTAT(poke));
  1234. fprintf(fp, " thing arenas freed so far: %lu\n", ULSTAT(afree));
  1235. fprintf(fp, " stack segments scanned: %lu\n", ULSTAT(stackseg));
  1236. fprintf(fp, "stack segment slots scanned: %lu\n", ULSTAT(segslots));
  1237. fprintf(fp, "reachable closeable objects: %lu\n", ULSTAT(nclose));
  1238. fprintf(fp, " max reachable closeable: %lu\n", ULSTAT(maxnclose));
  1239. fprintf(fp, " scheduled close hooks: %lu\n", ULSTAT(closelater));
  1240. fprintf(fp, " max scheduled close hooks: %lu\n", ULSTAT(maxcloselater));
  1241. #undef UL
  1242. #undef ULSTAT
  1243. #undef PERCENT
  1244. #ifdef JS_ARENAMETER
  1245. JS_DumpArenaStats(fp);
  1246. #endif
  1247. }
  1248. #endif
  1249. #ifdef DEBUG
  1250. static void
  1251. CheckLeakedRoots(JSRuntime *rt);
  1252. #endif
  1253. #ifdef JS_THREADSAFE
  1254. static void
  1255. TrimGCFreeListsPool(JSRuntime *rt, uintN keepCount);
  1256. #endif
  1257. void
  1258. js_FinishGC(JSRuntime *rt)
  1259. {
  1260. #ifdef JS_ARENAMETER
  1261. JS_DumpArenaStats(stdout);
  1262. #endif
  1263. #ifdef JS_GCMETER
  1264. js_DumpGCStats(rt, stdout);
  1265. #endif
  1266. FreePtrTable(&rt->gcIteratorTable, &iteratorTableInfo);
  1267. #ifdef JS_THREADSAFE
  1268. TrimGCFreeListsPool(rt, 0);
  1269. JS_ASSERT(!rt->gcFreeListsPool);
  1270. #endif
  1271. FinishGCArenaLists(rt);
  1272. if (rt->gcRootsHash.ops) {
  1273. #ifdef DEBUG
  1274. CheckLeakedRoots(rt);
  1275. #endif
  1276. JS_DHashTableFinish(&rt->gcRootsHash);
  1277. rt->gcRootsHash.ops = NULL;
  1278. }
  1279. if (rt->gcLocksHash) {
  1280. JS_DHashTableDestroy(rt->gcLocksHash);
  1281. rt->gcLocksHash = NULL;
  1282. }
  1283. }
  1284. JSBool
  1285. js_AddRoot(JSContext *cx, void *rp, const char *name)
  1286. {
  1287. JSBool ok = js_AddRootRT(cx->runtime, rp, name);
  1288. if (!ok)
  1289. JS_ReportOutOfMemory(cx);
  1290. return ok;
  1291. }
  1292. JSBool
  1293. js_AddRootRT(JSRuntime *rt, void *rp, const char *name)
  1294. {
  1295. JSBool ok;
  1296. JSGCRootHashEntry *rhe;
  1297. /*
  1298. * Due to the long-standing, but now removed, use of rt->gcLock across the
  1299. * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
  1300. * properly with a racing GC, without calling JS_AddRoot from a request.
  1301. * We have to preserve API compatibility here, now that we avoid holding
  1302. * rt->gcLock across the mark phase (including the root hashtable mark).
  1303. *
  1304. * If the GC is running and we're called on another thread, wait for this
  1305. * GC activation to finish. We can safely wait here (in the case where we
  1306. * are called within a request on another thread's context) without fear
  1307. * of deadlock because the GC doesn't set rt->gcRunning until after it has
  1308. * waited for all active requests to end.
  1309. */
  1310. JS_LOCK_GC(rt);
  1311. #ifdef JS_THREADSAFE
  1312. JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
  1313. if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
  1314. do {
  1315. JS_AWAIT_GC_DONE(rt);
  1316. } while (rt->gcLevel > 0);
  1317. }
  1318. #endif
  1319. rhe = (JSGCRootHashEntry *)
  1320. JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_ADD);
  1321. if (rhe) {
  1322. rhe->root = rp;
  1323. rhe->name = name;
  1324. ok = JS_TRUE;
  1325. } else {
  1326. ok = JS_FALSE;
  1327. }
  1328. JS_UNLOCK_GC(rt);
  1329. return ok;
  1330. }
  1331. JSBool
  1332. js_RemoveRoot(JSRuntime *rt, void *rp)
  1333. {
  1334. /*
  1335. * Due to the JS_RemoveRootRT API, we may be called outside of a request.
  1336. * Same synchronization drill as above in js_AddRoot.
  1337. */
  1338. JS_LOCK_GC(rt);
  1339. #ifdef JS_THREADSAFE
  1340. JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
  1341. if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
  1342. do {
  1343. JS_AWAIT_GC_DONE(rt);
  1344. } while (rt->gcLevel > 0);
  1345. }
  1346. #endif
  1347. (void) JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_REMOVE);
  1348. rt->gcPoke = JS_TRUE;
  1349. JS_UNLOCK_GC(rt);
  1350. return JS_TRUE;
  1351. }
  1352. #ifdef DEBUG
  1353. static JSDHashOperator
  1354. js_root_printer(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 i, void *arg)
  1355. {
  1356. uint32 *leakedroots = (uint32 *)arg;
  1357. JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
  1358. (*leakedroots)++;
  1359. fprintf(stderr,
  1360. "JS engine warning: leaking GC root \'%s\' at %p\n",
  1361. rhe->name ? (char *)rhe->name : "", rhe->root);
  1362. return JS_DHASH_NEXT;
  1363. }
  1364. static void
  1365. CheckLeakedRoots(JSRuntime *rt)
  1366. {
  1367. uint32 leakedroots = 0;
  1368. /* Warn (but don't assert) in debug builds about any remaining roots. */
  1369. JS_DHashTableEnumerate(&rt->gcRootsHash, js_root_printer,
  1370. &leakedroots);
  1371. if (leakedroots > 0) {
  1372. if (leakedroots == 1) {
  1373. fprintf(stderr,
  1374. "JS engine warning: 1 GC root remains after destroying the JSRuntime at %p.\n"
  1375. " This root may point to freed memory. Objects reachable\n"
  1376. " through it have not been finalized.\n",
  1377. (void *) rt);
  1378. } else {
  1379. fprintf(stderr,
  1380. "JS engine warning: %lu GC roots remain after destroying the JSRuntime at %p.\n"
  1381. " These roots may point to freed memory. Objects reachable\n"
  1382. " through them have not been finalized.\n",
  1383. (unsigned long) leakedroots, (void *) rt);
  1384. }
  1385. }
  1386. }
  1387. typedef struct NamedRootDumpArgs {
  1388. void (*dump)(const char *name, void *rp, void *data);
  1389. void *data;
  1390. } NamedRootDumpArgs;
  1391. static JSDHashOperator
  1392. js_named_root_dumper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
  1393. void *arg)
  1394. {
  1395. NamedRootDumpArgs *args = (NamedRootDumpArgs *) arg;
  1396. JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
  1397. if (rhe->name)
  1398. args->dump(rhe->name, rhe->root, args->data);
  1399. return JS_DHASH_NEXT;
  1400. }
  1401. JS_BEGIN_EXTERN_C
  1402. void
  1403. js_DumpNamedRoots(JSRuntime *rt,
  1404. void (*dump)(const char *name, void *rp, void *data),
  1405. void *data)
  1406. {
  1407. NamedRootDumpArgs args;
  1408. args.dump = dump;
  1409. args.data = data;
  1410. JS_DHashTableEnumerate(&rt->gcRootsHash, js_named_root_dumper, &args);
  1411. }
  1412. JS_END_EXTERN_C
  1413. #endif /* DEBUG */
  1414. typedef struct GCRootMapArgs {
  1415. JSGCRootMapFun map;
  1416. void *data;
  1417. } GCRootMapArgs;
  1418. static JSDHashOperator
  1419. js_gcroot_mapper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
  1420. void *arg)
  1421. {
  1422. GCRootMapArgs *args = (GCRootMapArgs *) arg;
  1423. JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
  1424. intN mapflags;
  1425. int op;
  1426. mapflags = args->map(rhe->root, rhe->name, args->data);
  1427. #if JS_MAP_GCROOT_NEXT == JS_DHASH_NEXT && \
  1428. JS_MAP_GCROOT_STOP == JS_DHASH_STOP && \
  1429. JS_MAP_GCROOT_REMOVE == JS_DHASH_REMOVE
  1430. op = (JSDHashOperator)mapflags;
  1431. #else
  1432. op = JS_DHASH_NEXT;
  1433. if (mapflags & JS_MAP_GCROOT_STOP)
  1434. op |= JS_DHASH_STOP;
  1435. if (mapflags & JS_MAP_GCROOT_REMOVE)
  1436. op |= JS_DHASH_REMOVE;
  1437. #endif
  1438. return (JSDHashOperator) op;
  1439. }
  1440. uint32
  1441. js_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data)
  1442. {
  1443. GCRootMapArgs args;
  1444. uint32 rv;
  1445. args.map = map;
  1446. args.data = data;
  1447. JS_LOCK_GC(rt);
  1448. rv = JS_DHashTableEnumerate(&rt->gcRootsHash, js_gcroot_mapper, &args);
  1449. JS_UNLOCK_GC(rt);
  1450. return rv;
  1451. }
  1452. JSBool
  1453. js_RegisterCloseableIterator(JSContext *cx, JSObject *obj)
  1454. {
  1455. JSRuntime *rt;
  1456. JSBool ok;
  1457. rt = cx->runtime;
  1458. JS_ASSERT(!rt->gcRunning);
  1459. JS_LOCK_GC(rt);
  1460. ok = AddToPtrTable(cx, &rt->gcIteratorTable, &iteratorTableInfo, obj);
  1461. JS_UNLOCK_GC(rt);
  1462. return ok;
  1463. }
static void
CloseNativeIterators(JSContext *cx)
{
    JSRuntime *rt;
    size_t count, newCount, i;
    void **array;
    JSObject *obj;

    rt = cx->runtime;
    count = rt->gcIteratorTable.count;
    array = rt->gcIteratorTable.array;

    newCount = 0;
    for (i = 0; i != count; ++i) {
        obj = (JSObject *)array[i];
        if (js_IsAboutToBeFinalized(cx, obj))
            js_CloseNativeIterator(cx, obj);
        else
            array[newCount++] = obj;
    }
    ShrinkPtrTable(&rt->gcIteratorTable, &iteratorTableInfo, newCount);
}
#if defined(DEBUG_brendan) || defined(DEBUG_timeless)
#define DEBUG_gchist
#endif

#ifdef DEBUG_gchist
#define NGCHIST 64

static struct GCHist {
    JSBool      lastDitch;
    JSGCThing   *freeList;
} gchist[NGCHIST];

unsigned gchpos = 0;
#endif
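/*
 * Thread-local GC free lists. js_GCEmptyFreeListSet is a shared, read-only
 * sentinel meaning "this context has no local free lists yet". A real set is
 * installed lazily by EnsureLocalFreeList, either reused from
 * rt->gcFreeListsPool or calloc'ed fresh, and is handed back to the pool by
 * js_RevokeGCLocalFreeLists. TrimGCFreeListsPool clears the first keepCount
 * pooled sets and frees the rest.
 */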
#ifdef JS_THREADSAFE

const JSGCFreeListSet js_GCEmptyFreeListSet = {
    { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }, NULL
};

static void
TrimGCFreeListsPool(JSRuntime *rt, uintN keepCount)
{
    JSGCFreeListSet **cursor, *freeLists, *link;

    cursor = &rt->gcFreeListsPool;
    while (keepCount != 0) {
        --keepCount;
        freeLists = *cursor;
        if (!freeLists)
            return;
        memset(freeLists->array, 0, sizeof freeLists->array);
        cursor = &freeLists->link;
    }
    freeLists = *cursor;
    if (freeLists) {
        *cursor = NULL;
        do {
            link = freeLists->link;
            free(freeLists);
        } while ((freeLists = link) != NULL);
    }
}
void
js_RevokeGCLocalFreeLists(JSContext *cx)
{
    JS_ASSERT(!cx->gcLocalFreeLists->link);
    if (cx->gcLocalFreeLists != &js_GCEmptyFreeListSet) {
        cx->gcLocalFreeLists->link = cx->runtime->gcFreeListsPool;
        cx->runtime->gcFreeListsPool = cx->gcLocalFreeLists;
        cx->gcLocalFreeLists = (JSGCFreeListSet *) &js_GCEmptyFreeListSet;
    }
}
static JSGCFreeListSet *
EnsureLocalFreeList(JSContext *cx)
{
    JSGCFreeListSet *freeLists;

    freeLists = cx->gcLocalFreeLists;
    if (freeLists != &js_GCEmptyFreeListSet) {
        JS_ASSERT(freeLists);
        return freeLists;
    }

    freeLists = cx->runtime->gcFreeListsPool;
    if (freeLists) {
        cx->runtime->gcFreeListsPool = freeLists->link;
        freeLists->link = NULL;
    } else {
        /* JS_malloc is not used as the caller reports out-of-memory itself. */
        freeLists = (JSGCFreeListSet *) calloc(1, sizeof *freeLists);
        if (!freeLists)
            return NULL;
    }

    cx->gcLocalFreeLists = freeLists;
    return freeLists;
}

#endif
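/*
 * Allocate a GC thing of the given size and flag type. The fast path pops a
 * thing from the context's thread-local free list without taking the GC
 * lock. Otherwise, under the GC lock, the allocator may run a last-ditch GC,
 * then tries the arena list's global free list, then bump-allocates from the
 * last arena, creating a new arena when that one is full. While the lock is
 * held it also refills the thread-local free list for future fast-path
 * allocations.
 */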
void *
js_NewGCThing(JSContext *cx, uintN flags, size_t nbytes)
{
    JSRuntime *rt;
    uintN flindex;
    JSBool doGC;
    JSGCThing *thing;
    uint8 *flagp;
    JSGCArenaList *arenaList;
    JSGCArenaInfo *a;
    uintN thingsLimit;
    JSLocalRootStack *lrs;
#ifdef JS_GCMETER
    JSGCArenaStats *astats;
#endif
#ifdef JS_THREADSAFE
    JSBool gcLocked;
    uintN localMallocBytes;
    JSGCFreeListSet *freeLists;
    JSGCThing **lastptr;
    JSGCThing *tmpthing;
    uint8 *tmpflagp;
    uintN maxFreeThings;        /* max to take from the global free list */
#endif

    JS_ASSERT((flags & GCF_TYPEMASK) != GCX_DOUBLE);
    rt = cx->runtime;
    nbytes = JS_ROUNDUP(nbytes, sizeof(JSGCThing));
    flindex = GC_FREELIST_INDEX(nbytes);

    /* Updates of metering counters here may not be thread-safe. */
    METER(astats = &cx->runtime->gcStats.arenaStats[flindex]);
    METER(astats->alloc++);

#ifdef JS_THREADSAFE
    gcLocked = JS_FALSE;
    JS_ASSERT(cx->thread);
    freeLists = cx->gcLocalFreeLists;
    thing = freeLists->array[flindex];
    localMallocBytes = cx->thread->gcMallocBytes;
    if (thing && rt->gcMaxMallocBytes - rt->gcMallocBytes > localMallocBytes) {
        flagp = thing->flagp;
        freeLists->array[flindex] = thing->next;
        METER(astats->localalloc++);
        goto success;
    }

    JS_LOCK_GC(rt);
    gcLocked = JS_TRUE;

    /* Transfer thread-local counter to global one. */
    if (localMallocBytes != 0) {
        cx->thread->gcMallocBytes = 0;
        if (rt->gcMaxMallocBytes - rt->gcMallocBytes < localMallocBytes)
            rt->gcMallocBytes = rt->gcMaxMallocBytes;
        else
            rt->gcMallocBytes += localMallocBytes;
    }
#endif
    JS_ASSERT(!rt->gcRunning);
    if (rt->gcRunning) {
        METER(rt->gcStats.finalfail++);
        JS_UNLOCK_GC(rt);
        return NULL;
    }

    doGC = (rt->gcMallocBytes >= rt->gcMaxMallocBytes && rt->gcPoke);
#ifdef JS_GC_ZEAL
    doGC = doGC || rt->gcZeal >= 2 || (rt->gcZeal >= 1 && rt->gcPoke);
#endif

    arenaList = &rt->gcArenaList[flindex];
    for (;;) {
        if (doGC && !JS_ON_TRACE(cx)) {
            /*
             * Keep rt->gcLock across the call into js_GC so we don't starve
             * and lose to racing threads who deplete the heap just after
             * js_GC has replenished it (or has synchronized with a racing
             * GC that collected a bunch of garbage). This unfair scheduling
             * can happen on certain operating systems. For the gory details,
             * see bug 162779 at https://bugzilla.mozilla.org/.
             */
            js_GC(cx, GC_LAST_DITCH);
            METER(astats->retry++);
        }

        /* Try to get thing from the free list. */
        thing = arenaList->freeList;
        if (thing) {
            arenaList->freeList = thing->next;
            flagp = thing->flagp;
            JS_ASSERT(*flagp & GCF_FINAL);

#ifdef JS_THREADSAFE
            /*
             * Refill the local free list by taking several things from the
             * global free list unless we are still at rt->gcMaxMallocBytes
             * barrier or the free list is already populated. The former
             * happens when GC is canceled due to !gcCallback(cx, JSGC_BEGIN)
             * or no gcPoke. The latter is caused via allocating new things
             * in gcCallback(cx, JSGC_END).
             */
            if (rt->gcMallocBytes >= rt->gcMaxMallocBytes)
                break;

            freeLists = EnsureLocalFreeList(cx);
            if (!freeLists)
                goto fail;
            if (freeLists->array[flindex])
                break;

            tmpthing = arenaList->freeList;
            if (tmpthing) {
                maxFreeThings = MAX_THREAD_LOCAL_THINGS;
                do {
                    if (!tmpthing->next)
                        break;
                    tmpthing = tmpthing->next;
                } while (--maxFreeThings != 0);

                freeLists->array[flindex] = arenaList->freeList;
                arenaList->freeList = tmpthing->next;
                tmpthing->next = NULL;
            }
#endif
            break;
        }

        /*
         * Try to allocate things from the last arena. If it is fully used,
         * check if we can allocate a new one and, if we cannot, consider
         * doing a "last ditch" GC unless already tried.
         */
        thingsLimit = THINGS_PER_ARENA(nbytes);
        if (arenaList->lastCount != thingsLimit) {
            JS_ASSERT(arenaList->lastCount < thingsLimit);
            a = arenaList->last;
        } else {
            a = NewGCArena(rt);
            if (!a) {
                if (doGC || JS_ON_TRACE(cx))
                    goto fail;
                doGC = JS_TRUE;
                continue;
            }
            a->list = arenaList;
            a->prev = arenaList->last;
            a->prevUntracedPage = 0;
            a->u.untracedThings = 0;
            arenaList->last = a;
            arenaList->lastCount = 0;
        }

        flagp = THING_FLAGP(a, arenaList->lastCount);
        thing = FLAGP_TO_THING(flagp, nbytes);
        arenaList->lastCount++;

#ifdef JS_THREADSAFE
        /*
         * Refill the local free list by taking free things from the last
         * arena. Prefer to order free things by ascending address in the
         * (unscientific) hope of better cache locality.
         */
        if (rt->gcMallocBytes >= rt->gcMaxMallocBytes)
            break;

        freeLists = EnsureLocalFreeList(cx);
        if (!freeLists)
            goto fail;
        if (freeLists->array[flindex])
            break;
        lastptr = &freeLists->array[flindex];
        maxFreeThings = thingsLimit - arenaList->lastCount;
        if (maxFreeThings > MAX_THREAD_LOCAL_THINGS)
            maxFreeThings = MAX_THREAD_LOCAL_THINGS;
        while (maxFreeThings != 0) {
            --maxFreeThings;

            tmpflagp = THING_FLAGP(a, arenaList->lastCount);
            tmpthing = FLAGP_TO_THING(tmpflagp, nbytes);
            arenaList->lastCount++;
            tmpthing->flagp = tmpflagp;
            *tmpflagp = GCF_FINAL;    /* signifying that thing is free */

            *lastptr = tmpthing;
            lastptr = &tmpthing->next;
        }
        *lastptr = NULL;
#endif
        break;
    }

    /* We successfully allocated the thing. */
#ifdef JS_THREADSAFE
  success:
#endif
    lrs = cx->localRootStack;
    if (lrs) {
        /*
         * If we're in a local root scope, don't set newborn[type] at all, to
         * avoid entraining garbage from it for an unbounded amount of time
         * on this context. A caller will leave the local root scope and pop
         * this reference, allowing thing to be GC'd if it has no other refs.
         * See JS_EnterLocalRootScope and related APIs.
         */
        if (js_PushLocalRoot(cx, lrs, (jsval) thing) < 0) {
            /*
             * When we fail for a thing allocated through the tail of the last
             * arena, thing's flag byte is not initialized. So to prevent GC
             * accessing the uninitialized flags during the finalization, we
             * always mark the thing as final. See bug 337407.
             */
            *flagp = GCF_FINAL;
            goto fail;
        }
    } else {
        /*
         * No local root scope, so we're stuck with the old, fragile model of
         * depending on a pigeon-hole newborn per type per context.
         */
        cx->weakRoots.newborn[flags & GCF_TYPEMASK] = thing;
    }

    /* We can't fail now, so update flags. */
    *flagp = (uint8)flags;

#ifdef DEBUG_gchist
    gchist[gchpos].lastDitch = doGC;
    gchist[gchpos].freeList = rt->gcArenaList[flindex].freeList;
    if (++gchpos == NGCHIST)
        gchpos = 0;
#endif

    /* This is not thread-safe for thread-local allocations. */
    METER_IF(flags & GCF_LOCK, rt->gcStats.lockborn++);

#ifdef JS_THREADSAFE
    if (gcLocked)
        JS_UNLOCK_GC(rt);
#endif
    JS_COUNT_OPERATION(cx, JSOW_ALLOCATION);
    return thing;

  fail:
#ifdef JS_THREADSAFE
    if (gcLocked)
        JS_UNLOCK_GC(rt);
#endif
    METER(astats->fail++);
    if (!JS_ON_TRACE(cx))
        JS_ReportOutOfMemory(cx);
    return NULL;
}
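/*
 * Doubles use a per-context free list (cx->doubleFreeList) backed by arenas
 * whose cells are tracked with a usage bitmap (doubleFlags) rather than
 * per-thing flag bytes. RefillDoubleFreeList rebuilds that list by scanning
 * the bitmap for unused cells, resorting to a GC or a fresh arena when none
 * are available (see the remainder of the function below).
 */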
static JSGCDoubleCell *
RefillDoubleFreeList(JSContext *cx)
{
    JSRuntime *rt;
    jsbitmap *doubleFlags, usedBits;
    JSBool didGC = JS_FALSE;
    JSGCArenaInfo *a;
    uintN bit, index;
    JSGCDoubleCell *cell, *list, *lastcell;

    JS_ASSERT(!cx->doubleFreeList);
    rt = cx->runtime;