/js/lib/Socket.IO-node/support/expresso/deps/jscoverage/js/jslock.cpp

http://github.com/onedayitwillmake/RealtimeMultiplayerNodeJs

/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla Communicator client code, released
 * March 31, 1998.
 *
 * The Initial Developer of the Original Code is
 * Netscape Communications Corporation.
 * Portions created by the Initial Developer are Copyright (C) 1998
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either of the GNU General Public License Version 2 or later (the "GPL"),
 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
#ifdef JS_THREADSAFE

/*
 * JS locking stubs.
 */
#include "jsstddef.h"
#include <stdlib.h>
#include <string.h>
#include "jspubtd.h"
#include "jsutil.h" /* Added by JSIFY */
#include "jstypes.h"
#include "jsbit.h"
#include "jscntxt.h"
#include "jsdtoa.h"
#include "jsgc.h"
#include "jsfun.h" /* for VALUE_IS_FUNCTION used by *_WRITE_BARRIER */
#include "jslock.h"
#include "jsscope.h"
#include "jsstr.h"
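
/*
 * ReadWord is a plain, unfenced read of a lock word; the callers below pair
 * it with NativeCompareAndSwap wherever an atomic update or ordering is
 * actually required.
 */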
#define ReadWord(W) (W)

/* Implement NativeCompareAndSwap. */

#if defined(_WIN32) && defined(_M_IX86)
#pragma warning( disable : 4035 )
JS_BEGIN_EXTERN_C
extern long __cdecl
_InterlockedCompareExchange(long *volatile dest, long exchange, long comp);
JS_END_EXTERN_C
#pragma intrinsic(_InterlockedCompareExchange)

static JS_ALWAYS_INLINE int
NativeCompareAndSwapHelper(jsword *w, jsword ov, jsword nv)
{
    _InterlockedCompareExchange(w, nv, ov);
    __asm {
        sete al
    }
}

static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    return (NativeCompareAndSwapHelper(w, ov, nv) & 1);
}

#elif defined(XP_MACOSX) || defined(DARWIN)

#include <libkern/OSAtomic.h>

static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    /* Details on these functions available in the manpage for atomic */
#if JS_BYTES_PER_WORD == 8 && JS_BYTES_PER_LONG != 8
    return OSAtomicCompareAndSwap64Barrier(ov, nv, (int64_t*) w);
#else
    return OSAtomicCompareAndSwap32Barrier(ov, nv, (int32_t*) w);
#endif
}

#elif defined(__GNUC__) && defined(__i386__)

/* Note: This fails on 386 cpus, cmpxchgl is a >= 486 instruction */
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    unsigned int res;

    __asm__ __volatile__ (
        "lock\n"
        "cmpxchgl %2, (%1)\n"
        "sete %%al\n"
        "andl $1, %%eax\n"
        : "=a" (res)
        : "r" (w), "r" (nv), "a" (ov)
        : "cc", "memory");
    return (int)res;
}

#elif defined(__GNUC__) && defined(__x86_64__)

static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    unsigned int res;

    __asm__ __volatile__ (
        "lock\n"
        "cmpxchgq %2, (%1)\n"
        "sete %%al\n"
        "movzbl %%al, %%eax\n"
        : "=a" (res)
        : "r" (w), "r" (nv), "a" (ov)
        : "cc", "memory");
    return (int)res;
}

#elif defined(SOLARIS) && defined(sparc) && defined(ULTRA_SPARC)

static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
#if defined(__GNUC__)
    unsigned int res;
    JS_ASSERT(ov != nv);
    asm volatile ("\
stbar\n\
cas [%1],%2,%3\n\
cmp %2,%3\n\
be,a 1f\n\
mov 1,%0\n\
mov 0,%0\n\
1:"
                  : "=r" (res)
                  : "r" (w), "r" (ov), "r" (nv));
    return (int)res;
#else /* !__GNUC__ */
    extern int compare_and_swap(jsword*, jsword, jsword);
    JS_ASSERT(ov != nv);
    return compare_and_swap(w, ov, nv);
#endif
}

#elif defined(AIX)

#include <sys/atomic_op.h>

static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    return !_check_lock((atomic_p)w, ov, nv);
}

#elif defined(USE_ARM_KUSER)

/* See https://bugzilla.mozilla.org/show_bug.cgi?id=429387 for a
 * description of this ABI; this is a function provided at a fixed
 * location by the kernel in the memory space of each process.
 */
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

JS_STATIC_ASSERT(sizeof(jsword) == sizeof(int));

static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    volatile int *vp = (volatile int *) w;
    PRInt32 failed = 1;

    /* Loop until a __kernel_cmpxchg succeeds. See bug 446169 */
    do {
        failed = __kernel_cmpxchg(ov, nv, vp);
    } while (failed && *vp == ov);
    return !failed;
}

#elif JS_HAS_NATIVE_COMPARE_AND_SWAP

#error "JS_HAS_NATIVE_COMPARE_AND_SWAP should be 0 if your platform lacks a compare-and-swap instruction."

#endif /* arch-tests */

#if JS_HAS_NATIVE_COMPARE_AND_SWAP

JSBool
js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    return !!NativeCompareAndSwap(w, ov, nv);
}

#elif defined(NSPR_LOCK)

# ifdef __GNUC__
# warning "js_CompareAndSwap is implemented using NSPR lock"
# endif

JSBool
js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    int result;
    static PRLock *CompareAndSwapLock = JS_NEW_LOCK();

    JS_ACQUIRE_LOCK(CompareAndSwapLock);
    result = (*w == ov);
    if (result)
        *w = nv;
    JS_RELEASE_LOCK(CompareAndSwapLock);
    return result;
}

#else /* !defined(NSPR_LOCK) */

#error "NSPR_LOCK should be on when the platform lacks native compare-and-swap."

#endif

#ifndef NSPR_LOCK

struct JSFatLock {
    int         susp;
    PRLock      *slock;
    PRCondVar   *svar;
    JSFatLock   *next;
    JSFatLock   **prevp;
};

typedef struct JSFatLockTable {
    JSFatLock   *free;
    JSFatLock   *taken;
} JSFatLockTable;
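
/*
 * GLOBAL_LOCK_INDEX hashes a thin lock address into the global_locks table:
 * the low two (alignment) bits are shifted away and the result is masked by
 * global_locks_mask, so lock traffic is spread across a power-of-two number
 * of NSPR locks.
 */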
#define GLOBAL_LOCK_INDEX(id)   (((uint32)(jsuword)(id)>>2) & global_locks_mask)

static void
js_Dequeue(JSThinLock *);

static PRLock **global_locks;
static uint32 global_lock_count = 1;
static uint32 global_locks_log2 = 0;
static uint32 global_locks_mask = 0;

static void
js_LockGlobal(void *id)
{
    uint32 i = GLOBAL_LOCK_INDEX(id);
    PR_Lock(global_locks[i]);
}

static void
js_UnlockGlobal(void *id)
{
    uint32 i = GLOBAL_LOCK_INDEX(id);
    PR_Unlock(global_locks[i]);
}

#endif /* !NSPR_LOCK */
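
/*
 * js_InitLock/js_FinishLock manage a per-title thin lock. Without NSPR_LOCK
 * the thin lock starts out zeroed (unowned, no fat lock attached); with
 * NSPR_LOCK the "thin" lock simply wraps a freshly allocated NSPR lock.
 */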
void
js_InitLock(JSThinLock *tl)
{
#ifdef NSPR_LOCK
    tl->owner = 0;
    tl->fat = (JSFatLock*)JS_NEW_LOCK();
#else
    memset(tl, 0, sizeof(JSThinLock));
#endif
}

void
js_FinishLock(JSThinLock *tl)
{
#ifdef NSPR_LOCK
    tl->owner = 0xdeadbeef;
    if (tl->fat)
        JS_DESTROY_LOCK(((JSLock*)tl->fat));
#else
    JS_ASSERT(tl->owner == 0);
    JS_ASSERT(tl->fat == NULL);
#endif
}

#ifdef DEBUG_SCOPE_COUNT

#include <stdio.h>
#include "jsdhash.h"

static FILE *logfp;
static JSDHashTable logtbl;

typedef struct logentry {
    JSDHashEntryStub stub;
    char             op;
    const char       *file;
    int              line;
} logentry;

static void
logit(JSScope *scope, char op, const char *file, int line)
{
    logentry *entry;

    if (!logfp) {
        logfp = fopen("/tmp/scope.log", "w");
        if (!logfp)
            return;
        setvbuf(logfp, NULL, _IONBF, 0);
    }
    fprintf(logfp, "%p %c %s %d\n", scope, op, file, line);

    if (!logtbl.entryStore &&
        !JS_DHashTableInit(&logtbl, JS_DHashGetStubOps(), NULL,
                           sizeof(logentry), 100)) {
        return;
    }
    entry = (logentry *) JS_DHashTableOperate(&logtbl, scope, JS_DHASH_ADD);
    if (!entry)
        return;
    entry->stub.key = scope;
    entry->op = op;
    entry->file = file;
    entry->line = line;
}

void
js_unlog_scope(JSScope *scope)
{
    if (!logtbl.entryStore)
        return;
    (void) JS_DHashTableOperate(&logtbl, scope, JS_DHASH_REMOVE);
}

# define LOGIT(scope,op) logit(scope, op, __FILE__, __LINE__)

#else

# define LOGIT(scope,op) /* nothing */

#endif /* DEBUG_SCOPE_COUNT */

/*
 * Return true if scope's ownercx, or the ownercx of a single-threaded scope
 * for which ownercx is waiting to become multi-threaded and shared, is cx.
 * That condition implies deadlock in ClaimScope if cx's thread were to wait
 * to share scope.
 *
 * (i) rt->gcLock held
 */
static JSBool
WillDeadlock(JSTitle *title, JSContext *cx)
{
    JSContext *ownercx;

    do {
        ownercx = title->ownercx;
        if (ownercx == cx) {
            JS_RUNTIME_METER(cx->runtime, deadlocksAvoided);
            return JS_TRUE;
        }
    } while (ownercx && (title = ownercx->titleToShare) != NULL);
    return JS_FALSE;
}

/*
 * Make title multi-threaded, i.e. share its ownership among contexts in rt
 * using a "thin" or (if necessary due to contention) "fat" lock. Called only
 * from ClaimTitle, immediately below, when we detect deadlock were we to wait
 * for title's lock, because its ownercx is waiting on a title owned by the
 * calling cx.
 *
 * (i) rt->gcLock held
 */
static void
ShareTitle(JSContext *cx, JSTitle *title)
{
    JSRuntime *rt;
    JSTitle **todop;

    rt = cx->runtime;
    if (title->u.link) {
        for (todop = &rt->titleSharingTodo; *todop != title;
             todop = &(*todop)->u.link) {
            JS_ASSERT(*todop != NO_TITLE_SHARING_TODO);
        }
        *todop = title->u.link;
        title->u.link = NULL;       /* null u.link for sanity ASAP */
        JS_NOTIFY_ALL_CONDVAR(rt->titleSharingDone);
    }
    js_InitLock(&title->lock);
    title->u.count = 0;
    js_FinishSharingTitle(cx, title);
}

/*
 * js_FinishSharingTitle is the tail part of ShareTitle, split out to become a
 * subroutine of JS_EndRequest too. The bulk of the work here involves making
 * mutable strings in the title's object's slots be immutable. We have to do
 * this because such strings will soon be available to multiple threads, so
 * their buffers can't be realloc'd any longer in js_ConcatStrings, and their
 * members can't be modified by js_ConcatStrings, js_MinimizeDependentStrings,
 * or js_UndependString.
 *
 * The last bit of work done by js_FinishSharingTitle nulls title->ownercx and
 * updates rt->sharedTitles.
 */
void
js_FinishSharingTitle(JSContext *cx, JSTitle *title)
{
    JSObjectMap *map;
    JSScope *scope;
    JSObject *obj;
    uint32 nslots, i;
    jsval v;

    map = TITLE_TO_MAP(title);
    if (!MAP_IS_NATIVE(map))
        return;
    scope = (JSScope *)map;

    obj = scope->object;
    if (obj) {
        nslots = scope->map.freeslot;
        for (i = 0; i != nslots; ++i) {
            v = STOBJ_GET_SLOT(obj, i);
            if (JSVAL_IS_STRING(v) &&
                !js_MakeStringImmutable(cx, JSVAL_TO_STRING(v))) {
                /*
                 * FIXME bug 363059: The following error recovery changes
                 * runtime execution semantics, arbitrarily and silently
                 * ignoring errors except out-of-memory, which should have been
                 * reported through JS_ReportOutOfMemory at this point.
                 */
                STOBJ_SET_SLOT(obj, i, JSVAL_VOID);
            }
        }
    }

    title->ownercx = NULL;  /* NB: set last, after lock init */
    JS_RUNTIME_METER(cx->runtime, sharedTitles);
}

/*
 * Given a title with apparently non-null ownercx different from cx, try to
 * set ownercx to cx, claiming exclusive (single-threaded) ownership of title.
 * If we claim ownership, return true. Otherwise, we wait for ownercx to be
 * set to null (indicating that title is multi-threaded); or if waiting would
 * deadlock, we set ownercx to null ourselves via ShareTitle. In any case,
 * once ownercx is null we return false.
 */
static JSBool
ClaimTitle(JSTitle *title, JSContext *cx)
{
    JSRuntime *rt;
    JSContext *ownercx;
    jsrefcount saveDepth;
    PRStatus stat;

    rt = cx->runtime;
    JS_RUNTIME_METER(rt, claimAttempts);
    JS_LOCK_GC(rt);

    /* Reload in case ownercx went away while we blocked on the lock. */
    while ((ownercx = title->ownercx) != NULL) {
        /*
         * Avoid selflock if ownercx is dead, or is not running a request, or
         * has the same thread as cx. Set title->ownercx to cx so that the
         * matching JS_UNLOCK_SCOPE or JS_UNLOCK_OBJ macro call will take the
         * fast path around the corresponding js_UnlockTitle or js_UnlockObj
         * function call.
         *
         * If title->u.link is non-null, title has already been inserted on
         * the rt->titleSharingTodo list, because another thread's context
         * already wanted to lock title while ownercx was running a request.
         * We can't claim any title whose u.link is non-null at this point,
         * even if ownercx->requestDepth is 0 (see below where we suspend our
         * request before waiting on rt->titleSharingDone).
         */
        if (!title->u.link &&
            (!js_ValidContextPointer(rt, ownercx) ||
             !ownercx->requestDepth ||
             ownercx->thread == cx->thread)) {
            JS_ASSERT(title->u.count == 0);
            title->ownercx = cx;
            JS_UNLOCK_GC(rt);
            JS_RUNTIME_METER(rt, claimedTitles);
            return JS_TRUE;
        }

        /*
         * Avoid deadlock if title's owner context is waiting on a title that
         * we own, by revoking title's ownership. This approach to deadlock
         * avoidance works because the engine never nests title locks.
         *
         * If cx could hold locks on ownercx->titleToShare, or if ownercx could
         * hold locks on title, we would need to keep reentrancy counts for all
         * such "flyweight" (ownercx != NULL) locks, so that control would
         * unwind properly once these locks became "thin" or "fat". The engine
         * promotes a title from exclusive to shared access only when locking,
         * never when holding or unlocking.
         *
         * Avoid deadlock before any of this title/context cycle detection if
         * cx is on the active GC's thread, because in that case, no requests
         * will run until the GC completes. Any title wanted by the GC (from
         * a finalizer) that can't be claimed must become shared.
         */
        if (rt->gcThread == cx->thread ||
            (ownercx->titleToShare &&
             WillDeadlock(ownercx->titleToShare, cx))) {
            ShareTitle(cx, title);
            break;
        }

        /*
         * Thanks to the non-zero NO_TITLE_SHARING_TODO link terminator, we
         * can decide whether title is on rt->titleSharingTodo with a single
         * non-null test, and avoid double-insertion bugs.
         */
        if (!title->u.link) {
            title->u.link = rt->titleSharingTodo;
            rt->titleSharingTodo = title;
            js_HoldObjectMap(cx, TITLE_TO_MAP(title));
        }

        /*
         * Inline JS_SuspendRequest before we wait on rt->titleSharingDone,
         * saving and clearing cx->requestDepth so we don't deadlock if the
         * GC needs to run on ownercx.
         *
         * Unlike JS_SuspendRequest and JS_EndRequest, we must take care not
         * to decrement rt->requestCount if cx is active on the GC's thread,
         * because the GC has already reduced rt->requestCount to exclude all
         * such contexts.
         */
        saveDepth = cx->requestDepth;
        if (saveDepth) {
            cx->requestDepth = 0;
            if (rt->gcThread != cx->thread) {
                JS_ASSERT(rt->requestCount > 0);
                rt->requestCount--;
                if (rt->requestCount == 0)
                    JS_NOTIFY_REQUEST_DONE(rt);
            }
        }

        /*
         * We know that some other thread's context owns title, which is now
         * linked onto rt->titleSharingTodo, awaiting the end of that other
         * thread's request. So it is safe to wait on rt->titleSharingDone.
         */
        cx->titleToShare = title;
        stat = PR_WaitCondVar(rt->titleSharingDone, PR_INTERVAL_NO_TIMEOUT);
        JS_ASSERT(stat != PR_FAILURE);

        /*
         * Inline JS_ResumeRequest after waiting on rt->titleSharingDone,
         * restoring cx->requestDepth. Same note as above for the inlined,
         * specialized JS_SuspendRequest code: beware rt->gcThread.
         */
        if (saveDepth) {
            if (rt->gcThread != cx->thread) {
                while (rt->gcLevel > 0)
                    JS_AWAIT_GC_DONE(rt);
                rt->requestCount++;
            }
            cx->requestDepth = saveDepth;
        }

        /*
         * Don't clear cx->titleToShare until after we're through waiting on
         * all condition variables protected by rt->gcLock -- that includes
         * rt->titleSharingDone *and* rt->gcDone (hidden in JS_AWAIT_GC_DONE,
         * in the inlined JS_ResumeRequest code immediately above).
         *
         * Otherwise, the GC could easily deadlock with another thread that
         * owns a title wanted by a finalizer. By keeping cx->titleToShare
         * set till here, we ensure that such deadlocks are detected, which
         * results in the finalized object's title being shared (it must, of
         * course, have other, live objects sharing it).
         */
        cx->titleToShare = NULL;
    }

    JS_UNLOCK_GC(rt);
    return JS_FALSE;
}

/* Exported to js.c, which calls it via OBJ_GET_* and JSVAL_IS_* macros. */
JS_FRIEND_API(jsval)
js_GetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot)
{
    jsval v;
    JSScope *scope;
    JSTitle *title;
#ifndef NSPR_LOCK
    JSThinLock *tl;
    jsword me;
#endif

    /*
     * We handle non-native objects via JSObjectOps.getRequiredSlot, treating
     * all slots starting from 0 as required slots. A property definition or
     * some prior arrangement must have allocated slot.
     *
     * Note once again (see jspubtd.h, before JSGetRequiredSlotOp's typedef)
     * the crucial distinction between a |required slot number| that's passed
     * to the get/setRequiredSlot JSObjectOps, and a |reserved slot index|
     * passed to the JS_Get/SetReservedSlot APIs.
     */
    if (!OBJ_IS_NATIVE(obj))
        return OBJ_GET_REQUIRED_SLOT(cx, obj, slot);

    /*
     * Native object locking is inlined here to optimize the single-threaded
     * and contention-free multi-threaded cases.
     */
    scope = OBJ_SCOPE(obj);
    title = &scope->title;
    JS_ASSERT(title->ownercx != cx);
    JS_ASSERT(slot < obj->map->freeslot);

    /*
     * Avoid locking if called from the GC. Also avoid locking an object
     * owning a sealed scope. If neither of those special cases applies, try
     * to claim scope's flyweight lock from whatever context may have had it in
     * an earlier request.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx) ||
        (SCOPE_IS_SEALED(scope) && scope->object == obj) ||
        (title->ownercx && ClaimTitle(title, cx))) {
        return STOBJ_GET_SLOT(obj, slot);
    }

#ifndef NSPR_LOCK
    tl = &title->lock;
    me = CX_THINLOCK_ID(cx);
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    if (NativeCompareAndSwap(&tl->owner, 0, me)) {
        /*
         * Got the lock with one compare-and-swap. Even so, someone else may
         * have mutated obj so it now has its own scope and lock, which would
         * require either a restart from the top of this routine, or a thin
         * lock release followed by fat lock acquisition.
         */
        if (scope == OBJ_SCOPE(obj)) {
            v = STOBJ_GET_SLOT(obj, slot);
            if (!NativeCompareAndSwap(&tl->owner, me, 0)) {
                /* Assert that scope locks never revert to flyweight. */
                JS_ASSERT(title->ownercx != cx);
                LOGIT(scope, '1');
                title->u.count = 1;
                js_UnlockObj(cx, obj);
            }
            return v;
        }
        if (!NativeCompareAndSwap(&tl->owner, me, 0))
            js_Dequeue(tl);
    }
    else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
        return STOBJ_GET_SLOT(obj, slot);
    }
#endif

    js_LockObj(cx, obj);
    v = STOBJ_GET_SLOT(obj, slot);

    /*
     * Test whether cx took ownership of obj's scope during js_LockObj.
     *
     * This does not mean that a given scope reverted to flyweight from "thin"
     * or "fat" -- it does mean that obj's map pointer changed due to another
     * thread setting a property, requiring obj to cease sharing a prototype
     * object's scope (whose lock was not flyweight, else we wouldn't be here
     * in the first place!).
     */
    title = &OBJ_SCOPE(obj)->title;
    if (title->ownercx != cx)
        js_UnlockTitle(cx, title);
    return v;
}

void
js_SetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot, jsval v)
{
    JSTitle *title;
    JSScope *scope;
#ifndef NSPR_LOCK
    JSThinLock *tl;
    jsword me;
#endif

    /* Any string stored in a thread-safe object must be immutable. */
    if (JSVAL_IS_STRING(v) &&
        !js_MakeStringImmutable(cx, JSVAL_TO_STRING(v))) {
        /* FIXME bug 363059: See comments in js_FinishSharingTitle. */
        v = JSVAL_NULL;
    }

    /*
     * We handle non-native objects via JSObjectOps.setRequiredSlot, as above
     * for the Get case.
     */
    if (!OBJ_IS_NATIVE(obj)) {
        OBJ_SET_REQUIRED_SLOT(cx, obj, slot, v);
        return;
    }

    /*
     * Native object locking is inlined here to optimize the single-threaded
     * and contention-free multi-threaded cases.
     */
    scope = OBJ_SCOPE(obj);
    title = &scope->title;
    JS_ASSERT(title->ownercx != cx);
    JS_ASSERT(slot < obj->map->freeslot);

    /*
     * Avoid locking if called from the GC. Also avoid locking an object
     * owning a sealed scope. If neither of those special cases applies, try
     * to claim scope's flyweight lock from whatever context may have had it in
     * an earlier request.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx) ||
        (SCOPE_IS_SEALED(scope) && scope->object == obj) ||
        (title->ownercx && ClaimTitle(title, cx))) {
        LOCKED_OBJ_WRITE_BARRIER(cx, obj, slot, v);
        return;
    }

#ifndef NSPR_LOCK
    tl = &title->lock;
    me = CX_THINLOCK_ID(cx);
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    if (NativeCompareAndSwap(&tl->owner, 0, me)) {
        if (scope == OBJ_SCOPE(obj)) {
            LOCKED_OBJ_WRITE_BARRIER(cx, obj, slot, v);
            if (!NativeCompareAndSwap(&tl->owner, me, 0)) {
                /* Assert that scope locks never revert to flyweight. */
                JS_ASSERT(title->ownercx != cx);
                LOGIT(scope, '1');
                title->u.count = 1;
                js_UnlockObj(cx, obj);
            }
            return;
        }
        if (!NativeCompareAndSwap(&tl->owner, me, 0))
            js_Dequeue(tl);
    }
    else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
        LOCKED_OBJ_WRITE_BARRIER(cx, obj, slot, v);
        return;
    }
#endif

    js_LockObj(cx, obj);
    LOCKED_OBJ_WRITE_BARRIER(cx, obj, slot, v);

    /*
     * Same drill as above, in js_GetSlotThreadSafe.
     */
    title = &OBJ_SCOPE(obj)->title;
    if (title->ownercx != cx)
        js_UnlockTitle(cx, title);
}

#ifndef NSPR_LOCK
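
/*
 * Fat lock allocation. A JSFatLock pairs an NSPR lock with a condition
 * variable; GetFatlock hands them out from per-bucket free lists (allocating
 * a fresh chunk of fl_list_chunk_len locks when a list runs dry) and
 * PutFatlock returns them once the last waiter leaves.
 */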
static JSFatLock *
NewFatlock()
{
    JSFatLock *fl = (JSFatLock *)malloc(sizeof(JSFatLock)); /* for now */
    if (!fl) return NULL;
    fl->susp = 0;
    fl->next = NULL;
    fl->prevp = NULL;
    fl->slock = PR_NewLock();
    fl->svar = PR_NewCondVar(fl->slock);
    return fl;
}

static void
DestroyFatlock(JSFatLock *fl)
{
    PR_DestroyLock(fl->slock);
    PR_DestroyCondVar(fl->svar);
    free(fl);
}

static JSFatLock *
ListOfFatlocks(int listc)
{
    JSFatLock *m;
    JSFatLock *m0;
    int i;

    JS_ASSERT(listc>0);
    m0 = m = NewFatlock();
    for (i=1; i<listc; i++) {
        m->next = NewFatlock();
        m = m->next;
    }
    return m0;
}

static void
DeleteListOfFatlocks(JSFatLock *m)
{
    JSFatLock *m0;
    for (; m; m=m0) {
        m0 = m->next;
        DestroyFatlock(m);
    }
}

static JSFatLockTable *fl_list_table = NULL;
static uint32          fl_list_table_len = 0;
static uint32          fl_list_chunk_len = 0;
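
/*
 * GetFatlock pops a lock from the free list of id's global-lock bucket and
 * pushes it onto that bucket's taken list; PutFatlock reverses the move.
 * Both run with the corresponding global lock held (see js_SuspendThread).
 */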
static JSFatLock *
GetFatlock(void *id)
{
    JSFatLock *m;
    uint32 i = GLOBAL_LOCK_INDEX(id);

    if (fl_list_table[i].free == NULL) {
#ifdef DEBUG
        if (fl_list_table[i].taken)
            printf("Ran out of fat locks!\n");
#endif
        fl_list_table[i].free = ListOfFatlocks(fl_list_chunk_len);
    }
    m = fl_list_table[i].free;
    fl_list_table[i].free = m->next;
    m->susp = 0;
    m->next = fl_list_table[i].taken;
    m->prevp = &fl_list_table[i].taken;
    if (fl_list_table[i].taken)
        fl_list_table[i].taken->prevp = &m->next;
    fl_list_table[i].taken = m;
    return m;
}

static void
PutFatlock(JSFatLock *m, void *id)
{
    uint32 i;

    if (m == NULL)
        return;

    /* Unlink m from fl_list_table[i].taken. */
    *m->prevp = m->next;
    if (m->next)
        m->next->prevp = m->prevp;

    /* Insert m in fl_list_table[i].free. */
    i = GLOBAL_LOCK_INDEX(id);
    m->next = fl_list_table[i].free;
    fl_list_table[i].free = m;
}

#endif /* !NSPR_LOCK */
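
/*
 * js_SetupLocks(listc, globc): listc is the fat-lock list chunk length and
 * globc the requested number of global locks; the global lock count is
 * rounded up to a power of two so GLOBAL_LOCK_INDEX can mask instead of
 * divide.
 */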
JSBool
js_SetupLocks(int listc, int globc)
{
#ifndef NSPR_LOCK
    uint32 i;

    if (global_locks)
        return JS_TRUE;
#ifdef DEBUG
    if (listc > 10000 || listc < 0)     /* listc == fat lock list chunk length */
        printf("Bad number %d in js_SetupLocks()!\n", listc);
    if (globc > 100 || globc < 0)       /* globc == number of global locks */
        printf("Bad number %d in js_SetupLocks()!\n", globc);
#endif
    global_locks_log2 = JS_CeilingLog2(globc);
    global_locks_mask = JS_BITMASK(global_locks_log2);
    global_lock_count = JS_BIT(global_locks_log2);
    global_locks = (PRLock **) malloc(global_lock_count * sizeof(PRLock*));
    if (!global_locks)
        return JS_FALSE;
    for (i = 0; i < global_lock_count; i++) {
        global_locks[i] = PR_NewLock();
        if (!global_locks[i]) {
            global_lock_count = i;
            js_CleanupLocks();
            return JS_FALSE;
        }
    }
    fl_list_table = (JSFatLockTable *) malloc(i * sizeof(JSFatLockTable));
    if (!fl_list_table) {
        js_CleanupLocks();
        return JS_FALSE;
    }
    fl_list_table_len = global_lock_count;
    for (i = 0; i < global_lock_count; i++)
        fl_list_table[i].free = fl_list_table[i].taken = NULL;
    fl_list_chunk_len = listc;
#endif /* !NSPR_LOCK */
    return JS_TRUE;
}

void
js_CleanupLocks()
{
#ifndef NSPR_LOCK
    uint32 i;

    if (global_locks) {
        for (i = 0; i < global_lock_count; i++)
            PR_DestroyLock(global_locks[i]);
        free(global_locks);
        global_locks = NULL;
        global_lock_count = 1;
        global_locks_log2 = 0;
        global_locks_mask = 0;
    }
    if (fl_list_table) {
        for (i = 0; i < fl_list_table_len; i++) {
            DeleteListOfFatlocks(fl_list_table[i].free);
            fl_list_table[i].free = NULL;
            DeleteListOfFatlocks(fl_list_table[i].taken);
            fl_list_table[i].taken = NULL;
        }
        free(fl_list_table);
        fl_list_table = NULL;
        fl_list_table_len = 0;
    }
#endif /* !NSPR_LOCK */
}

#ifdef NSPR_LOCK

static JS_ALWAYS_INLINE void
ThinLock(JSThinLock *tl, jsword me)
{
    JS_ACQUIRE_LOCK((JSLock *) tl->fat);
    tl->owner = me;
}

static JS_ALWAYS_INLINE void
ThinUnlock(JSThinLock *tl, jsword /*me*/)
{
    tl->owner = 0;
    JS_RELEASE_LOCK((JSLock *) tl->fat);
}

#else

/*
 * Fast locking and unlocking is implemented by delaying the allocation of a
 * system lock (fat lock) until contention. As long as a locking thread A
 * runs uncontended, the lock is represented solely by storing A's identity in
 * the object being locked.
 *
 * If another thread B tries to lock the object currently locked by A, B is
 * enqueued into a fat lock structure (which might have to be allocated and
 * pointed to by the object), and suspended using NSPR conditional variables
 * (wait). A wait bit (Bacon bit) is set in the lock word of the object,
 * signalling to A that when releasing the lock, B must be dequeued and
 * notified.
 *
 * The basic operation of the locking primitives (js_Lock, js_Unlock,
 * js_Enqueue, and js_Dequeue) is compare-and-swap. Hence, when locking into
 * the word pointed at by p, compare-and-swap(p, 0, A) success implies that p
 * is unlocked. Similarly, when unlocking p, if compare-and-swap(p, A, 0)
 * succeeds this implies that p is uncontended (no one is waiting because the
 * wait bit is not set).
 *
 * When dequeueing, the lock is released, and one of the threads suspended on
 * the lock is notified. If other threads still are waiting, the wait bit is
 * kept (in js_Enqueue), and if not, the fat lock is deallocated.
 *
 * The functions js_Enqueue, js_Dequeue, js_SuspendThread, and js_ResumeThread
 * are serialized using a global lock. For scalability, a hashtable of global
 * locks is used, which is indexed modulo the thin lock pointer.
 */

/*
 * Invariants:
 * (i) global lock is held
 * (ii) fl->susp >= 0
 */
static int
js_SuspendThread(JSThinLock *tl)
{
    JSFatLock *fl;
    PRStatus stat;

    if (tl->fat == NULL)
        fl = tl->fat = GetFatlock(tl);
    else
        fl = tl->fat;
    JS_ASSERT(fl->susp >= 0);
    fl->susp++;
    PR_Lock(fl->slock);
    js_UnlockGlobal(tl);
    stat = PR_WaitCondVar(fl->svar, PR_INTERVAL_NO_TIMEOUT);
    JS_ASSERT(stat != PR_FAILURE);
    PR_Unlock(fl->slock);
    js_LockGlobal(tl);
    fl->susp--;
    if (fl->susp == 0) {
        PutFatlock(fl, tl);
        tl->fat = NULL;
    }
    return tl->fat == NULL;
}

/*
 * (i) global lock is held
 * (ii) fl->susp > 0
 */
static void
js_ResumeThread(JSThinLock *tl)
{
    JSFatLock *fl = tl->fat;
    PRStatus stat;

    JS_ASSERT(fl != NULL);
    JS_ASSERT(fl->susp > 0);
    PR_Lock(fl->slock);
    js_UnlockGlobal(tl);
    stat = PR_NotifyCondVar(fl->svar);
    JS_ASSERT(stat != PR_FAILURE);
    PR_Unlock(fl->slock);
}
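
/*
 * js_Enqueue loops under the global lock: if the thin lock is held it
 * publishes the wait bit via compare-and-swap and suspends on the fat lock;
 * if the lock word is 0 it claims the lock with compare-and-swap and returns.
 * On wakeup, me keeps (or drops) the wait bit depending on whether other
 * waiters remain, so the eventual unlock knows whether to dequeue again.
 */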
static void
js_Enqueue(JSThinLock *tl, jsword me)
{
    jsword o, n;

    js_LockGlobal(tl);
    for (;;) {
        o = ReadWord(tl->owner);
        n = Thin_SetWait(o);
        if (o != 0 && NativeCompareAndSwap(&tl->owner, o, n)) {
            if (js_SuspendThread(tl))
                me = Thin_RemoveWait(me);
            else
                me = Thin_SetWait(me);
        }
        else if (NativeCompareAndSwap(&tl->owner, 0, me)) {
            js_UnlockGlobal(tl);
            return;
        }
    }
}
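
/*
 * js_Dequeue is called by the lock's owner when the wait bit is set: it
 * clears the lock word (the compare-and-swap is asserted never to fail while
 * we own the lock and hold the global lock) and wakes one thread suspended
 * on the fat lock.
 */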
static void
js_Dequeue(JSThinLock *tl)
{
    jsword o;

    js_LockGlobal(tl);
    o = ReadWord(tl->owner);
    JS_ASSERT(Thin_GetWait(o) != 0);
    JS_ASSERT(tl->fat != NULL);
    if (!NativeCompareAndSwap(&tl->owner, o, 0)) /* release it */
        JS_ASSERT(0);
    js_ResumeThread(tl);
}
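
/*
 * ThinLock fast path: a single compare-and-swap from 0 to me claims the
 * lock; on failure we enqueue unless the lock word already names this
 * thread, which would be a recursive ThinLock and is caught by the DEBUG
 * assertion. ThinUnlock mirrors it, falling back to js_Dequeue when the
 * wait bit is set.
 */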
static JS_ALWAYS_INLINE void
ThinLock(JSThinLock *tl, jsword me)
{
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    if (NativeCompareAndSwap(&tl->owner, 0, me))
        return;
    if (Thin_RemoveWait(ReadWord(tl->owner)) != me)
        js_Enqueue(tl, me);
#ifdef DEBUG
    else
        JS_ASSERT(0);
#endif
}

static JS_ALWAYS_INLINE void
ThinUnlock(JSThinLock *tl, jsword me)
{
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));

    /*
     * Since we can race with the NativeCompareAndSwap in js_Enqueue, we need
     * to use a C_A_S here as well -- Arjan van de Ven 30/1/08
     */
    if (NativeCompareAndSwap(&tl->owner, me, 0))
        return;

    JS_ASSERT(Thin_GetWait(tl->owner));
    if (Thin_RemoveWait(ReadWord(tl->owner)) == me)
        js_Dequeue(tl);
#ifdef DEBUG
    else
        JS_ASSERT(0);   /* unbalanced unlock */
#endif
}

#endif /* !NSPR_LOCK */

void
js_Lock(JSContext *cx, JSThinLock *tl)
{
    ThinLock(tl, CX_THINLOCK_ID(cx));
}

void
js_Unlock(JSContext *cx, JSThinLock *tl)
{
    ThinUnlock(tl, CX_THINLOCK_ID(cx));
}

void
js_LockRuntime(JSRuntime *rt)
{
    PR_Lock(rt->rtLock);
#ifdef DEBUG
    rt->rtLockOwner = js_CurrentThreadId();
#endif
}

void
js_UnlockRuntime(JSRuntime *rt)
{
#ifdef DEBUG
    rt->rtLockOwner = 0;
#endif
    PR_Unlock(rt->rtLock);
}
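
/*
 * js_LockTitle: nothing to do on the GC thread; otherwise try to claim a
 * flyweight (exclusively owned) title, and only then fall back to the thin
 * lock, with u.count tracking recursive acquisitions by this thread.
 */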
void
js_LockTitle(JSContext *cx, JSTitle *title)
{
    jsword me = CX_THINLOCK_ID(cx);

    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    JS_ASSERT(title->ownercx != cx);
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;
    if (title->ownercx && ClaimTitle(title, cx))
        return;

    if (Thin_RemoveWait(ReadWord(title->lock.owner)) == me) {
        JS_ASSERT(title->u.count > 0);
        LOGIT(scope, '+');
        title->u.count++;
    } else {
        ThinLock(&title->lock, me);
        JS_ASSERT(title->u.count == 0);
        LOGIT(scope, '1');
        title->u.count = 1;
    }
}

void
js_UnlockTitle(JSContext *cx, JSTitle *title)
{
    jsword me = CX_THINLOCK_ID(cx);

    /* We hope compilers use me instead of reloading cx->thread in the macro. */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;
    if (cx->lockedSealedTitle == title) {
        cx->lockedSealedTitle = NULL;
        return;
    }
    /*
     * If title->ownercx is not null, it's likely that two contexts that are
     * not using requests nested locks for title. The first context, cx here,
     * claimed title; the second, title->ownercx here, re-claimed it because
     * the first was not in a request, or was on the same thread. We don't
     * want to keep track of such nesting, because it penalizes the common
     * non-nested case. Instead of asserting here and silently coping, we
     * simply re-claim title for cx and return.
     *
     * See http://bugzilla.mozilla.org/show_bug.cgi?id=229200 for a real-world
     * case where an asymmetric thread model (Mozilla's main thread is known
     * to be the only thread that runs the GC) combined with multiple contexts
     * per thread has led to such request-less nesting.
     */
    if (title->ownercx) {
        JS_ASSERT(title->u.count == 0);
        JS_ASSERT(title->lock.owner == 0);
        title->ownercx = cx;
        return;
    }

    JS_ASSERT(title->u.count > 0);
    if (Thin_RemoveWait(ReadWord(title->lock.owner)) != me) {
        JS_ASSERT(0);   /* unbalanced unlock */
        return;
    }
    LOGIT(scope, '-');
    if (--title->u.count == 0)
        ThinUnlock(&title->lock, me);
}

/*
 * NB: oldtitle may be null if our caller is js_GetMutableScope and it just
 * dropped the last reference to oldtitle.
 */
void
js_TransferTitle(JSContext *cx, JSTitle *oldtitle, JSTitle *newtitle)
{
    JS_ASSERT(JS_IS_TITLE_LOCKED(cx, newtitle));

    /*
     * If the last reference to oldtitle went away, newtitle needs no lock
     * state update.
     */
    if (!oldtitle)
        return;
    JS_ASSERT(JS_IS_TITLE_LOCKED(cx, oldtitle));

    /*
     * Special case in js_LockTitle and js_UnlockTitle for the GC calling
     * code that locks, unlocks, or mutates. Nothing to do in these cases,
     * because title and newtitle were "locked" by the GC thread, so neither
     * was actually locked.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;

    /*
     * Special case in js_LockObj and js_UnlockTitle for locking the sealed
     * scope of an object that owns that scope (the prototype or mutated obj
     * for which OBJ_SCOPE(obj)->object == obj), and unlocking it.
     */
    JS_ASSERT(cx->lockedSealedTitle != newtitle);
    if (cx->lockedSealedTitle == oldtitle) {
        JS_ASSERT(newtitle->ownercx == cx ||
                  (!newtitle->ownercx && newtitle->u.count == 1));
        cx->lockedSealedTitle = NULL;
        return;
    }

    /*
     * If oldtitle is single-threaded, there's nothing to do.
     */
    if (oldtitle->ownercx) {
        JS_ASSERT(oldtitle->ownercx == cx);
        JS_ASSERT(newtitle->ownercx == cx ||
                  (!newtitle->ownercx && newtitle->u.count == 1));
        return;
    }

    /*
     * We transfer oldtitle->u.count only if newtitle is not single-threaded.
     * Flow unwinds from here through some number of JS_UNLOCK_TITLE and/or
     * JS_UNLOCK_OBJ macro calls, which will decrement newtitle->u.count only
     * if they find newtitle->ownercx != cx.
     */
    if (newtitle->ownercx != cx) {
        JS_ASSERT(!newtitle->ownercx);
        newtitle->u.count = oldtitle->u.count;
    }

    /*
     * Reset oldtitle's lock state so that it is completely unlocked.
     */
    LOGIT(oldscope, '0');
    oldtitle->u.count = 0;
    ThinUnlock(&oldtitle->lock, CX_THINLOCK_ID(cx));
}

void
js_LockObj(JSContext *cx, JSObject *obj)
{
    JSScope *scope;
    JSTitle *title;

    JS_ASSERT(OBJ_IS_NATIVE(obj));

    /*
     * We must test whether the GC is calling and return without mutating any
     * state, especially cx->lockedSealedScope. Note asymmetry with respect to
     * js_UnlockObj, which is a thin-layer on top of js_UnlockTitle.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;

    for (;;) {
        scope = OBJ_SCOPE(obj);
        title = &scope->title;
        if (SCOPE_IS_SEALED(scope) && scope->object == obj &&
            !cx->lockedSealedTitle) {
            cx->lockedSealedTitle = title;
            return;
        }

        js_LockTitle(cx, title);

        /* If obj still has this scope, we're done. */
        if (scope == OBJ_SCOPE(obj))
            return;

        /* Lost a race with a mutator; retry with obj's new scope. */
        js_UnlockTitle(cx, title);
    }
}

void
js_UnlockObj(JSContext *cx, JSObject *obj)
{
    JS_ASSERT(OBJ_IS_NATIVE(obj));
    js_UnlockTitle(cx, &OBJ_SCOPE(obj)->title);
}
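
/*
 * js_InitTitle/js_FinishTitle: a title starts and ends life exclusively
 * owned by a single context (ownercx set, no thin/fat lock state), so the
 * thin lock only comes into play while the title is shared in between.
 */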
void
js_InitTitle(JSContext *cx, JSTitle *title)
{
#ifdef JS_THREADSAFE
    title->ownercx = cx;
    memset(&title->lock, 0, sizeof title->lock);

    /*
     * Set u.link = NULL, not u.count = 0, in case the target architecture's
     * null pointer has a non-zero integer representation.
     */
    title->u.link = NULL;

#ifdef JS_DEBUG_TITLE_LOCKS
    title->file[0] = title->file[1] = title->file[2] = title->file[3] = NULL;
    title->line[0] = title->line[1] = title->line[2] = title->line[3] = 0;
#endif
#endif
}

void
js_FinishTitle(JSContext *cx, JSTitle *title)
{
#ifdef JS_THREADSAFE
    /* Title must be single-threaded at this point, so set ownercx. */
    JS_ASSERT(title->u.count == 0);
    title->ownercx = cx;
    js_FinishLock(&title->lock);
#endif
}

#ifdef DEBUG

JSBool
js_IsRuntimeLocked(JSRuntime *rt)
{
    return js_CurrentThreadId() == rt->rtLockOwner;
}

JSBool
js_IsObjLocked(JSContext *cx, JSObject *obj)
{
    JSScope *scope = OBJ_SCOPE(obj);

    return MAP_IS_NATIVE(&scope->map) && js_IsTitleLocked(cx, &scope->title);
}

JSBool
js_IsTitleLocked(JSContext *cx, JSTitle *title)
{
    /* Special case: the GC locking any object's title, see js_LockTitle. */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return JS_TRUE;

    /* Special case: locked object owning a sealed scope, see js_LockObj. */
    if (cx->lockedSealedTitle == title)
        return JS_TRUE;

    /*
     * General case: the title is either exclusively owned (by cx), or it has
     * a thin or fat lock to cope with shared (concurrent) ownership.
     */
    if (title->ownercx) {
        JS_ASSERT(title->ownercx == cx || title->ownercx->thread == cx->thread);
        return JS_TRUE;
    }
    return js_CurrentThreadId() ==
           ((JSThread *)Thin_RemoveWait(ReadWord(title->lock.owner)))->id;
}

#ifdef JS_DEBUG_TITLE_LOCKS
void
js_SetScopeInfo(JSScope *scope, const char *file, int line)
{
    JSTitle *title = &scope->title;

    if (!title->ownercx) {
        jsrefcount count = title->u.count;
        JS_ASSERT_IF(!SCOPE_IS_SEALED(scope), count > 0);
        JS_ASSERT(count <= 4);
        title->file[count - 1] = file;
        title->line[count - 1] = line;
    }
}
#endif /* JS_DEBUG_TITLE_LOCKS */
#endif /* DEBUG */

#endif /* JS_THREADSAFE */