
/mono/metadata/monitor.c

https://bitbucket.org/danipen/mono
/*
 * monitor.c: Monitor locking functions
 *
 * Author:
 *	Dick Porter (dick@ximian.com)
 *
 * Copyright 2003 Ximian, Inc (http://www.ximian.com)
 * Copyright 2004-2009 Novell, Inc (http://www.novell.com)
 */
#include <config.h>
#include <glib.h>
#include <string.h>

#include <mono/metadata/monitor.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/threads.h>
#include <mono/io-layer/io-layer.h>
#include <mono/metadata/object-internals.h>
#include <mono/metadata/class-internals.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/method-builder.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/profiler-private.h>
#include <mono/utils/mono-time.h>
/*
 * Pull the list of opcodes
 */
#define OPDEF(a,b,c,d,e,f,g,h,i,j) \
	a = i,

enum {
#include "mono/cil/opcode.def"
	LAST = 0xff
};
#undef OPDEF

/*#define LOCK_DEBUG(a) do { a; } while (0)*/
#define LOCK_DEBUG(a)
/*
 * The monitor implementation here is based on
 * http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf and
 * http://www.research.ibm.com/people/d/dfb/papers/Bacon98Thin.ps
 *
 * The Dice paper describes a technique for saving lock record space
 * by returning records to a free list when they become unused. That
 * sounds like unnecessary complexity to me, though if it becomes
 * clear that unused lock records are taking up lots of space or we
 * need to shave more time off by avoiding a malloc then we can always
 * implement the free list idea later. The timeout parameter to
 * try_enter voids some of the assumptions about the reference count
 * field in Dice's implementation too. In his version, the thread
 * attempting to lock a contended object will block until it succeeds,
 * so the reference count will never be decremented while an object is
 * locked.
 *
 * Bacon's thin locks have a fast path that doesn't need a lock record
 * for the common case of locking an unlocked or shallow-nested
 * object, but the technique relies on encoding the thread ID in 15
 * bits (to avoid too much per-object space overhead.) Unfortunately
 * I don't think it's possible to reliably encode a pthread_t into 15
 * bits. (The JVM implementation used seems to have a 15-bit
 * per-thread identifier available.)
 *
 * This implementation then combines Dice's basic lock model with
 * Bacon's simplification of keeping a lock record for the lifetime of
 * an object.
 */
struct _MonoThreadsSync
{
	gsize owner;		/* thread ID */
	guint32 nest;
#ifdef HAVE_MOVING_COLLECTOR
	gint32 hash_code;
#endif
	volatile gint32 entry_count;
	HANDLE entry_sem;
	GSList *wait_list;
	void *data;
};

typedef struct _MonitorArray MonitorArray;

struct _MonitorArray {
	MonitorArray *next;
	int num_monitors;
	MonoThreadsSync monitors [MONO_ZERO_LEN_ARRAY];
};

#define mono_monitor_allocator_lock() EnterCriticalSection (&monitor_mutex)
#define mono_monitor_allocator_unlock() LeaveCriticalSection (&monitor_mutex)
static CRITICAL_SECTION monitor_mutex;
static MonoThreadsSync *monitor_freelist;
static MonitorArray *monitor_allocated;
static int array_size = 16;

#ifdef HAVE_KW_THREAD
static __thread gsize tls_pthread_self MONO_TLS_FAST;
#endif

#ifndef HOST_WIN32
#ifdef HAVE_KW_THREAD
#define GetCurrentThreadId() tls_pthread_self
#else
/*
 * The usual problem: we can't replace GetCurrentThreadId () with a macro because
 * it is in a public header.
 */
#define GetCurrentThreadId() ((gsize)pthread_self ())
#endif
#endif
void
mono_monitor_init (void)
{
	InitializeCriticalSection (&monitor_mutex);
}

void
mono_monitor_cleanup (void)
{
	MonoThreadsSync *mon;
	/* MonitorArray *marray, *next = NULL; */

	/*DeleteCriticalSection (&monitor_mutex);*/

	/* The monitors on the freelist don't have weak links - mark them */
	for (mon = monitor_freelist; mon; mon = mon->data)
		mon->wait_list = (gpointer)-1;

	/* FIXME: This still crashes with sgen (async_read.exe) */
	/*
	for (marray = monitor_allocated; marray; marray = next) {
		int i;

		for (i = 0; i < marray->num_monitors; ++i) {
			mon = &marray->monitors [i];
			if (mon->wait_list != (gpointer)-1)
				mono_gc_weak_link_remove (&mon->data);
		}

		next = marray->next;
		g_free (marray);
	}
	*/
}

/*
 * mono_monitor_init_tls:
 *
 * Setup TLS variables used by the monitor code for the current thread.
 */
void
mono_monitor_init_tls (void)
{
#if !defined(HOST_WIN32) && defined(HAVE_KW_THREAD)
	tls_pthread_self = pthread_self ();
#endif
}

static int
monitor_is_on_freelist (MonoThreadsSync *mon)
{
	MonitorArray *marray;
	for (marray = monitor_allocated; marray; marray = marray->next) {
		if (mon >= marray->monitors && mon < &marray->monitors [marray->num_monitors])
			return TRUE;
	}
	return FALSE;
}
/**
 * mono_locks_dump:
 * @include_untaken:
 *
 * Print a report on stdout of the managed locks currently held by
 * threads. If @include_untaken is specified, also list inflated locks
 * which are not currently held.
 * This is supposed to be used in debuggers like gdb.
 */
void
mono_locks_dump (gboolean include_untaken)
{
	int i;
	int used = 0, on_freelist = 0, to_recycle = 0, total = 0, num_arrays = 0;
	MonoThreadsSync *mon;
	MonitorArray *marray;
	for (mon = monitor_freelist; mon; mon = mon->data)
		on_freelist++;
	for (marray = monitor_allocated; marray; marray = marray->next) {
		total += marray->num_monitors;
		num_arrays++;
		for (i = 0; i < marray->num_monitors; ++i) {
			mon = &marray->monitors [i];
			if (mon->data == NULL) {
				if (i < marray->num_monitors - 1)
					to_recycle++;
			} else {
				if (!monitor_is_on_freelist (mon->data)) {
					MonoObject *holder = mono_gc_weak_link_get (&mon->data);
					if (mon->owner) {
						g_print ("Lock %p in object %p held by thread %p, nest level: %d\n",
							mon, holder, (void*)mon->owner, mon->nest);
						if (mon->entry_sem)
							g_print ("\tWaiting on semaphore %p: %d\n", mon->entry_sem, mon->entry_count);
					} else if (include_untaken) {
						g_print ("Lock %p in object %p untaken\n", mon, holder);
					}
					used++;
				}
			}
		}
	}
	g_print ("Total locks (in %d array(s)): %d, used: %d, on freelist: %d, to recycle: %d\n",
		num_arrays, total, used, on_freelist, to_recycle);
}
/* LOCKING: this is called with monitor_mutex held */
static void
mon_finalize (MonoThreadsSync *mon)
{
	LOCK_DEBUG (g_message ("%s: Finalizing sync %p", __func__, mon));

	if (mon->entry_sem != NULL) {
		CloseHandle (mon->entry_sem);
		mon->entry_sem = NULL;
	}
	/* If this isn't empty then something is seriously broken - it
	 * means a thread is still waiting on the object that owned
	 * this lock, but the object has been finalized.
	 */
	g_assert (mon->wait_list == NULL);

	mon->entry_count = 0;
	/* owner and nest are set in mon_new, no need to zero them out */

	mon->data = monitor_freelist;
	monitor_freelist = mon;
#ifndef DISABLE_PERFCOUNTERS
	mono_perfcounters->gc_sync_blocks--;
#endif
}
/* LOCKING: this is called with monitor_mutex held */
static MonoThreadsSync *
mon_new (gsize id)
{
	MonoThreadsSync *new;

	if (!monitor_freelist) {
		MonitorArray *marray;
		int i;
		/* see if any sync block has been collected */
		new = NULL;
		for (marray = monitor_allocated; marray; marray = marray->next) {
			for (i = 0; i < marray->num_monitors; ++i) {
				if (marray->monitors [i].data == NULL) {
					new = &marray->monitors [i];
					if (new->wait_list) {
						/* Orphaned events left by aborted threads */
						while (new->wait_list) {
							LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d): Closing orphaned event %d", GetCurrentThreadId (), new->wait_list->data));
							CloseHandle (new->wait_list->data);
							new->wait_list = g_slist_remove (new->wait_list, new->wait_list->data);
						}
					}
					mono_gc_weak_link_remove (&new->data, FALSE);
					new->data = monitor_freelist;
					monitor_freelist = new;
				}
			}
			/* small perf tweak to avoid scanning all the blocks */
			if (new)
				break;
		}
		/* need to allocate a new array of monitors */
		if (!monitor_freelist) {
			MonitorArray *last;
			LOCK_DEBUG (g_message ("%s: allocating more monitors: %d", __func__, array_size));
			/* allocate the MonitorArray header plus its flexible monitors array */
			marray = g_malloc0 (sizeof (MonitorArray) + array_size * sizeof (MonoThreadsSync));
			marray->num_monitors = array_size;
			array_size *= 2;
			/* link into the freelist */
			for (i = 0; i < marray->num_monitors - 1; ++i) {
				marray->monitors [i].data = &marray->monitors [i + 1];
			}
			marray->monitors [i].data = NULL; /* the last one */
			monitor_freelist = &marray->monitors [0];
			/* we append the marray instead of prepending so that
			 * the collecting loop above will scan smaller arrays first
			 */
			if (!monitor_allocated) {
				monitor_allocated = marray;
			} else {
				last = monitor_allocated;
				while (last->next)
					last = last->next;
				last->next = marray;
			}
		}
	}

	new = monitor_freelist;
	monitor_freelist = new->data;

	new->owner = id;
	new->nest = 1;
	new->data = NULL;

#ifndef DISABLE_PERFCOUNTERS
	mono_perfcounters->gc_sync_blocks++;
#endif
	return new;
}
/*
 * Format of the lock word:
 * thinhash | fathash | data
 *
 * thinhash is the lower bit: if set, data is the shifted hashcode of the object.
 * fathash is another bit: if set, the hash code is stored in the MonoThreadsSync
 *   struct pointed to by data.
 * If neither bit is set and data is non-NULL, data is a MonoThreadsSync.
 */
typedef union {
	gsize lock_word;
	MonoThreadsSync *sync;
} LockWord;

enum {
	LOCK_WORD_THIN_HASH = 1,
	LOCK_WORD_FAT_HASH = 1 << 1,
	LOCK_WORD_BITS_MASK = 0x3,
	LOCK_WORD_HASH_SHIFT = 2
};

#define MONO_OBJECT_ALIGNMENT_SHIFT	3
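
/*
 * Illustrative sketch only, not part of the original file: a decoder for
 * the lock word format described above (the helper name is made up).
 * It only restates the tagging rules: low bit set = thin hash in the word,
 * second bit set = fat hash stored in the MonoThreadsSync, no tag bits =
 * plain MonoThreadsSync pointer (or NULL if never locked/hashed).
 */
#ifdef HAVE_MOVING_COLLECTOR
static G_GNUC_UNUSED void
lock_word_describe_sketch (MonoObject *obj)
{
	LockWord lw;

	lw.sync = obj->synchronisation;
	if (lw.lock_word & LOCK_WORD_THIN_HASH) {
		/* the rest of the word holds the shifted hash code */
		g_print ("thin hash %u\n", (unsigned int)(lw.lock_word >> LOCK_WORD_HASH_SHIFT));
	} else if (lw.lock_word & LOCK_WORD_FAT_HASH) {
		/* mask the tag bits to recover the MonoThreadsSync pointer;
		 * the hash lives in its hash_code field */
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		g_print ("fat hash %d in sync %p\n", lw.sync->hash_code, lw.sync);
	} else if (lw.sync) {
		g_print ("inflated lock, sync %p\n", lw.sync);
	} else {
		g_print ("never locked or hashed\n");
	}
}
#endif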
/*
 * mono_object_hash:
 * @obj: an object
 *
 * Calculate a hash code for @obj that is constant while @obj is alive.
 */
int
mono_object_hash (MonoObject* obj)
{
#ifdef HAVE_MOVING_COLLECTOR
	LockWord lw;
	unsigned int hash;
	if (!obj)
		return 0;
	lw.sync = obj->synchronisation;
	if (lw.lock_word & LOCK_WORD_THIN_HASH) {
		/*g_print ("fast thin hash %d for obj %p store\n", (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT, obj);*/
		return (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT;
	}
	if (lw.lock_word & LOCK_WORD_FAT_HASH) {
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		/*g_print ("fast fat hash %d for obj %p store\n", lw.sync->hash_code, obj);*/
		return lw.sync->hash_code;
	}
	/*
	 * While we are inside this function, the GC will keep this object pinned,
	 * since we are in the unmanaged stack. Thanks to this and to the hash
	 * function that depends only on the address, we can ignore the races if
	 * another thread computes the hash at the same time, because it'll end up
	 * with the same value.
	 */
	hash = (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
	/* clear the top bits as they can be discarded */
	hash &= ~(LOCK_WORD_BITS_MASK << 30);
	/* no hash flags were set, so it must be a MonoThreadsSync pointer if not NULL */
	if (lw.sync) {
		lw.sync->hash_code = hash;
		/*g_print ("storing hash code %d for obj %p in sync %p\n", hash, obj, lw.sync);*/
		lw.lock_word |= LOCK_WORD_FAT_HASH;
		/* this is safe since we don't deflate locks */
		obj->synchronisation = lw.sync;
	} else {
		/*g_print ("storing thin hash code %d for obj %p\n", hash, obj);*/
		lw.lock_word = LOCK_WORD_THIN_HASH | (hash << LOCK_WORD_HASH_SHIFT);
		if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, NULL) == NULL)
			return hash;
		/*g_print ("failed store\n");*/
		/* someone set the hash flag or someone inflated the object */
		lw.sync = obj->synchronisation;
		if (lw.lock_word & LOCK_WORD_THIN_HASH)
			return hash;
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		lw.sync->hash_code = hash;
		lw.lock_word |= LOCK_WORD_FAT_HASH;
		/* this is safe since we don't deflate locks */
		obj->synchronisation = lw.sync;
	}
	return hash;
#else
	/*
	 * Wang's address-based hash function:
	 *   http://www.concentric.net/~Ttwang/tech/addrhash.htm
	 */
	return (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
#endif
}
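
/*
 * Illustrative sketch only, not part of the original file: the address
 * hash used above, applied to a raw pointer, to make the role of the
 * alignment shift concrete (objects are at least 8-byte aligned, so the
 * low MONO_OBJECT_ALIGNMENT_SHIFT bits carry no information and are
 * discarded before scattering with the multiplicative-hash constant).
 */
static G_GNUC_UNUSED unsigned int
address_hash_sketch (gpointer p)
{
	/* drop the always-zero alignment bits, then multiply by the same
	 * constant mono_object_hash uses to spread nearby addresses apart */
	return (GPOINTER_TO_UINT (p) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
}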
/* If allow_interruption==TRUE, the method can be interrupted if an abort
 * or suspend is requested. In this case it returns -1.
 */
static inline gint32
mono_monitor_try_enter_internal (MonoObject *obj, guint32 ms, gboolean allow_interruption)
{
	MonoThreadsSync *mon;
	gsize id = GetCurrentThreadId ();
	HANDLE sem;
	guint32 then = 0, now, delta;
	guint32 waitms;
	guint32 ret;
	MonoInternalThread *thread;

	LOCK_DEBUG (g_message ("%s: (%d) Trying to lock object %p (%d ms)", __func__, id, obj, ms));

	if (G_UNLIKELY (!obj)) {
		mono_raise_exception (mono_get_exception_argument_null ("obj"));
		return FALSE;
	}

retry:
	mon = obj->synchronisation;

	/* If the object has never been locked... */
	if (G_UNLIKELY (mon == NULL)) {
		mono_monitor_allocator_lock ();
		mon = mon_new (id);
		if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, mon, NULL) == NULL) {
			mono_gc_weak_link_add (&mon->data, obj, FALSE);
			mono_monitor_allocator_unlock ();
			/* Successfully locked */
			return 1;
		} else {
#ifdef HAVE_MOVING_COLLECTOR
			LockWord lw;
			lw.sync = obj->synchronisation;
			if (lw.lock_word & LOCK_WORD_THIN_HASH) {
				MonoThreadsSync *oldlw = lw.sync;
				/* move the already calculated hash */
				mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
				lw.sync = mon;
				lw.lock_word |= LOCK_WORD_FAT_HASH;
				if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
					mono_gc_weak_link_add (&mon->data, obj, FALSE);
					mono_monitor_allocator_unlock ();
					/* Successfully locked */
					return 1;
				} else {
					mon_finalize (mon);
					mono_monitor_allocator_unlock ();
					goto retry;
				}
			} else if (lw.lock_word & LOCK_WORD_FAT_HASH) {
				mon_finalize (mon);
				mono_monitor_allocator_unlock ();
				/* get the old lock without the fat hash bit */
				lw.lock_word &= ~LOCK_WORD_BITS_MASK;
				mon = lw.sync;
			} else {
				mon_finalize (mon);
				mono_monitor_allocator_unlock ();
				mon = obj->synchronisation;
			}
#else
			mon_finalize (mon);
			mono_monitor_allocator_unlock ();
			mon = obj->synchronisation;
#endif
		}
	} else {
#ifdef HAVE_MOVING_COLLECTOR
		LockWord lw;
		lw.sync = mon;
		if (lw.lock_word & LOCK_WORD_THIN_HASH) {
			MonoThreadsSync *oldlw = lw.sync;
			mono_monitor_allocator_lock ();
			mon = mon_new (id);
			/* move the already calculated hash */
			mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
			lw.sync = mon;
			lw.lock_word |= LOCK_WORD_FAT_HASH;
			if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
				mono_gc_weak_link_add (&mon->data, obj, TRUE);
				mono_monitor_allocator_unlock ();
				/* Successfully locked */
				return 1;
			} else {
				mon_finalize (mon);
				mono_monitor_allocator_unlock ();
				goto retry;
			}
		}
#endif
	}

#ifdef HAVE_MOVING_COLLECTOR
	{
		LockWord lw;
		lw.sync = mon;
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		mon = lw.sync;
	}
#endif

	/* If the object has previously been locked but isn't now... */

	/* This case differs from Dice's case 3 because we don't
	 * deflate locks or cache unused lock records
	 */
	if (G_LIKELY (mon->owner == 0)) {
		/* Try to install our ID in the owner field, nest
		 * should have been left at 1 by the previous unlock
		 * operation
		 */
		if (G_LIKELY (InterlockedCompareExchangePointer ((gpointer *)&mon->owner, (gpointer)id, 0) == 0)) {
			/* Success */
			g_assert (mon->nest == 1);
			return 1;
		} else {
			/* Trumped again! */
			goto retry;
		}
	}

	/* If the object is currently locked by this thread... */
	if (mon->owner == id) {
		mon->nest++;
		return 1;
	}

	/* The object must be locked by someone else... */
#ifndef DISABLE_PERFCOUNTERS
	mono_perfcounters->thread_contentions++;
#endif

	/* If ms is 0 we don't block, but just fail straight away */
	if (ms == 0) {
		LOCK_DEBUG (g_message ("%s: (%d) timed out, returning FALSE", __func__, id));
		return 0;
	}

	mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_CONTENTION);

	/* The slow path begins here. */
retry_contended:
	/* a small amount of duplicated code, but it allows us to insert the profiler
	 * callbacks without impacting the fast path: from here on we don't need to go back to the
	 * retry label, but to retry_contended. At this point mon is already installed in the object
	 * header.
	 */
	/* This case differs from Dice's case 3 because we don't
	 * deflate locks or cache unused lock records
	 */
	if (G_LIKELY (mon->owner == 0)) {
		/* Try to install our ID in the owner field, nest
		 * should have been left at 1 by the previous unlock
		 * operation
		 */
		if (G_LIKELY (InterlockedCompareExchangePointer ((gpointer *)&mon->owner, (gpointer)id, 0) == 0)) {
			/* Success */
			g_assert (mon->nest == 1);
			mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_DONE);
			return 1;
		}
	}

	/* If the object is currently locked by this thread... */
	if (mon->owner == id) {
		mon->nest++;
		mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_DONE);
		return 1;
	}

	/* We need to make sure there's a semaphore handle (creating it if
	 * necessary), and block on it
	 */
	if (mon->entry_sem == NULL) {
		/* Create the semaphore */
		sem = CreateSemaphore (NULL, 0, 0x7fffffff, NULL);
		g_assert (sem != NULL);
		if (InterlockedCompareExchangePointer ((gpointer*)&mon->entry_sem, sem, NULL) != NULL) {
			/* Someone else just put a handle here */
			CloseHandle (sem);
		}
	}
	/* If we need to time out, record a timestamp and adjust ms,
	 * because WaitForSingleObject doesn't tell us how long it
	 * waited for.
	 *
	 * Don't block forever here, because there's a chance the owner
	 * thread released the lock while we were creating the
	 * semaphore: we would not get the wakeup. Using the event
	 * handle technique from pulse/wait would involve locking the
	 * lock struct and therefore slowing down the fast path.
	 */
	if (ms != INFINITE) {
		then = mono_msec_ticks ();
		if (ms < 100) {
			waitms = ms;
		} else {
			waitms = 100;
		}
	} else {
		waitms = 100;
	}

	InterlockedIncrement (&mon->entry_count);

#ifndef DISABLE_PERFCOUNTERS
	mono_perfcounters->thread_queue_len++;
	mono_perfcounters->thread_queue_max++;
#endif
	thread = mono_thread_internal_current ();

	mono_thread_set_state (thread, ThreadState_WaitSleepJoin);

	/*
	 * We pass TRUE instead of allow_interruption since we have to check for the
	 * StopRequested case below.
	 */
	ret = WaitForSingleObjectEx (mon->entry_sem, waitms, TRUE);

	mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);

	InterlockedDecrement (&mon->entry_count);
#ifndef DISABLE_PERFCOUNTERS
	mono_perfcounters->thread_queue_len--;
#endif

	if (ms != INFINITE) {
		now = mono_msec_ticks ();

		if (now < then) {
			/* The counter must have wrapped around */
			LOCK_DEBUG (g_message ("%s: wrapped around! now=0x%x then=0x%x", __func__, now, then));

			now += (0xffffffff - then);
			then = 0;

			LOCK_DEBUG (g_message ("%s: wrap rejig: now=0x%x then=0x%x delta=0x%x", __func__, now, then, now-then));
		}

		delta = now - then;
		if (delta >= ms) {
			ms = 0;
		} else {
			ms -= delta;
		}

		if ((ret == WAIT_TIMEOUT || (ret == WAIT_IO_COMPLETION && !allow_interruption)) && ms > 0) {
			/* More time left */
			goto retry_contended;
		}
	} else {
		if (ret == WAIT_TIMEOUT || (ret == WAIT_IO_COMPLETION && !allow_interruption)) {
			if (ret == WAIT_IO_COMPLETION && (mono_thread_test_state (mono_thread_internal_current (), (ThreadState_StopRequested|ThreadState_SuspendRequested)))) {
				/*
				 * We have to obey a stop/suspend request even if
				 * allow_interruption is FALSE to avoid hangs at shutdown.
				 */
				mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_FAIL);
				return -1;
			}
			/* Infinite wait, so just try again */
			goto retry_contended;
		}
	}

	if (ret == WAIT_OBJECT_0) {
		/* retry from the top */
		goto retry_contended;
	}

	/* We must have timed out */
	LOCK_DEBUG (g_message ("%s: (%d) timed out waiting, returning FALSE", __func__, id));

	mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_FAIL);

	if (ret == WAIT_IO_COMPLETION)
		return -1;
	else
		return 0;
}
gboolean
mono_monitor_enter (MonoObject *obj)
{
	return mono_monitor_try_enter_internal (obj, INFINITE, FALSE) == 1;
}

gboolean
mono_monitor_try_enter (MonoObject *obj, guint32 ms)
{
	return mono_monitor_try_enter_internal (obj, ms, FALSE) == 1;
}
void
mono_monitor_exit (MonoObject *obj)
{
	MonoThreadsSync *mon;
	guint32 nest;

	LOCK_DEBUG (g_message ("%s: (%d) Unlocking %p", __func__, GetCurrentThreadId (), obj));

	if (G_UNLIKELY (!obj)) {
		mono_raise_exception (mono_get_exception_argument_null ("obj"));
		return;
	}

	mon = obj->synchronisation;

#ifdef HAVE_MOVING_COLLECTOR
	{
		LockWord lw;
		lw.sync = mon;
		if (lw.lock_word & LOCK_WORD_THIN_HASH)
			return;
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		mon = lw.sync;
	}
#endif
	if (G_UNLIKELY (mon == NULL)) {
		/* No one ever used Enter. Just ignore the Exit request as MS does */
		return;
	}
	if (G_UNLIKELY (mon->owner != GetCurrentThreadId ())) {
		return;
	}

	nest = mon->nest - 1;
	if (nest == 0) {
		LOCK_DEBUG (g_message ("%s: (%d) Object %p is now unlocked", __func__, GetCurrentThreadId (), obj));

		/* object is now unlocked, leave nest==1 so we don't
		 * need to set it when the lock is reacquired
		 */
		mon->owner = 0;

		/* Do the wakeup stuff. It's possible that the last
		 * blocking thread gave up waiting just before we
		 * release the semaphore resulting in a futile wakeup
		 * next time there's contention for this object, but
		 * it means we don't have to waste time locking the
		 * struct.
		 */
		if (mon->entry_count > 0) {
			ReleaseSemaphore (mon->entry_sem, 1, NULL);
		}
	} else {
		LOCK_DEBUG (g_message ("%s: (%d) Object %p is now locked %d times", __func__, GetCurrentThreadId (), obj, nest));
		mon->nest = nest;
	}
}
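
/*
 * Illustrative sketch only, not part of the original file: how an
 * embedding host might pair the enter/exit API above to run a callback
 * while holding an object's monitor. The callback typedef and helper
 * name are made up for the example.
 */
typedef void (*SketchLockedFunc) (MonoObject *obj);

static G_GNUC_UNUSED gboolean
with_object_monitor_sketch (MonoObject *obj, guint32 timeout_ms, SketchLockedFunc func)
{
	/* mono_monitor_try_enter returns TRUE only when the lock was taken
	 * within the timeout (the internal -1 interruption result cannot
	 * occur here, since the public wrappers pass allow_interruption=FALSE) */
	if (!mono_monitor_try_enter (obj, timeout_ms))
		return FALSE;
	func (obj);
	/* Exit must run on the owning thread; exiting a monitor this thread
	 * does not own is silently ignored, as mono_monitor_exit does above */
	mono_monitor_exit (obj);
	return TRUE;
}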
void**
mono_monitor_get_object_monitor_weak_link (MonoObject *object)
{
	LockWord lw;
	MonoThreadsSync *sync = NULL;

	lw.sync = object->synchronisation;
	if (lw.lock_word & LOCK_WORD_FAT_HASH) {
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		sync = lw.sync;
	} else if (!(lw.lock_word & LOCK_WORD_THIN_HASH)) {
		sync = lw.sync;
	}

	if (sync && sync->data)
		return &sync->data;
	return NULL;
}
#ifndef DISABLE_JIT
static void
emit_obj_syncp_check (MonoMethodBuilder *mb, int syncp_loc, int *obj_null_branch, int *true_locktaken_branch, int *syncp_true_false_branch,
	int *thin_hash_branch, gboolean branch_on_true)
{
	/*
	  ldarg		0						obj
	  brfalse.s	obj_null
	*/

	mono_mb_emit_byte (mb, CEE_LDARG_0);
	*obj_null_branch = mono_mb_emit_short_branch (mb, CEE_BRFALSE_S);

	/*
	  ldarg.1
	  ldind.i1
	  brtrue.s	true_locktaken
	*/
	if (true_locktaken_branch) {
		mono_mb_emit_byte (mb, CEE_LDARG_1);
		mono_mb_emit_byte (mb, CEE_LDIND_I1);
		*true_locktaken_branch = mono_mb_emit_short_branch (mb, CEE_BRTRUE_S);
	}

	/*
	  ldarg		0						obj
	  conv.i							objp
	  ldc.i4	G_STRUCT_OFFSET(MonoObject, synchronisation)	objp off
	  add								&syncp
	  ldind.i							syncp
	  stloc		syncp
	  ldloc		syncp						syncp
	  brtrue/false.s syncp_true_false
	*/

	mono_mb_emit_byte (mb, CEE_LDARG_0);
	mono_mb_emit_byte (mb, CEE_CONV_I);
	mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoObject, synchronisation));
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	mono_mb_emit_stloc (mb, syncp_loc);

	if (mono_gc_is_moving ()) {
		/* check for a thin hash */
		mono_mb_emit_ldloc (mb, syncp_loc);
		mono_mb_emit_icon (mb, 0x01);
		mono_mb_emit_byte (mb, CEE_CONV_I);
		mono_mb_emit_byte (mb, CEE_AND);
		*thin_hash_branch = mono_mb_emit_short_branch (mb, CEE_BRTRUE_S);

		/* clear gc bits */
		mono_mb_emit_ldloc (mb, syncp_loc);
		mono_mb_emit_icon (mb, ~0x3);
		mono_mb_emit_byte (mb, CEE_CONV_I);
		mono_mb_emit_byte (mb, CEE_AND);
		mono_mb_emit_stloc (mb, syncp_loc);
	} else {
		*thin_hash_branch = 0;
	}

	mono_mb_emit_ldloc (mb, syncp_loc);
	*syncp_true_false_branch = mono_mb_emit_short_branch (mb, branch_on_true ? CEE_BRTRUE_S : CEE_BRFALSE_S);
}
#endif
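
/*
 * Illustrative sketch only, not part of the original file: roughly the C
 * equivalent of the IL that emit_obj_syncp_check () generates, to make the
 * branch structure easier to follow. In the real wrapper each failing check
 * branches to a distinct label that the caller patches later; here they are
 * folded into a single slow_path label.
 */
static G_GNUC_UNUSED gpointer
obj_syncp_check_sketch (MonoObject *obj, char *lock_taken)
{
	gsize syncp;

	if (!obj)
		goto slow_path;			/* obj_null branch */
	if (lock_taken && *lock_taken)
		goto slow_path;			/* true_locktaken branch (v4 Enter only) */

	syncp = (gsize)obj->synchronisation;
	if (mono_gc_is_moving ()) {
		if (syncp & 0x01)
			goto slow_path;		/* thin_hash branch */
		syncp &= ~(gsize)0x3;		/* clear the gc/tag bits */
	}
	/* the generated IL then branches on syncp being NULL or non-NULL */
	return (gpointer)syncp;

slow_path:
	return NULL;
}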
static MonoMethod* monitor_il_fastpaths[3];

gboolean
mono_monitor_is_il_fastpath_wrapper (MonoMethod *method)
{
	int i;
	for (i = 0; i < 3; ++i) {
		if (monitor_il_fastpaths [i] == method)
			return TRUE;
	}
	return FALSE;
}

enum {
	FASTPATH_ENTER,
	FASTPATH_ENTERV4,
	FASTPATH_EXIT
};

static MonoMethod*
register_fastpath (MonoMethod *method, int idx)
{
	mono_memory_barrier ();
	monitor_il_fastpaths [idx] = method;
	return method;
}
static MonoMethod*
mono_monitor_get_fast_enter_method (MonoMethod *monitor_enter_method)
{
	MonoMethodBuilder *mb;
	MonoMethod *res;
	static MonoMethod *compare_exchange_method;
	int obj_null_branch, true_locktaken_branch = 0, syncp_null_branch, has_owner_branch, other_owner_branch, tid_branch, thin_hash_branch;
	int tid_loc, syncp_loc, owner_loc;
	int thread_tls_offset;
	gboolean is_v4 = mono_method_signature (monitor_enter_method)->param_count == 2;
	int fast_path_idx = is_v4 ? FASTPATH_ENTERV4 : FASTPATH_ENTER;
	WrapperInfo *info;

	/* The !is_v4 version is not used/tested */
	g_assert (is_v4);

	thread_tls_offset = mono_thread_get_tls_offset ();
	if (thread_tls_offset == -1)
		return NULL;

	if (monitor_il_fastpaths [fast_path_idx])
		return monitor_il_fastpaths [fast_path_idx];

	if (!compare_exchange_method) {
		MonoMethodDesc *desc;
		MonoClass *class;

		desc = mono_method_desc_new ("Interlocked:CompareExchange(intptr&,intptr,intptr)", FALSE);
		class = mono_class_from_name (mono_defaults.corlib, "System.Threading", "Interlocked");
		compare_exchange_method = mono_method_desc_search_in_class (desc, class);
		mono_method_desc_free (desc);
		if (!compare_exchange_method)
			return NULL;
	}

	mb = mono_mb_new (mono_defaults.monitor_class, is_v4 ? "FastMonitorEnterV4" : "FastMonitorEnter", MONO_WRAPPER_UNKNOWN);

	mb->method->slot = -1;
	mb->method->flags = METHOD_ATTRIBUTE_PUBLIC | METHOD_ATTRIBUTE_STATIC |
		METHOD_ATTRIBUTE_HIDE_BY_SIG | METHOD_ATTRIBUTE_FINAL;

#ifndef DISABLE_JIT
	tid_loc = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
	syncp_loc = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
	owner_loc = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);

	emit_obj_syncp_check (mb, syncp_loc, &obj_null_branch, is_v4 ? &true_locktaken_branch : NULL, &syncp_null_branch, &thin_hash_branch, FALSE);

	/*
	  mono. tls	thread_tls_offset				threadp
	  ldc.i4	G_STRUCT_OFFSET(MonoThread, tid)		threadp off
	  add								&tid
	  ldind.i							tid
	  stloc		tid
	  ldloc		syncp						syncp
	  ldc.i4	G_STRUCT_OFFSET(MonoThreadsSync, owner)		syncp off
	  add								&owner
	  ldind.i							owner
	  stloc		owner
	  ldloc		owner						owner
	  brtrue.s	tid
	*/

	mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
	mono_mb_emit_byte (mb, CEE_MONO_TLS);
	mono_mb_emit_i4 (mb, thread_tls_offset);
	mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoInternalThread, tid));
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	mono_mb_emit_stloc (mb, tid_loc);
	mono_mb_emit_ldloc (mb, syncp_loc);
	mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, owner));
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	mono_mb_emit_stloc (mb, owner_loc);
	mono_mb_emit_ldloc (mb, owner_loc);
	tid_branch = mono_mb_emit_short_branch (mb, CEE_BRTRUE_S);

	/*
	  ldloc		syncp						syncp
	  ldc.i4	G_STRUCT_OFFSET(MonoThreadsSync, owner)		syncp off
	  add								&owner
	  ldloc		tid						&owner tid
	  ldc.i4	0						&owner tid 0
	  call		System.Threading.Interlocked.CompareExchange	oldowner
	  brtrue.s	has_owner
	  ret
	*/

	mono_mb_emit_ldloc (mb, syncp_loc);
	mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, owner));
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_ldloc (mb, tid_loc);
	mono_mb_emit_byte (mb, CEE_LDC_I4_0);
	mono_mb_emit_managed_call (mb, compare_exchange_method, NULL);
	has_owner_branch = mono_mb_emit_short_branch (mb, CEE_BRTRUE_S);

	if (is_v4) {
		mono_mb_emit_byte (mb, CEE_LDARG_1);
		mono_mb_emit_byte (mb, CEE_LDC_I4_1);
		mono_mb_emit_byte (mb, CEE_STIND_I1);
	}
	mono_mb_emit_byte (mb, CEE_RET);

	/*
	 tid:
	  ldloc		owner						owner
	  ldloc		tid						owner tid
	  brne.s	other_owner
	  ldloc		syncp						syncp
	  ldc.i4	G_STRUCT_OFFSET(MonoThreadsSync, nest)		syncp off
	  add								&nest
	  dup								&nest &nest
	  ldind.i4							&nest nest
	  ldc.i4	1						&nest nest 1
	  add								&nest nest+
	  stind.i4
	  ret
	*/

	mono_mb_patch_short_branch (mb, tid_branch);
	mono_mb_emit_ldloc (mb, owner_loc);
	mono_mb_emit_ldloc (mb, tid_loc);
	other_owner_branch = mono_mb_emit_short_branch (mb, CEE_BNE_UN_S);
	mono_mb_emit_ldloc (mb, syncp_loc);
	mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, nest));
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_DUP);
	mono_mb_emit_byte (mb, CEE_LDIND_I4);
	mono_mb_emit_byte (mb, CEE_LDC_I4_1);
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_STIND_I4);

	if (is_v4) {
		mono_mb_emit_byte (mb, CEE_LDARG_1);
		mono_mb_emit_byte (mb, CEE_LDC_I4_1);
		mono_mb_emit_byte (mb, CEE_STIND_I1);
	}
	mono_mb_emit_byte (mb, CEE_RET);

	/*
	 obj_null, syncp_null, has_owner, other_owner:
	  ldarg		0						obj
	  call		System.Threading.Monitor.Enter
	  ret
	*/

	if (thin_hash_branch)
		mono_mb_patch_short_branch (mb, thin_hash_branch);
	mono_mb_patch_short_branch (mb, obj_null_branch);
	mono_mb_patch_short_branch (mb, syncp_null_branch);
	mono_mb_patch_short_branch (mb, has_owner_branch);
	mono_mb_patch_short_branch (mb, other_owner_branch);
	if (true_locktaken_branch)
		mono_mb_patch_short_branch (mb, true_locktaken_branch);
	mono_mb_emit_byte (mb, CEE_LDARG_0);
	if (is_v4)
		mono_mb_emit_byte (mb, CEE_LDARG_1);
	mono_mb_emit_managed_call (mb, monitor_enter_method, NULL);
	mono_mb_emit_byte (mb, CEE_RET);
#endif

	res = register_fastpath (mono_mb_create_method (mb, mono_signature_no_pinvoke (monitor_enter_method), 5), fast_path_idx);

	info = mono_image_alloc0 (mono_defaults.corlib, sizeof (WrapperInfo));
	info->subtype = is_v4 ? WRAPPER_SUBTYPE_FAST_MONITOR_ENTER_V4 : WRAPPER_SUBTYPE_FAST_MONITOR_ENTER;
	mono_marshal_set_wrapper_info (res, info);

	mono_mb_free (mb);
	return res;
}
static MonoMethod*
mono_monitor_get_fast_exit_method (MonoMethod *monitor_exit_method)
{
	MonoMethodBuilder *mb;
	MonoMethod *res;
	int obj_null_branch, has_waiting_branch, has_syncp_branch, owned_branch, nested_branch, thin_hash_branch;
	int thread_tls_offset;
	int syncp_loc;
	WrapperInfo *info;

	thread_tls_offset = mono_thread_get_tls_offset ();
	if (thread_tls_offset == -1)
		return NULL;

	if (monitor_il_fastpaths [FASTPATH_EXIT])
		return monitor_il_fastpaths [FASTPATH_EXIT];

	mb = mono_mb_new (mono_defaults.monitor_class, "FastMonitorExit", MONO_WRAPPER_UNKNOWN);

	mb->method->slot = -1;
	mb->method->flags = METHOD_ATTRIBUTE_PUBLIC | METHOD_ATTRIBUTE_STATIC |
		METHOD_ATTRIBUTE_HIDE_BY_SIG | METHOD_ATTRIBUTE_FINAL;

#ifndef DISABLE_JIT
	syncp_loc = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);

	emit_obj_syncp_check (mb, syncp_loc, &obj_null_branch, NULL, &has_syncp_branch, &thin_hash_branch, TRUE);

	/*
	  ret
	*/

	mono_mb_emit_byte (mb, CEE_RET);

	/*
	 has_syncp:
	  ldloc		syncp						syncp
	  ldc.i4	G_STRUCT_OFFSET(MonoThreadsSync, owner)		syncp off
	  add								&owner
	  ldind.i							owner
	  mono. tls	thread_tls_offset				owner threadp
	  ldc.i4	G_STRUCT_OFFSET(MonoThread, tid)		owner threadp off
	  add								owner &tid
	  ldind.i							owner tid
	  beq.s		owned
	*/

	mono_mb_patch_short_branch (mb, has_syncp_branch);
	mono_mb_emit_ldloc (mb, syncp_loc);
	mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, owner));
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
	mono_mb_emit_byte (mb, CEE_MONO_TLS);
	mono_mb_emit_i4 (mb, thread_tls_offset);
	mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoInternalThread, tid));
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	owned_branch = mono_mb_emit_short_branch (mb, CEE_BEQ_S);

	/*
	  ret
	*/

	mono_mb_emit_byte (mb, CEE_RET);

	/*
	 owned:
	  ldloc		syncp						syncp
	  ldc.i4	G_STRUCT_OFFSET(MonoThreadsSync, nest)		syncp off
	  add								&nest
	  dup								&nest &nest
	  ldind.i4							&nest nest
	  dup								&nest nest nest
	  ldc.i4	1						&nest nest nest 1
	  bgt.un.s	nested						&nest nest
	*/

	mono_mb_patch_short_branch (mb, owned_branch);
	mono_mb_emit_ldloc (mb, syncp_loc);
	mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, nest));
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_DUP);
	mono_mb_emit_byte (mb, CEE_LDIND_I4);
	mono_mb_emit_byte (mb, CEE_DUP);
	mono_mb_emit_byte (mb, CEE_LDC_I4_1);
	nested_branch = mono_mb_emit_short_branch (mb, CEE_BGT_UN_S);

	/*
	  pop								&nest
	  pop
	  ldloc		syncp						syncp
	  ldc.i4	G_STRUCT_OFFSET(MonoThreadsSync, entry_count)	syncp off
	  add								&count
	  ldind.i4							count
	  brtrue.s	has_waiting
	*/

	mono_mb_emit_byte (mb, CEE_POP);
	mono_mb_emit_byte (mb, CEE_POP);
	mono_mb_emit_ldloc (mb, syncp_loc);
	mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, entry_count));
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_LDIND_I4);
	has_waiting_branch = mono_mb_emit_short_branch (mb, CEE_BRTRUE_S);

	/*
	  ldloc		syncp						syncp
	  ldc.i4	G_STRUCT_OFFSET(MonoThreadsSync, owner)		syncp off
	  add								&owner
	  ldnull							&owner 0
	  stind.i
	  ret
	*/

	mono_mb_emit_ldloc (mb, syncp_loc);
	mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, owner));
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_LDNULL);
	mono_mb_emit_byte (mb, CEE_STIND_I);
	mono_mb_emit_byte (mb, CEE_RET);

	/*
	 nested:
	  ldc.i4	1						&nest nest 1
	  sub								&nest nest-
	  stind.i4
	  ret
	*/

	mono_mb_patch_short_branch (mb, nested_branch);
	mono_mb_emit_byte (mb, CEE_LDC_I4_1);
	mono_mb_emit_byte (mb, CEE_SUB);
	mono_mb_emit_byte (mb, CEE_STIND_I4);
	mono_mb_emit_byte (mb, CEE_RET);

	/*
	 obj_null, has_waiting:
	  ldarg		0						obj
	  call		System.Threading.Monitor.Exit
	  ret
	*/

	if (thin_hash_branch)
		mono_mb_patch_short_branch (mb, thin_hash_branch);
	mono_mb_patch_short_branch (mb, obj_null_branch);
	mono_mb_patch_short_branch (mb, has_waiting_branch);
	mono_mb_emit_byte (mb, CEE_LDARG_0);
	mono_mb_emit_managed_call (mb, monitor_exit_method, NULL);
	mono_mb_emit_byte (mb, CEE_RET);
#endif

	res = register_fastpath (mono_mb_create_method (mb, mono_signature_no_pinvoke (monitor_exit_method), 5), FASTPATH_EXIT);
	mono_mb_free (mb);

	info = mono_image_alloc0 (mono_defaults.corlib, sizeof (WrapperInfo));
	info->subtype = WRAPPER_SUBTYPE_FAST_MONITOR_EXIT;
	mono_marshal_set_wrapper_info (res, info);

	return res;
}
MonoMethod*
mono_monitor_get_fast_path (MonoMethod *enter_or_exit)
{
	if (strcmp (enter_or_exit->name, "Enter") == 0)
		return mono_monitor_get_fast_enter_method (enter_or_exit);
	if (strcmp (enter_or_exit->name, "Exit") == 0)
		return mono_monitor_get_fast_exit_method (enter_or_exit);
	g_assert_not_reached ();
	return NULL;
}
/*
 * mono_monitor_threads_sync_members_offset:
 * @owner_offset: returns size and offset of the "owner" member
 * @nest_offset: returns size and offset of the "nest" member
 * @entry_count_offset: returns size and offset of the "entry_count" member
 *
 * Returns the offsets and sizes of three members of the
 * MonoThreadsSync struct. The Monitor ASM fastpaths need this.
 */
void
mono_monitor_threads_sync_members_offset (int *owner_offset, int *nest_offset, int *entry_count_offset)
{
	MonoThreadsSync ts;

#define ENCODE_OFF_SIZE(o,s)	(((o) << 8) | ((s) & 0xff))

	*owner_offset = ENCODE_OFF_SIZE (G_STRUCT_OFFSET (MonoThreadsSync, owner), sizeof (ts.owner));
	*nest_offset = ENCODE_OFF_SIZE (G_STRUCT_OFFSET (MonoThreadsSync, nest), sizeof (ts.nest));
	*entry_count_offset = ENCODE_OFF_SIZE (G_STRUCT_OFFSET (MonoThreadsSync, entry_count), sizeof (ts.entry_count));
}
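
/*
 * Illustrative sketch only, not part of the original file: how a consumer
 * of the encoded values above could unpack them. ENCODE_OFF_SIZE packs the
 * member offset into the high bits and the member size into the low byte,
 * so decoding is a shift and a mask (the names below are made up).
 */
#define SKETCH_DECODE_OFFSET(v)	((v) >> 8)
#define SKETCH_DECODE_SIZE(v)	((v) & 0xff)

static G_GNUC_UNUSED void
print_sync_member_layout_sketch (void)
{
	int owner, nest, entry_count;

	mono_monitor_threads_sync_members_offset (&owner, &nest, &entry_count);
	g_print ("owner: offset %d, size %d\n",
		SKETCH_DECODE_OFFSET (owner), SKETCH_DECODE_SIZE (owner));
	g_print ("nest: offset %d, size %d\n",
		SKETCH_DECODE_OFFSET (nest), SKETCH_DECODE_SIZE (nest));
	g_print ("entry_count: offset %d, size %d\n",
		SKETCH_DECODE_OFFSET (entry_count), SKETCH_DECODE_SIZE (entry_count));
}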
gboolean
ves_icall_System_Threading_Monitor_Monitor_try_enter (MonoObject *obj, guint32 ms)
{
	gint32 res;

	do {
		res = mono_monitor_try_enter_internal (obj, ms, TRUE);
		if (res == -1)
			mono_thread_interruption_checkpoint ();
	} while (res == -1);

	return res == 1;
}

void
ves_icall_System_Threading_Monitor_Monitor_try_enter_with_atomic_var (MonoObject *obj, guint32 ms, char *lockTaken)
{
	gint32 res;

	do {
		res = mono_monitor_try_enter_internal (obj, ms, TRUE);
		/* This means we got interrupted during the wait and didn't get the monitor. */
		if (res == -1)
			mono_thread_interruption_checkpoint ();
	} while (res == -1);

	/* It's safe to do it from here since interruption would happen only on the wrapper. */
	*lockTaken = res == 1;
}
gboolean
ves_icall_System_Threading_Monitor_Monitor_test_owner (MonoObject *obj)
{
	MonoThreadsSync *mon;

	LOCK_DEBUG (g_message ("%s: Testing if %p is owned by thread %d", __func__, obj, GetCurrentThreadId ()));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	{
		LockWord lw;
		lw.sync = mon;
		if (lw.lock_word & LOCK_WORD_THIN_HASH)
			return FALSE;
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		mon = lw.sync;
	}
#endif
	if (mon == NULL) {
		return FALSE;
	}

	if (mon->owner == GetCurrentThreadId ()) {
		return TRUE;
	}

	return FALSE;
}

gboolean
ves_icall_System_Threading_Monitor_Monitor_test_synchronised (MonoObject *obj)
{
	MonoThreadsSync *mon;

	LOCK_DEBUG (g_message ("%s: (%d) Testing if %p is owned by any thread", __func__, GetCurrentThreadId (), obj));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	{
		LockWord lw;
		lw.sync = mon;
		if (lw.lock_word & LOCK_WORD_THIN_HASH)
			return FALSE;
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		mon = lw.sync;
	}
#endif
	if (mon == NULL) {
		return FALSE;
	}

	if (mon->owner != 0) {
		return TRUE;
	}

	return FALSE;
}
/* All wait list manipulation in the pulse, pulseall and wait
 * functions happens while the monitor lock is held, so we don't need
 * any extra struct locking
 */
void
ves_icall_System_Threading_Monitor_Monitor_pulse (MonoObject *obj)
{
	MonoThreadsSync *mon;

	LOCK_DEBUG (g_message ("%s: (%d) Pulsing %p", __func__, GetCurrentThreadId (), obj));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	{
		LockWord lw;
		lw.sync = mon;
		if (lw.lock_word & LOCK_WORD_THIN_HASH) {
			mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
			return;
		}
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		mon = lw.sync;
	}
#endif
	if (mon == NULL) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
		return;
	}
	if (mon->owner != GetCurrentThreadId ()) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
		return;
	}

	LOCK_DEBUG (g_message ("%s: (%d) %d threads waiting", __func__, GetCurrentThreadId (), g_slist_length (mon->wait_list)));

	if (mon->wait_list != NULL) {
		LOCK_DEBUG (g_message ("%s: (%d) signalling and dequeuing handle %p", __func__, GetCurrentThreadId (), mon->wait_list->data));

		SetEvent (mon->wait_list->data);
		mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
	}
}

void
ves_icall_System_Threading_Monitor_Monitor_pulse_all (MonoObject *obj)
{
	MonoThreadsSync *mon;

	LOCK_DEBUG (g_message ("%s: (%d) Pulsing all %p", __func__, GetCurrentThreadId (), obj));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	{
		LockWord lw;
		lw.sync = mon;
		if (lw.lock_word & LOCK_WORD_THIN_HASH) {
			mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
			return;
		}
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		mon = lw.sync;
	}
#endif
	if (mon == NULL) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
		return;
	}
	if (mon->owner != GetCurrentThreadId ()) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
		return;
	}

	LOCK_DEBUG (g_message ("%s: (%d) %d threads waiting", __func__, GetCurrentThreadId (), g_slist_length (mon->wait_list)));

	while (mon->wait_list != NULL) {
		LOCK_DEBUG (g_message ("%s: (%d) signalling and dequeuing handle %p", __func__, GetCurrentThreadId (), mon->wait_list->data));

		SetEvent (mon->wait_list->data);
		mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
	}
}
gboolean
ves_icall_System_Threading_Monitor_Monitor_wait (MonoObject *obj, guint32 ms)
{
	MonoThreadsSync *mon;
	HANDLE event;
	guint32 nest;
	guint32 ret;
	gboolean success = FALSE;
	gint32 regain;
	MonoInternalThread *thread = mono_thread_internal_current ();

	LOCK_DEBUG (g_message ("%s: (%d) Trying to wait for %p with timeout %dms", __func__, GetCurrentThreadId (), obj, ms));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	{
		LockWord lw;
		lw.sync = mon;
		if (lw.lock_word & LOCK_WORD_THIN_HASH) {
			mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
			return FALSE;
		}
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		mon = lw.sync;
	}
#endif
	if (mon == NULL) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
		return FALSE;
	}
	if (mon->owner != GetCurrentThreadId ()) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
		return FALSE;
	}

	/* Do this WaitSleepJoin check before creating the event handle */
	mono_thread_current_check_pending_interrupt ();

	event = CreateEvent (NULL, FALSE, FALSE, NULL);
	if (event == NULL) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Failed to set up wait event"));
		return FALSE;
	}

	LOCK_DEBUG (g_message ("%s: (%d) queuing handle %p", __func__, GetCurrentThreadId (), event));

	mono_thread_current_check_pending_interrupt ();

	mono_thread_set_state (thread, ThreadState_WaitSleepJoin);

	mon->wait_list = g_slist_append (mon->wait_list, event);

	/* Save the nest count, and release the lock */
	nest = mon->nest;
	mon->nest = 1;
	mono_monitor_exit (obj);

	LOCK_DEBUG (g_message ("%s: (%d) Unlocked %p lock %p", __func__, GetCurrentThreadId (), obj, mon));

	/* There's no race between unlocking mon and waiting for the
	 * event, because auto reset events are sticky, and this event
	 * is private to this thread. Therefore even if the event was
	 * signalled before we wait, we still succeed.
	 */
	ret = WaitForSingleObjectEx (event, ms, TRUE);

	/* Reset the thread state fairly early, so we don't have to worry
	 * about the monitor error checking
	 */
	mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);

	if (mono_thread_interruption_requested ()) {
		/*
		 * Can't remove the event from wait_list, since the monitor is not locked by
		 * us. So leave it there, mon_new () will delete it when the mon structure
		 * is placed on the free list.
		 * FIXME: The caller expects to hold the lock after the wait returns, but it
		 * doesn't happen in this case:
		 * http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?FeedbackID=97268
		 */
		return FALSE;
	}

	/* Regain the lock with the previous nest count */
	do {
		regain = mono_monitor_try_enter_internal (obj, INFINITE, TRUE);
		if (regain == -1)
			mono_thread_interruption_checkpoint ();
	} while (regain == -1);

	if (regain == 0) {
		/* Something went wrong, so throw a
		 * SynchronizationLockException
		 */
		CloseHandle (event);
		mono_raise_exception (mono_get_exception_synchronization_lock ("Failed to regain lock"));
		return FALSE;
	}

	mon->nest = nest;

	LOCK_DEBUG (g_message ("%s: (%d) Regained %p lock %p", __func__, GetCurrentThreadId (), obj, mon));

	if (ret == WAIT_TIMEOUT) {
		/* Poll the event again, just in case it was signalled
		 * while we were trying to regain the monitor lock
		 */
		ret = WaitForSingleObjectEx (event, 0, FALSE);
	}

	/* Pulse will have popped our event from the queue if it signalled
	 * us, so we only do it here if the wait timed out.
	 *
	 * This avoids a race condition where the thread holding the
	 * lock can Pulse several times before the WaitForSingleObject
	 * returns. If we popped the queue here then this event might
	 * be signalled more than once, thereby starving another
	 * thread.
	 */
	if (ret == WAIT_OBJECT_0) {
		LOCK_DEBUG (g_message ("%s: (%d) Success", __func__, GetCurrentThreadId ()));
		success = TRUE;
	} else {
		LOCK_DEBUG (g_message ("%s: (%d) Wait failed, dequeuing handle %p", __func__, GetCurrentThreadId (), event));
		/* No pulse, so we have to remove ourself from the
		 * wait queue
		 */
		mon->wait_list = g_slist_remove (mon->wait_list, event);
	}

	CloseHandle (event);
	return success;
}