
/erts/emulator/beam/erl_lock_check.c

https://github.com/Bwooce/otp
Possible License(s): LGPL-2.1, MPL-2.0-no-copyleft-exception, BSD-2-Clause
/*
 * %CopyrightBegin%
 *
 * Copyright Ericsson AB 2005-2011. All Rights Reserved.
 *
 * The contents of this file are subject to the Erlang Public License,
 * Version 1.1, (the "License"); you may not use this file except in
 * compliance with the License. You should have received a copy of the
 * Erlang Public License along with this software. If not, it can be
 * retrieved online at http://www.erlang.org/.
 *
 * Software distributed under the License is distributed on an "AS IS"
 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
 * the License for the specific language governing rights and limitations
 * under the License.
 *
 * %CopyrightEnd%
 */

/*
 * Description: A lock checker that checks that each thread acquires
 *              locks according to a predefined global lock order. The
 *              global lock order is used to prevent deadlocks. If the
 *              lock order is violated, an error message is printed
 *              and the emulator aborts. The lock checker is only
 *              intended to be enabled when debugging.
 *
 * Author: Rickard Green
 */
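/*
 * Example (illustrative sketch, not part of the original source): with
 * lock checking enabled, each emulator lock carries an erts_lc_lock_t,
 * and lock/unlock operations are reported to this checker, roughly:
 *
 *     erts_lc_lock_t lc;
 *     erts_lc_init_lock(&lc, "proc_tab", ERTS_LC_FLG_LT_MUTEX);
 *     erts_lc_lock(&lc);      -- report before taking the real mutex
 *     erts_lc_unlock(&lc);    -- report after releasing it
 *
 * Acquiring a lock that sorts before an already held lock triggers
 * lock_order_violation() below and aborts the emulator.
 */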
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

/* Needed for VxWorks va_arg */
#include "sys.h"

#ifdef ERTS_ENABLE_LOCK_CHECK

#include "erl_lock_check.h"
#include "erl_term.h"
#include "erl_threads.h"

typedef struct {
    char *name;
    char *internal_order;
} erts_lc_lock_order_t;

/*
 * Global lock order for locks in the emulator.
 *
 * Locks early (low indexes) in the 'erts_lock_order' array should be
 * locked before locks late (high indexes) in the array. Each lock has
 * a name which is set on initialization. If multiple locks with the
 * same name are used, either an immediate Erlang term (e.g. internal
 * pid) or the address of the lock is used for internal lock order.
 * The immediate Erlang term used for internal lock order is also set
 * on initialization. Locks with small immediate Erlang terms should
 * be locked before locks with large immediate Erlang terms, and
 * locks with small addresses should be locked before locks with
 * large addresses. The immediate terms and addresses (boxed pointers)
 * are compared as unsigned integers, not as Erlang terms.
 *
 * Once a spinlock or rw(spin)lock has been locked, the thread is not
 * allowed to lock mutexes, rwmutexes or process locks until all
 * spinlocks and rwlocks have been unlocked. This restriction is not
 * reflected by the lock order below, but the lock checker will still
 * check for violations of this restriction.
 */
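/*
 * Example (illustrative): a thread holding "proc_main" for one pid may
 * go on to lock "proc_msgq" (later in the table), or "proc_main" for a
 * pid with a larger internal order, but locking "reg_tab" while holding
 * "proc_main" would be flagged as a lock order violation, since
 * "reg_tab" appears earlier in the table.
 */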
static erts_lc_lock_order_t erts_lock_order[] = {
    /*
     * "Lock name"                   "Internal lock order description
     *                               (NULL if only one lock uses the
     *                               lock name)"
     */
#ifdef ERTS_SMP
    { "driver_lock",                 "driver_name" },
    { "port_lock",                   "port_id" },
#endif
    { "port_data_lock",              "address" },
#ifdef ERTS_SMP
    { "bif_timers",                  NULL },
    { "reg_tab",                     NULL },
    { "migration_info_update",       NULL },
    { "proc_main",                   "pid" },
#ifdef HIPE
    { "hipe_mfait_lock",             NULL },
#endif
    { "nodes_monitors",              NULL },
    { "driver_list",                 NULL },
    { "proc_link",                   "pid" },
    { "proc_msgq",                   "pid" },
    { "dist_entry",                  "address" },
    { "dist_entry_links",            "address" },
    { "proc_status",                 "pid" },
    { "proc_tab",                    NULL },
    { "ports_snapshot",              NULL },
    { "meta_name_tab",               "address" },
    { "meta_main_tab_slot",          "address" },
    { "db_tab",                      "address" },
    { "db_tab_fix",                  "address" },
    { "meta_main_tab_main",          NULL },
    { "db_hash_slot",                "address" },
    { "node_table",                  NULL },
    { "dist_table",                  NULL },
    { "sys_tracers",                 NULL },
    { "module_tab",                  NULL },
    { "export_tab",                  NULL },
    { "fun_tab",                     NULL },
    { "environ",                     NULL },
#endif
    { "asyncq",                      "address" },
#ifndef ERTS_SMP
    { "async_ready",                 NULL },
#endif
    { "efile_drv",                   "address" },
#if defined(ENABLE_CHILD_WAITER_THREAD) || defined(ERTS_SMP)
    { "child_status",                NULL },
#endif
#ifdef __WIN32__
    { "sys_driver_data_lock",        NULL },
#endif
    { "drv_ev_state_grow",           NULL },
    { "drv_ev_state",                "address" },
    { "safe_hash",                   "address" },
    { "pollset_rm_list",             NULL },
    { "removed_fd_pre_alloc_lock",   NULL },
    { "state_prealloc",              NULL },
    { "schdlr_sspnd",                NULL },
    { "run_queue",                   "address" },
    { "cpu_info",                    NULL },
    { "pollset",                     "address" },
#ifdef __WIN32__
    { "pollwaiter",                  "address" },
    { "break_waiter_lock",           NULL },
#endif /* __WIN32__ */
    { "alcu_init_atoms",             NULL },
    { "mseg_init_atoms",             NULL },
    { "drv_tsd",                     NULL },
#ifdef ERTS_SMP
    { "sys_msg_q",                   NULL },
    { "atom_tab",                    NULL },
    { "make_ref",                    NULL },
    { "misc_op_list_pre_alloc_lock", "address" },
    { "message_pre_alloc_lock",      "address" },
    { "ptimer_pre_alloc_lock",       "address" },
    { "btm_pre_alloc_lock",          NULL },
    { "dist_entry_out_queue",        "address" },
#endif
    { "mtrace_op",                   NULL },
    { "instr_x",                     NULL },
    { "instr",                       NULL },
    { "fix_alloc",                   "index" },
    { "alcu_allocator",              "index" },
    { "alcu_delayed_free",           "index" },
    { "mseg",                        NULL },
#if HALFWORD_HEAP
    { "pmmap",                       NULL },
#endif
#ifdef ERTS_SMP
    { "port_task_pre_alloc_lock",    "address" },
    { "port_taskq_pre_alloc_lock",   "address" },
    { "proclist_pre_alloc_lock",     "address" },
    { "port_tasks_lock",             NULL },
    { "get_free_port",               NULL },
    { "port_state",                  "address" },
    { "xports_list_pre_alloc_lock",  "address" },
    { "inet_buffer_stack_lock",      NULL },
    { "gc_info",                     NULL },
    { "io_wake",                     NULL },
    { "timer_wheel",                 NULL },
    { "system_block",                NULL },
    { "timeofday",                   NULL },
    { "breakpoints",                 NULL },
    { "pollsets_lock",               NULL },
    { "async_id",                    NULL },
    { "pix_lock",                    "address" },
    { "run_queues_lists",            NULL },
    { "misc_aux_work_queue",         "index" },
    { "misc_aux_work_pre_alloc_lock", "address" },
    { "sched_stat",                  NULL },
    { "run_queue_sleep_list",        "address" },
#endif
    { "alloc_thr_ix_lock",           NULL },
#ifdef ERTS_SMP
    { "proc_lck_qs_alloc",           NULL },
#endif
#ifdef __WIN32__
#ifdef DEBUG
    { "save_ops_lock",               NULL },
#endif
#endif
    { "mtrace_buf",                  NULL },
    { "erts_alloc_hard_debug",       NULL }
};
#define ERTS_LOCK_ORDER_SIZE \
    (sizeof(erts_lock_order)/sizeof(erts_lc_lock_order_t))

#define LOCK_IS_TYPE_ORDER_VIOLATION(LCK_FLG, LCKD_FLG)                 \
    (((LCKD_FLG) & (ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)) \
     && ((LCK_FLG)                                                      \
         & ERTS_LC_FLG_LT_ALL                                           \
         & ~(ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)))
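/*
 * Reading of the macro above: it is true when the lock being taken
 * (LCK_FLG) is a sleeping lock (mutex, rwmutex or process lock) while
 * the most recently taken lock (LCKD_FLG) is a spinlock or
 * rw(spin)lock, i.e. exactly the restriction described in the comment
 * preceding erts_lock_order.
 */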
static __decl_noreturn void __noreturn lc_abort(void);

static char *
lock_type(Uint16 flags)
{
    switch (flags & ERTS_LC_FLG_LT_ALL) {
    case ERTS_LC_FLG_LT_SPINLOCK:   return "[spinlock]";
    case ERTS_LC_FLG_LT_RWSPINLOCK: return "[rw(spin)lock]";
    case ERTS_LC_FLG_LT_MUTEX:      return "[mutex]";
    case ERTS_LC_FLG_LT_RWMUTEX:    return "[rwmutex]";
    case ERTS_LC_FLG_LT_PROCLOCK:   return "[proclock]";
    default:                        return "";
    }
}

static char *
rw_op_str(Uint16 flags)
{
    switch (flags & ERTS_LC_FLG_LO_READ_WRITE) {
    case ERTS_LC_FLG_LO_READ_WRITE:
        return " (rw)";
    case ERTS_LC_FLG_LO_READ:
        return " (r)";
    case ERTS_LC_FLG_LO_WRITE:
        erts_fprintf(stderr, "\nInternal error\n");
        lc_abort();
    default:
        break;
    }
    return "";
}
typedef struct erts_lc_locked_lock_t_ erts_lc_locked_lock_t;
struct erts_lc_locked_lock_t_ {
    erts_lc_locked_lock_t *next;
    erts_lc_locked_lock_t *prev;
    Eterm extra;
    Sint16 id;
    Uint16 flags;
};

typedef struct {
    erts_lc_locked_lock_t *first;
    erts_lc_locked_lock_t *last;
} erts_lc_locked_lock_list_t;

typedef struct erts_lc_locked_locks_t_ erts_lc_locked_locks_t;
struct erts_lc_locked_locks_t_ {
    char *thread_name;
    erts_tid_t tid;
    erts_lc_locked_locks_t *next;
    erts_lc_locked_locks_t *prev;
    erts_lc_locked_lock_list_t locked;
    erts_lc_locked_lock_list_t required;
};

typedef union erts_lc_free_block_t_ erts_lc_free_block_t;
union erts_lc_free_block_t_ {
    erts_lc_free_block_t *next;
    erts_lc_locked_lock_t lock;
};
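/*
 * Free blocks are kept in a singly linked free list; the union above
 * lets a block serve either as a live erts_lc_locked_lock_t or, while
 * on the free list, as just a 'next' pointer reusing the same storage.
 */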
static ethr_tsd_key locks_key;

static erts_lc_locked_locks_t *erts_locked_locks;
static erts_lc_free_block_t *free_blocks;

#ifdef ERTS_LC_STATIC_ALLOC
#define ERTS_LC_FB_CHUNK_SIZE 10000
#else
#define ERTS_LC_FB_CHUNK_SIZE 10
#endif

static ethr_spinlock_t free_blocks_lock;
static ERTS_INLINE void
lc_lock(void)
{
    ethr_spin_lock(&free_blocks_lock);
}

static ERTS_INLINE void
lc_unlock(void)
{
    ethr_spin_unlock(&free_blocks_lock);
}

static ERTS_INLINE void lc_free(void *p)
{
    erts_lc_free_block_t *fb = (erts_lc_free_block_t *) p;
#ifdef DEBUG
    memset((void *) p, 0xdf, sizeof(erts_lc_free_block_t));
#endif
    lc_lock();
    fb->next = free_blocks;
    free_blocks = fb;
    lc_unlock();
}

#ifdef ERTS_LC_STATIC_ALLOC

static void *lc_core_alloc(void)
{
    lc_unlock();
    erts_fprintf(stderr, "Lock checker out of memory!\n");
    lc_abort();
}

#else

static void *lc_core_alloc(void)
{
    int i;
    erts_lc_free_block_t *fbs;
    lc_unlock();
    fbs = (erts_lc_free_block_t *) malloc(sizeof(erts_lc_free_block_t)
                                          * ERTS_LC_FB_CHUNK_SIZE);
    if (!fbs) {
        erts_fprintf(stderr, "Lock checker failed to allocate memory!\n");
        lc_abort();
    }
    for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
        memset((void *) &fbs[i], 0xdf, sizeof(erts_lc_free_block_t));
#endif
        fbs[i].next = &fbs[i+1];
    }
#ifdef DEBUG
    memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1],
           0xdf, sizeof(erts_lc_free_block_t));
#endif
    lc_lock();
    fbs[ERTS_LC_FB_CHUNK_SIZE-1].next = free_blocks;
    free_blocks = &fbs[1];
    return (void *) &fbs[0];
}

#endif

static ERTS_INLINE void *lc_alloc(void)
{
    void *res;
    lc_lock();
    if (!free_blocks)
        res = lc_core_alloc();
    else {
        res = (void *) free_blocks;
        free_blocks = free_blocks->next;
    }
    lc_unlock();
    return res;
}
static erts_lc_locked_locks_t *
create_locked_locks(char *thread_name)
{
    erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
    if (!l_lcks)
        lc_abort();

    l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
    if (!l_lcks->thread_name)
        lc_abort();

    l_lcks->tid = erts_thr_self();
    l_lcks->required.first = NULL;
    l_lcks->required.last = NULL;
    l_lcks->locked.first = NULL;
    l_lcks->locked.last = NULL;
    l_lcks->prev = NULL;
    lc_lock();
    l_lcks->next = erts_locked_locks;
    if (erts_locked_locks)
        erts_locked_locks->prev = l_lcks;
    erts_locked_locks = l_lcks;
    lc_unlock();
    erts_tsd_set(locks_key, (void *) l_lcks);
    return l_lcks;
}

static void
destroy_locked_locks(erts_lc_locked_locks_t *l_lcks)
{
    ASSERT(l_lcks->thread_name);
    free((void *) l_lcks->thread_name);
    ASSERT(l_lcks->required.first == NULL);
    ASSERT(l_lcks->required.last == NULL);
    ASSERT(l_lcks->locked.first == NULL);
    ASSERT(l_lcks->locked.last == NULL);
    lc_lock();
    if (l_lcks->prev)
        l_lcks->prev->next = l_lcks->next;
    else {
        ASSERT(erts_locked_locks == l_lcks);
        erts_locked_locks = l_lcks->next;
    }
    if (l_lcks->next)
        l_lcks->next->prev = l_lcks->prev;
    lc_unlock();
    free((void *) l_lcks);
}

static ERTS_INLINE erts_lc_locked_locks_t *
get_my_locked_locks(void)
{
    return erts_tsd_get(locks_key);
}

static ERTS_INLINE erts_lc_locked_locks_t *
make_my_locked_locks(void)
{
    erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
    if (l_lcks)
        return l_lcks;
    else
        return create_locked_locks(NULL);
}
static ERTS_INLINE erts_lc_locked_lock_t *
new_locked_lock(erts_lc_lock_t *lck, Uint16 op_flags)
{
    erts_lc_locked_lock_t *l_lck = (erts_lc_locked_lock_t *) lc_alloc();
    l_lck->next = NULL;
    l_lck->prev = NULL;
    l_lck->id = lck->id;
    l_lck->extra = lck->extra;
    l_lck->flags = lck->flags | op_flags;
    return l_lck;
}

static void
print_lock2(char *prefix, Sint16 id, Eterm extra, Uint16 flags, char *suffix)
{
    char *lname = (0 <= id && id < ERTS_LOCK_ORDER_SIZE
                   ? erts_lock_order[id].name
                   : "unknown");
    if (is_boxed(extra))
        erts_fprintf(stderr,
                     "%s'%s:%p%s'%s%s",
                     prefix,
                     lname,
                     boxed_val(extra),
                     lock_type(flags),
                     rw_op_str(flags),
                     suffix);
    else
        erts_fprintf(stderr,
                     "%s'%s:%T%s'%s%s",
                     prefix,
                     lname,
                     extra,
                     lock_type(flags),
                     rw_op_str(flags),
                     suffix);
}

static void
print_lock(char *prefix, erts_lc_lock_t *lck, char *suffix)
{
    print_lock2(prefix, lck->id, lck->extra, lck->flags, suffix);
}
static void
print_curr_locks(erts_lc_locked_locks_t *l_lcks)
{
    erts_lc_locked_lock_t *l_lck;
    if (!l_lcks || !l_lcks->locked.first)
        erts_fprintf(stderr,
                     "Currently no locks are locked by the %s thread.\n",
                     /* guard against a NULL l_lcks (no lock record yet) */
                     l_lcks ? l_lcks->thread_name : "unknown");
    else {
        erts_fprintf(stderr,
                     "Currently these locks are locked by the %s thread:\n",
                     l_lcks->thread_name);
        for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next)
            print_lock2(" ", l_lck->id, l_lck->extra, l_lck->flags, "\n");
    }
}
static void
print_lock_order(void)
{
    int i;
    erts_fprintf(stderr, "Lock order:\n");
    for (i = 1; i < ERTS_LOCK_ORDER_SIZE; i++) {
        if (erts_lock_order[i].internal_order)
            erts_fprintf(stderr,
                         " %s:%s\n",
                         erts_lock_order[i].name,
                         erts_lock_order[i].internal_order);
        else
            erts_fprintf(stderr, " %s\n", erts_lock_order[i].name);
    }
}
static void
uninitialized_lock(void)
{
    erts_fprintf(stderr, "Performing operations on uninitialized lock!\n");
    print_curr_locks(get_my_locked_locks());
    lc_abort();
}

static void
lock_twice(char *prefix, erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
           Uint16 op_flags)
{
    erts_fprintf(stderr, "%s%s", prefix, rw_op_str(op_flags));
    print_lock(" ", lck, " lock which is already locked by thread!\n");
    print_curr_locks(l_lcks);
    lc_abort();
}

static void
unlock_op_mismatch(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
                   Uint16 op_flags)
{
    erts_fprintf(stderr, "Unlocking%s ", rw_op_str(op_flags));
    print_lock("", lck, " lock which mismatches previous lock operation!\n");
    print_curr_locks(l_lcks);
    lc_abort();
}

static void
unlock_of_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
    print_lock("Unlocking ", lck, " lock which is not locked by thread!\n");
    print_curr_locks(l_lcks);
    lc_abort();
}

static void
lock_order_violation(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
    print_lock("Lock order violation occurred when locking ", lck, "!\n");
    print_curr_locks(l_lcks);
    print_lock_order();
    lc_abort();
}

static void
type_order_violation(char *op, erts_lc_locked_locks_t *l_lcks,
                     erts_lc_lock_t *lck)
{
    erts_fprintf(stderr, "Lock type order violation occurred when ");
    print_lock(op, lck, "!\n");
    ASSERT(l_lcks);
    print_curr_locks(l_lcks);
    lc_abort();
}
static void
lock_mismatch(erts_lc_locked_locks_t *l_lcks, int exact,
              int failed_have, erts_lc_lock_t *have, int have_len,
              int failed_have_not, erts_lc_lock_t *have_not, int have_not_len)
{
    int i;
    erts_fprintf(stderr, "Lock mismatch found!\n");
    if (failed_have >= 0) {
        ASSERT(have && have_len > failed_have);
        print_lock2("At least the ",
                    have[failed_have].id, have[failed_have].extra, 0,
                    " lock is not locked when it should have been\n");
    }
    else if (failed_have_not >= 0) {
        ASSERT(have_not && have_not_len > failed_have_not);
        print_lock2("At least the ",
                    have_not[failed_have_not].id,
                    have_not[failed_have_not].extra,
                    0,
                    " lock is locked when it should not have been\n");
    }
    if (exact) {
        if (!have || have_len <= 0)
            erts_fprintf(stderr,
                         "Thread should not have any locks locked at all\n");
        else {
            erts_fprintf(stderr,
                         "Thread should have these and only these locks "
                         "locked:\n");
            for (i = 0; i < have_len; i++)
                print_lock2(" ", have[i].id, have[i].extra, 0, "\n");
        }
    }
    else {
        if (have && have_len > 0) {
            erts_fprintf(stderr,
                         "Thread should at least have these locks locked:\n");
            for (i = 0; i < have_len; i++)
                print_lock2(" ", have[i].id, have[i].extra, 0, "\n");
        }
        if (have_not && have_not_len > 0) {
            erts_fprintf(stderr,
                         "Thread should at least not have these locks "
                         "locked:\n");
            for (i = 0; i < have_not_len; i++)
                print_lock2(" ", have_not[i].id, have_not[i].extra, 0, "\n");
        }
    }
    print_curr_locks(l_lcks);
    lc_abort();
}
static void
unlock_of_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
    print_lock("Unlocking required ", lck, " lock!\n");
    print_curr_locks(l_lcks);
    lc_abort();
}

static void
unrequire_of_not_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
    print_lock("Unrequire on ", lck, " lock not required!\n");
    print_curr_locks(l_lcks);
    lc_abort();
}

static void
require_twice(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
    print_lock("Require on ", lck, " lock already required!\n");
    print_curr_locks(l_lcks);
    lc_abort();
}

static void
required_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
    print_lock("Required ", lck, " lock not locked!\n");
    print_curr_locks(l_lcks);
    lc_abort();
}

static void
thread_exit_handler(void)
{
    erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
    if (l_lcks) {
        if (l_lcks->locked.first) {
            erts_fprintf(stderr,
                         "Thread exiting while having locked locks!\n");
            print_curr_locks(l_lcks);
            lc_abort();
        }
        destroy_locked_locks(l_lcks);
        /* erts_tsd_set(locks_key, NULL); */
    }
}

static __decl_noreturn void
lc_abort(void)
{
#ifdef __WIN32__
    DebugBreak();
#else
    abort();
#endif
}
void
erts_lc_set_thread_name(char *thread_name)
{
    erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
    if (!l_lcks)
        (void) create_locked_locks(thread_name);
    else {
        ASSERT(l_lcks->thread_name);
        free((void *) l_lcks->thread_name);
        l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
        if (!l_lcks->thread_name)
            lc_abort();
    }
}

int
erts_lc_assert_failed(char *file, int line, char *assertion)
{
    erts_fprintf(stderr, "%s:%d: Lock check assertion \"%s\" failed!\n",
                 file, line, assertion);
    print_curr_locks(get_my_locked_locks());
    lc_abort();
    return 0;
}

void erts_lc_fail(char *fmt, ...)
{
    va_list args;
    erts_fprintf(stderr, "Lock check failed: ");
    va_start(args, fmt);
    erts_vfprintf(stderr, fmt, args);
    va_end(args);
    erts_fprintf(stderr, "\n");
    print_curr_locks(get_my_locked_locks());
    lc_abort();
}
Sint16
erts_lc_get_lock_order_id(char *name)
{
    int i;
    if (!name || name[0] == '\0')
        erts_fprintf(stderr, "Missing lock name\n");
    else {
        for (i = 0; i < ERTS_LOCK_ORDER_SIZE; i++)
            if (strcmp(erts_lock_order[i].name, name) == 0)
                return i;
        erts_fprintf(stderr,
                     "Lock name '%s' missing in lock order "
                     "(update erl_lock_check.c)\n",
                     name);
    }
    lc_abort();
    return (Sint16) -1;
}
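/*
 * find_lock() and find_id() below search a lock list that is kept
 * sorted on (id, extra). Both take a cursor into the list via *l_lcks
 * and leave it positioned near the sought lock, so a sequence of
 * lookups for locks in ascending order is handled in a single pass.
 */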
static int
find_lock(erts_lc_locked_lock_t **l_lcks, erts_lc_lock_t *lck)
{
    erts_lc_locked_lock_t *l_lck = *l_lcks;
    if (l_lck) {
        if (l_lck->id == lck->id && l_lck->extra == lck->extra) {
            if ((l_lck->flags & lck->flags) == lck->flags)
                return 1;
            return 0;
        }
        else if (l_lck->id < lck->id
                 || (l_lck->id == lck->id
                     && l_lck->extra < lck->extra)) {
            for (l_lck = l_lck->next; l_lck; l_lck = l_lck->next) {
                if (l_lck->id > lck->id
                    || (l_lck->id == lck->id
                        && l_lck->extra >= lck->extra)) {
                    *l_lcks = l_lck;
                    if (l_lck->id == lck->id
                        && l_lck->extra == lck->extra
                        && ((l_lck->flags & lck->flags) == lck->flags))
                        return 1;
                    return 0;
                }
            }
        }
        else {
            for (l_lck = l_lck->prev; l_lck; l_lck = l_lck->prev) {
                if (l_lck->id < lck->id
                    || (l_lck->id == lck->id
                        && l_lck->extra <= lck->extra)) {
                    *l_lcks = l_lck;
                    if (l_lck->id == lck->id
                        && l_lck->extra == lck->extra
                        && ((l_lck->flags & lck->flags) == lck->flags))
                        return 1;
                    return 0;
                }
            }
        }
    }
    return 0;
}
static int
find_id(erts_lc_locked_lock_t **l_lcks, Sint16 id)
{
    erts_lc_locked_lock_t *l_lck = *l_lcks;
    if (l_lck) {
        if (l_lck->id == id)
            return 1;
        else if (l_lck->id < id) {
            for (l_lck = l_lck->next; l_lck; l_lck = l_lck->next) {
                if (l_lck->id >= id) {
                    *l_lcks = l_lck;
                    if (l_lck->id == id)
                        return 1;
                    return 0;
                }
            }
        }
        else {
            for (l_lck = l_lck->prev; l_lck; l_lck = l_lck->prev) {
                if (l_lck->id <= id) {
                    *l_lcks = l_lck;
                    if (l_lck->id == id)
                        return 1;
                    return 0;
                }
            }
        }
    }
    return 0;
}
void
erts_lc_have_locks(int *resv, erts_lc_lock_t *locks, int len)
{
    erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
    int i;
    if (!l_lcks) {
        for (i = 0; i < len; i++)
            resv[i] = 0;
    }
    else {
        erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
        for (i = 0; i < len; i++)
            resv[i] = find_lock(&l_lck, &locks[i]);
    }
}

void
erts_lc_have_lock_ids(int *resv, int *ids, int len)
{
    erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
    int i;
    if (!l_lcks) {
        for (i = 0; i < len; i++)
            resv[i] = 0;
    }
    else {
        erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
        for (i = 0; i < len; i++)
            resv[i] = find_id(&l_lck, ids[i]);
    }
}
void
erts_lc_check(erts_lc_lock_t *have, int have_len,
              erts_lc_lock_t *have_not, int have_not_len)
{
    int i;
    erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
    erts_lc_locked_lock_t *l_lck;

    if (have && have_len > 0) {
        if (!l_lcks)
            lock_mismatch(NULL, 0,
                          -1, have, have_len,
                          -1, have_not, have_not_len);
        l_lck = l_lcks->locked.first;
        for (i = 0; i < have_len; i++) {
            if (!find_lock(&l_lck, &have[i]))
                lock_mismatch(l_lcks, 0,
                              i, have, have_len,
                              -1, have_not, have_not_len);
        }
    }
    if (have_not && have_not_len > 0 && l_lcks) {
        l_lck = l_lcks->locked.first;
        for (i = 0; i < have_not_len; i++) {
            if (find_lock(&l_lck, &have_not[i]))
                lock_mismatch(l_lcks, 0,
                              -1, have, have_len,
                              i, have_not, have_not_len);
        }
    }
}

void
erts_lc_check_exact(erts_lc_lock_t *have, int have_len)
{
    erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
    if (!l_lcks) {
        if (have && have_len > 0)
            lock_mismatch(NULL, 1,
                          -1, have, have_len,
                          -1, NULL, 0);
    }
    else {
        int i;
        erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
        for (i = 0; i < have_len; i++) {
            if (!find_lock(&l_lck, &have[i]))
                lock_mismatch(l_lcks, 1,
                              i, have, have_len,
                              -1, NULL, 0);
        }
        for (i = 0, l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next)
            i++;
        if (i != have_len)
            lock_mismatch(l_lcks, 1,
                          -1, have, have_len,
                          -1, NULL, 0);
    }
}

void
erts_lc_check_no_locked_of_type(Uint16 flags)
{
    erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
    if (l_lcks) {
        erts_lc_locked_lock_t *l_lck;
        for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next) {
            if (l_lck->flags & flags) {
                erts_fprintf(stderr,
                             "Locked lock of type %s found which isn't "
                             "allowed here!\n",
                             lock_type(l_lck->flags));
                print_curr_locks(l_lcks);
                lc_abort();
            }
        }
    }
}
int
erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
#ifdef ERTS_LC_DO_NOT_FORCE_BUSY_TRYLOCK_ON_LOCK_ORDER_VIOLATION
    return 0;
#else
    /*
     * Force a busy trylock result if the locking does not follow the
     * lock order. This is done in order to make sure that the caller
     * can handle that situation without causing a lock order violation.
     */
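    /*
     * Example (illustrative): a thread already holding "proc_msgq" that
     * trylocks "reg_tab" (earlier in the lock order) is sometimes handed
     * a busy result here, even though the real trylock might have
     * succeeded, so the caller's out-of-order fallback path gets
     * exercised in debug builds.
     */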
    erts_lc_locked_locks_t *l_lcks;

    if (lck->inited != ERTS_LC_INITITALIZED)
        uninitialized_lock();

    if (lck->id < 0)
        return 0;

    l_lcks = get_my_locked_locks();

    if (!l_lcks || !l_lcks->locked.first) {
        ASSERT(!l_lcks || !l_lcks->locked.last);
        return 0;
    }
    else {
        erts_lc_locked_lock_t *tl_lck;

        ASSERT(l_lcks->locked.last);

#if 0 /* Ok when trylocking I guess... */
        if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, l_lcks->locked.last->flags))
            type_order_violation("trylocking ", l_lcks, lck);
#endif

        if (l_lcks->locked.last->id < lck->id
            || (l_lcks->locked.last->id == lck->id
                && l_lcks->locked.last->extra < lck->extra))
            return 0;

        /*
         * Lock order violation
         */

        /* Check that we are not trying to lock this lock twice */
        for (tl_lck = l_lcks->locked.last; tl_lck; tl_lck = tl_lck->prev) {
            if (tl_lck->id < lck->id
                || (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
                if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
                    lock_twice("Trylocking", l_lcks, lck, op_flags);
                break;
            }
        }

#ifndef ERTS_LC_ALLWAYS_FORCE_BUSY_TRYLOCK_ON_LOCK_ORDER_VIOLATION
        /* We only force busy if a lock order violation would occur
           and when on an even millisecond. */
        {
            SysTimeval tv;
            sys_gettimeofday(&tv);
            if ((tv.tv_usec / 1000) & 1)
                return 0;
        }
#endif

        return 1;
    }
#endif
}
void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags)
{
    erts_lc_locked_locks_t *l_lcks;
    erts_lc_locked_lock_t *l_lck;

    if (lck->inited != ERTS_LC_INITITALIZED)
        uninitialized_lock();

    if (lck->id < 0)
        return;

    l_lcks = make_my_locked_locks();
    l_lck = locked ? new_locked_lock(lck, op_flags) : NULL;

    if (!l_lcks->locked.last) {
        ASSERT(!l_lcks->locked.first);
        if (locked)
            l_lcks->locked.first = l_lcks->locked.last = l_lck;
    }
    else {
        erts_lc_locked_lock_t *tl_lck;
#if 0 /* Ok when trylocking I guess... */
        if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, l_lcks->locked.last->flags))
            type_order_violation("trylocking ", l_lcks, lck);
#endif
        for (tl_lck = l_lcks->locked.last; tl_lck; tl_lck = tl_lck->prev) {
            if (tl_lck->id < lck->id
                || (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
                if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
                    lock_twice("Trylocking", l_lcks, lck, op_flags);
                if (locked) {
                    l_lck->next = tl_lck->next;
                    l_lck->prev = tl_lck;
                    if (tl_lck->next)
                        tl_lck->next->prev = l_lck;
                    else
                        l_lcks->locked.last = l_lck;
                    tl_lck->next = l_lck;
                }
                return;
            }
        }
        if (locked) {
            l_lck->next = l_lcks->locked.first;
            l_lcks->locked.first->prev = l_lck;
            l_lcks->locked.first = l_lck;
        }
    }
}
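/*
 * "Required" locks are locks that the current thread must keep holding
 * until they are explicitly unrequired again; erts_lc_unlock_flg()
 * below refuses to unlock a lock while it is on the required list.
 */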
void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
    erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
    erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
    if (!find_lock(&l_lck, lck))
        required_not_locked(l_lcks, lck);
    l_lck = new_locked_lock(lck, op_flags);
    if (!l_lcks->required.last) {
        ASSERT(!l_lcks->required.first);
        l_lck->next = l_lck->prev = NULL;
        l_lcks->required.first = l_lcks->required.last = l_lck;
    }
    else {
        erts_lc_locked_lock_t *l_lck2;
        ASSERT(l_lcks->required.first);
        for (l_lck2 = l_lcks->required.last;
             l_lck2;
             l_lck2 = l_lck2->prev) {
            if (l_lck2->id < lck->id
                || (l_lck2->id == lck->id && l_lck2->extra < lck->extra))
                break;
            else if (l_lck2->id == lck->id && l_lck2->extra == lck->extra)
                require_twice(l_lcks, lck);
        }
        if (!l_lck2) {
            l_lck->next = l_lcks->required.first;
            l_lck->prev = NULL;
            l_lcks->required.first->prev = l_lck;
            l_lcks->required.first = l_lck;
        }
        else {
            l_lck->next = l_lck2->next;
            if (l_lck->next) {
                ASSERT(l_lcks->required.last != l_lck2);
                l_lck->next->prev = l_lck;
            }
            else {
                ASSERT(l_lcks->required.last == l_lck2);
                l_lcks->required.last = l_lck;
            }
            l_lck->prev = l_lck2;
            l_lck2->next = l_lck;
        }
    }
}
void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
    erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
    erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
    if (!find_lock(&l_lck, lck))
        required_not_locked(l_lcks, lck);
    l_lck = l_lcks->required.first;
    if (!find_lock(&l_lck, lck))
        unrequire_of_not_required_lock(l_lcks, lck);
    if (l_lck->prev) {
        ASSERT(l_lcks->required.first != l_lck);
        l_lck->prev->next = l_lck->next;
    }
    else {
        ASSERT(l_lcks->required.first == l_lck);
        l_lcks->required.first = l_lck->next;
    }
    if (l_lck->next) {
        ASSERT(l_lcks->required.last != l_lck);
        l_lck->next->prev = l_lck->prev;
    }
    else {
        ASSERT(l_lcks->required.last == l_lck);
        l_lcks->required.last = l_lck->prev;
    }
    lc_free((void *) l_lck);
}
void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
    erts_lc_locked_locks_t *l_lcks;
    erts_lc_locked_lock_t *l_lck;

    if (lck->inited != ERTS_LC_INITITALIZED)
        uninitialized_lock();

    if (lck->id < 0)
        return;

    l_lcks = make_my_locked_locks();
    l_lck = new_locked_lock(lck, op_flags);

    if (!l_lcks->locked.last) {
        ASSERT(!l_lcks->locked.first);
        l_lcks->locked.last = l_lcks->locked.first = l_lck;
    }
    else if (l_lcks->locked.last->id < lck->id
             || (l_lcks->locked.last->id == lck->id
                 && l_lcks->locked.last->extra < lck->extra)) {
        if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, l_lcks->locked.last->flags))
            type_order_violation("locking ", l_lcks, lck);
        l_lck->prev = l_lcks->locked.last;
        l_lcks->locked.last->next = l_lck;
        l_lcks->locked.last = l_lck;
    }
    else if (l_lcks->locked.last->id == lck->id
             && l_lcks->locked.last->extra == lck->extra)
        lock_twice("Locking", l_lcks, lck, op_flags);
    else
        lock_order_violation(l_lcks, lck);
}
void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
    erts_lc_locked_locks_t *l_lcks;
    erts_lc_locked_lock_t *l_lck;

    if (lck->inited != ERTS_LC_INITITALIZED)
        uninitialized_lock();

    if (lck->id < 0)
        return;

    l_lcks = get_my_locked_locks();

    if (l_lcks) {
        l_lck = l_lcks->required.first;
        if (find_lock(&l_lck, lck))
            unlock_of_required_lock(l_lcks, lck);
    }

    for (l_lck = l_lcks ? l_lcks->locked.last : NULL; l_lck; l_lck = l_lck->prev) {
        if (l_lck->id == lck->id && l_lck->extra == lck->extra) {
            if ((l_lck->flags & ERTS_LC_FLG_LO_ALL) != op_flags)
                unlock_op_mismatch(l_lcks, lck, op_flags);
            if (l_lck->prev)
                l_lck->prev->next = l_lck->next;
            else
                l_lcks->locked.first = l_lck->next;
            if (l_lck->next)
                l_lck->next->prev = l_lck->prev;
            else
                l_lcks->locked.last = l_lck->prev;
            lc_free((void *) l_lck);
            return;
        }
    }

    unlock_of_not_locked(l_lcks, lck);
}
void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
    erts_lc_locked_locks_t *l_lcks;
    erts_lc_locked_lock_t *l_lck;

    if (lck->inited != ERTS_LC_INITITALIZED)
        uninitialized_lock();

    if (lck->id < 0)
        return;

    l_lcks = get_my_locked_locks();

    if (l_lcks) {
        l_lck = l_lcks->required.first;
        if (find_lock(&l_lck, lck))
            unlock_of_required_lock(l_lcks, lck);
    }

    /* guard against a NULL l_lcks (thread has no lock record yet) */
    l_lck = l_lcks ? l_lcks->locked.first : NULL;
    if (!find_lock(&l_lck, lck))
        unlock_of_not_locked(l_lcks, lck);
}
int
erts_lc_trylock_force_busy(erts_lc_lock_t *lck)
{
    return erts_lc_trylock_force_busy_flg(lck, 0);
}

void
erts_lc_trylock(int locked, erts_lc_lock_t *lck)
{
    erts_lc_trylock_flg(locked, lck, 0);
}

void
erts_lc_lock(erts_lc_lock_t *lck)
{
    erts_lc_lock_flg(lck, 0);
}

void
erts_lc_unlock(erts_lc_lock_t *lck)
{
    erts_lc_unlock_flg(lck, 0);
}

void erts_lc_might_unlock(erts_lc_lock_t *lck)
{
    erts_lc_might_unlock_flg(lck, 0);
}

void erts_lc_require_lock(erts_lc_lock_t *lck)
{
    erts_lc_require_lock_flg(lck, 0);
}

void erts_lc_unrequire_lock(erts_lc_lock_t *lck)
{
    erts_lc_unrequire_lock_flg(lck, 0);
}
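/*
 * erts_lc_init_lock() uses the address of the lock itself (as a boxed
 * term) for internal lock order; erts_lc_init_lock_x() lets the caller
 * supply an immediate term (e.g. a pid) instead.
 */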
void
erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags)
{
    lck->id = erts_lc_get_lock_order_id(name);
    lck->extra = make_boxed(&lck->extra);
    lck->flags = flags;
    lck->inited = ERTS_LC_INITITALIZED;
}

void
erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, Uint16 flags, Eterm extra)
{
    lck->id = erts_lc_get_lock_order_id(name);
    lck->extra = extra;
    lck->flags = flags;
    lck->inited = ERTS_LC_INITITALIZED;
}

void
erts_lc_destroy_lock(erts_lc_lock_t *lck)
{
    if (lck->inited != ERTS_LC_INITITALIZED)
        uninitialized_lock();

    lck->inited = 0;
    lck->id = -1;
    lck->extra = THE_NON_VALUE;
    lck->flags = 0;
}
void
erts_lc_init(void)
{
#ifdef ERTS_LC_STATIC_ALLOC
    int i;
    static erts_lc_free_block_t fbs[ERTS_LC_FB_CHUNK_SIZE];
    for (i = 0; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
        memset((void *) &fbs[i], 0xdf, sizeof(erts_lc_free_block_t));
#endif
        fbs[i].next = &fbs[i+1];
    }
#ifdef DEBUG
    memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1],
           0xdf, sizeof(erts_lc_free_block_t));
#endif
    fbs[ERTS_LC_FB_CHUNK_SIZE-1].next = NULL;
    free_blocks = &fbs[0];
#else /* #ifdef ERTS_LC_STATIC_ALLOC */
    free_blocks = NULL;
#endif /* #ifdef ERTS_LC_STATIC_ALLOC */

    if (ethr_spinlock_init(&free_blocks_lock) != 0)
        lc_abort();

    erts_tsd_key_create(&locks_key);
}

void
erts_lc_late_init(void)
{
    erts_thr_install_exit_handler(thread_exit_handler);
}

/*
 * erts_lc_pll(): print locked locks...
 */
void
erts_lc_pll(void)
{
    print_curr_locks(get_my_locked_locks());
}

#endif /* #ifdef ERTS_ENABLE_LOCK_CHECK */