
/erts/emulator/beam/erl_lock_check.c

https://github.com/dudefrommangalore/otp
/*
 * %CopyrightBegin%
 *
 * Copyright Ericsson AB 2005-2010. All Rights Reserved.
 *
 * The contents of this file are subject to the Erlang Public License,
 * Version 1.1, (the "License"); you may not use this file except in
 * compliance with the License. You should have received a copy of the
 * Erlang Public License along with this software. If not, it can be
 * retrieved online at http://www.erlang.org/.
 *
 * Software distributed under the License is distributed on an "AS IS"
 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
 * the License for the specific language governing rights and limitations
 * under the License.
 *
 * %CopyrightEnd%
 */

/*
 * Description: A lock checker that checks that each thread acquires
 *              locks according to a predefined global lock order. The
 *              global lock order is used to prevent deadlocks. If the
 *              lock order is violated, an error message is printed
 *              and the emulator aborts. The lock checker is only
 *              intended to be enabled when debugging.
 *
 * Author: Rickard Green
 */

#ifdef HAVE_CONFIG_H
#  include "config.h"
#endif

/* Needed for VxWorks va_arg */
#include "sys.h"

#ifdef ERTS_ENABLE_LOCK_CHECK

#include "erl_lock_check.h"
#include "erl_term.h"
#include "erl_threads.h"

typedef struct {
    char *name;
    char *internal_order;
} erts_lc_lock_order_t;

/*
 * Global lock order for locks in the emulator.
 *
 * Locks early (low indexes) in the 'erts_lock_order' array should be
 * locked before locks late (high indexes) in the array. Each lock has
 * a name which is set on initialization. If multiple locks with the
 * same name are used, either an immediate Erlang term (e.g. internal
 * pid) or the address of the lock is used for internal lock order.
 * The immediate Erlang term used for internal lock order is also set
 * on initialization. Locks with small immediate Erlang terms should
 * be locked before locks with large immediate Erlang terms, and
 * locks with small addresses should be locked before locks with
 * large addresses. The immediate terms and addresses (boxed pointers)
 * are compared as unsigned integers, not as Erlang terms.
 *
 * Once a spinlock or rw(spin)lock has been locked, the thread is not
 * allowed to lock mutexes, rwmutexes or process locks until all
 * spinlocks and rwlocks have been unlocked. This restriction is not
 * reflected by the lock order below, but the lock checker will still
 * check for violations of this restriction.
 */
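/*
 * Example: with the order below, a thread holding "proc_main" for one
 * pid may go on to lock "proc_main" for another pid (same name, larger
 * immediate term, assuming the second pid's immediate encoding is
 * larger) or "proc_msgq" (later in the array), but locking "reg_tab"
 * afterwards would be reported as a lock order violation, since
 * "reg_tab" appears earlier in the array.
 */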
static erts_lc_lock_order_t erts_lock_order[] = {
    /*
     *  "Lock name"                     "Internal lock order description
     *                                   (NULL if only one lock uses the
     *                                   lock name)"
     */
#ifdef ERTS_SMP
    { "driver_lock",                    "driver_name" },
    { "port_lock",                      "port_id" },
#endif
    { "port_data_lock",                 "address" },
#ifdef ERTS_SMP
    { "bif_timers",                     NULL },
    { "reg_tab",                        NULL },
    { "migration_info_update",          NULL },
    { "proc_main",                      "pid" },
#ifdef HIPE
    { "hipe_mfait_lock",                NULL },
#endif
    { "nodes_monitors",                 NULL },
    { "driver_list",                    NULL },
    { "proc_link",                      "pid" },
    { "proc_msgq",                      "pid" },
    { "dist_entry",                     "address" },
    { "dist_entry_links",               "address" },
    { "proc_status",                    "pid" },
    { "proc_tab",                       NULL },
    { "ports_snapshot",                 NULL },
    { "db_tab",                         "address" },
    { "db_tab_fix",                     "address" },
    { "meta_name_tab",                  "address" },
    { "meta_main_tab_slot",             "address" },
    { "meta_main_tab_main",             NULL },
    { "db_hash_slot",                   "address" },
    { "node_table",                     NULL },
    { "dist_table",                     NULL },
    { "sys_tracers",                    NULL },
    { "trace_pattern",                  NULL },
    { "module_tab",                     NULL },
    { "export_tab",                     NULL },
    { "fun_tab",                        NULL },
    { "environ",                        NULL },
#endif
    { "asyncq",                         "address" },
#ifndef ERTS_SMP
    { "async_ready",                    NULL },
#endif
    { "efile_drv",                      "address" },
#if defined(ENABLE_CHILD_WAITER_THREAD) || defined(ERTS_SMP)
    { "child_status",                   NULL },
#endif
#ifdef __WIN32__
    { "sys_driver_data_lock",           NULL },
#endif
    { "drv_ev_state_grow",              NULL },
    { "drv_ev_state",                   "address" },
    { "safe_hash",                      "address" },
    { "pollset_rm_list",                NULL },
    { "removed_fd_pre_alloc_lock",      NULL },
    { "state_prealloc",                 NULL },
    { "schdlr_sspnd",                   NULL },
    { "cpu_bind",                       NULL },
    { "run_queue",                      "address" },
    { "pollset",                        "address" },
#ifdef __WIN32__
    { "pollwaiter",                     "address" },
    { "break_waiter_lock",              NULL },
#endif /* __WIN32__ */
    { "alcu_init_atoms",                NULL },
    { "mseg_init_atoms",                NULL },
    { "drv_tsd",                        NULL },
#ifdef ERTS_SMP
    { "sys_msg_q",                      NULL },
    { "atom_tab",                       NULL },
    { "make_ref",                       NULL },
    { "misc_op_list_pre_alloc_lock",    "address" },
    { "message_pre_alloc_lock",         "address" },
    { "ptimer_pre_alloc_lock",          "address" },
    { "btm_pre_alloc_lock",             NULL },
    { "dist_entry_out_queue",           "address" },
#endif
    { "mtrace_op",                      NULL },
    { "instr_x",                        NULL },
    { "instr",                          NULL },
    { "fix_alloc",                      "index" },
    { "alcu_allocator",                 "index" },
    { "mseg",                           NULL },
#ifdef ERTS_SMP
    { "port_task_pre_alloc_lock",       "address" },
    { "port_taskq_pre_alloc_lock",      "address" },
    { "proclist_pre_alloc_lock",        "address" },
    { "port_tasks_lock",                NULL },
    { "get_free_port",                  NULL },
    { "port_state",                     "address" },
    { "xports_list_pre_alloc_lock",     "address" },
    { "inet_buffer_stack_lock",         NULL },
    { "gc_info",                        NULL },
    { "io_wake",                        NULL },
    { "timer_wheel",                    NULL },
    { "system_block",                   NULL },
    { "timeofday",                      NULL },
    { "breakpoints",                    NULL },
    { "pollsets_lock",                  NULL },
    { "async_id",                       NULL },
    { "pix_lock",                       "address" },
    { "run_queues_lists",               NULL },
    { "sched_stat",                     NULL },
#endif
    { "alloc_thr_ix_lock",              NULL },
#ifdef ERTS_SMP
    { "proc_lck_wtr_alloc",             NULL },
#endif
#ifdef __WIN32__
#ifdef DEBUG
    { "save_ops_lock",                  NULL },
#endif
#endif
    { "mtrace_buf",                     NULL }
};

#define ERTS_LOCK_ORDER_SIZE \
    (sizeof(erts_lock_order)/sizeof(erts_lc_lock_order_t))

#define LOCK_IS_TYPE_ORDER_VIOLATION(LCK_FLG, LCKD_FLG)                 \
    (((LCKD_FLG) & (ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)) \
     && ((LCK_FLG)                                                      \
         & ERTS_LC_FLG_LT_ALL                                           \
         & ~(ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)))
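/*
 * In words: the violation fires when the most recently locked lock
 * (LCKD_FLG) is a spinlock or rw(spin)lock while the lock now being
 * taken (LCK_FLG) is of any other (blocking) lock type.
 */
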
static char *
lock_type(Uint16 flags)
{
    switch (flags & ERTS_LC_FLG_LT_ALL) {
    case ERTS_LC_FLG_LT_SPINLOCK:       return "[spinlock]";
    case ERTS_LC_FLG_LT_RWSPINLOCK:     return "[rw(spin)lock]";
    case ERTS_LC_FLG_LT_MUTEX:          return "[mutex]";
    case ERTS_LC_FLG_LT_RWMUTEX:        return "[rwmutex]";
    case ERTS_LC_FLG_LT_PROCLOCK:       return "[proclock]";
    default:                            return "";
    }
}

static char *
rw_op_str(Uint16 flags)
{
    switch (flags & ERTS_LC_FLG_LO_READ_WRITE) {
    case ERTS_LC_FLG_LO_READ_WRITE:
        return " (rw)";
    case ERTS_LC_FLG_LO_READ:
        return " (r)";
    case ERTS_LC_FLG_LO_WRITE:
        erts_fprintf(stderr, "\nInternal error\n");
        abort();
    default:
        break;
    }
    return "";
}

typedef struct erts_lc_locked_lock_t_ erts_lc_locked_lock_t;
struct erts_lc_locked_lock_t_ {
    erts_lc_locked_lock_t *next;
    erts_lc_locked_lock_t *prev;
    Eterm extra;
    Sint16 id;
    Uint16 flags;
};

typedef struct {
    erts_lc_locked_lock_t *first;
    erts_lc_locked_lock_t *last;
} erts_lc_locked_lock_list_t;

typedef struct erts_lc_locked_locks_t_ erts_lc_locked_locks_t;
struct erts_lc_locked_locks_t_ {
    char *thread_name;
    erts_tid_t tid;
    erts_lc_locked_locks_t *next;
    erts_lc_locked_locks_t *prev;
    erts_lc_locked_lock_list_t locked;
    erts_lc_locked_lock_list_t required;
};

typedef union erts_lc_free_block_t_ erts_lc_free_block_t;
union erts_lc_free_block_t_ {
    erts_lc_free_block_t *next;
    erts_lc_locked_lock_t lock;
};

static ethr_tsd_key locks_key;

static erts_lc_locked_locks_t *erts_locked_locks;
static erts_lc_free_block_t *free_blocks;

#ifdef ERTS_LC_STATIC_ALLOC
#define ERTS_LC_FB_CHUNK_SIZE 10000
#else
#define ERTS_LC_FB_CHUNK_SIZE 10
#endif

#ifdef ETHR_HAVE_NATIVE_LOCKS
static ethr_spinlock_t free_blocks_lock;
#define ERTS_LC_LOCK    ethr_spin_lock
#define ERTS_LC_UNLOCK  ethr_spin_unlock
#else
static ethr_mutex free_blocks_lock;
#define ERTS_LC_LOCK    ethr_mutex_lock
#define ERTS_LC_UNLOCK  ethr_mutex_unlock
#endif
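/*
 * Note: free_blocks_lock serializes not only the free_blocks pool but
 * also the global erts_locked_locks thread list (see
 * create_locked_locks() and destroy_locked_locks() below).
 */
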
static ERTS_INLINE void
lc_lock(void)
{
    if (ERTS_LC_LOCK(&free_blocks_lock) != 0)
        abort();
}

static ERTS_INLINE void
lc_unlock(void)
{
    if (ERTS_LC_UNLOCK(&free_blocks_lock) != 0)
        abort();
}

static ERTS_INLINE void lc_free(void *p)
{
    erts_lc_free_block_t *fb = (erts_lc_free_block_t *) p;
#ifdef DEBUG
    memset((void *) p, 0xdf, sizeof(erts_lc_free_block_t));
#endif
    lc_lock();
    fb->next = free_blocks;
    free_blocks = fb;
    lc_unlock();
}

#ifdef ERTS_LC_STATIC_ALLOC

static void *lc_core_alloc(void)
{
    lc_unlock();
    erts_fprintf(stderr, "Lock checker out of memory!\n");
    abort();
}

#else

static void *lc_core_alloc(void)
{
    int i;
    erts_lc_free_block_t *fbs;
    lc_unlock();
    fbs = (erts_lc_free_block_t *) malloc(sizeof(erts_lc_free_block_t)
                                          * ERTS_LC_FB_CHUNK_SIZE);
    if (!fbs) {
        erts_fprintf(stderr, "Lock checker failed to allocate memory!\n");
        abort();
    }
    for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
        memset((void *) &fbs[i], 0xdf, sizeof(erts_lc_free_block_t));
#endif
        fbs[i].next = &fbs[i+1];
    }
#ifdef DEBUG
    memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1],
           0xdf, sizeof(erts_lc_free_block_t));
#endif
    lc_lock();
    fbs[ERTS_LC_FB_CHUNK_SIZE-1].next = free_blocks;
    free_blocks = &fbs[1];
    return (void *) &fbs[0];
}

#endif
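/*
 * lc_core_alloc() is called with free_blocks_lock held and returns with
 * it held: it drops the lock around malloc(), chains blocks 1 through
 * ERTS_LC_FB_CHUNK_SIZE-1 of the new chunk onto the free list, and
 * hands block 0 back to the caller.
 */
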
static ERTS_INLINE void *lc_alloc(void)
{
    void *res;
    lc_lock();
    if (!free_blocks)
        res = lc_core_alloc();
    else {
        res = (void *) free_blocks;
        free_blocks = free_blocks->next;
    }
    lc_unlock();
    return res;
}

static erts_lc_locked_locks_t *
create_locked_locks(char *thread_name)
{
    erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
    if (!l_lcks)
        abort();

    l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
    if (!l_lcks->thread_name)
        abort();

    l_lcks->tid = erts_thr_self();

    l_lcks->required.first = NULL;
    l_lcks->required.last = NULL;
    l_lcks->locked.first = NULL;
    l_lcks->locked.last = NULL;
    l_lcks->prev = NULL;
    lc_lock();
    l_lcks->next = erts_locked_locks;
    if (erts_locked_locks)
        erts_locked_locks->prev = l_lcks;
    erts_locked_locks = l_lcks;
    lc_unlock();
    erts_tsd_set(locks_key, (void *) l_lcks);
    return l_lcks;
}

static void
destroy_locked_locks(erts_lc_locked_locks_t *l_lcks)
{
    ASSERT(l_lcks->thread_name);
    free((void *) l_lcks->thread_name);
    ASSERT(l_lcks->required.first == NULL);
    ASSERT(l_lcks->required.last == NULL);
    ASSERT(l_lcks->locked.first == NULL);
    ASSERT(l_lcks->locked.last == NULL);
    lc_lock();
    if (l_lcks->prev)
        l_lcks->prev->next = l_lcks->next;
    else {
        ASSERT(erts_locked_locks == l_lcks);
        erts_locked_locks = l_lcks->next;
    }
    if (l_lcks->next)
        l_lcks->next->prev = l_lcks->prev;
    lc_unlock();
    free((void *) l_lcks);
}

static ERTS_INLINE erts_lc_locked_locks_t *
get_my_locked_locks(void)
{
    return erts_tsd_get(locks_key);
}

static ERTS_INLINE erts_lc_locked_locks_t *
make_my_locked_locks(void)
{
    erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
    if (l_lcks)
        return l_lcks;
    else
        return create_locked_locks(NULL);
}

static ERTS_INLINE erts_lc_locked_lock_t *
new_locked_lock(erts_lc_lock_t *lck, Uint16 op_flags)
{
    erts_lc_locked_lock_t *l_lck = (erts_lc_locked_lock_t *) lc_alloc();
    l_lck->next = NULL;
    l_lck->prev = NULL;
    l_lck->id = lck->id;
    l_lck->extra = lck->extra;
    l_lck->flags = lck->flags | op_flags;
    return l_lck;
}

static void
print_lock2(char *prefix, Sint16 id, Eterm extra, Uint16 flags, char *suffix)
{
    char *lname = (0 <= id && id < ERTS_LOCK_ORDER_SIZE
                   ? erts_lock_order[id].name
                   : "unknown");
    if (is_boxed(extra))
        erts_fprintf(stderr,
                     "%s'%s:%p%s'%s%s",
                     prefix,
                     lname,
                     boxed_val(extra),
                     lock_type(flags),
                     rw_op_str(flags),
                     suffix);
    else
        erts_fprintf(stderr,
                     "%s'%s:%T%s'%s%s",
                     prefix,
                     lname,
                     extra,
                     lock_type(flags),
                     rw_op_str(flags),
                     suffix);
}
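/*
 * Address-ordered locks carry a boxed pointer in 'extra' and are printed
 * as raw addresses (%p); immediate terms (e.g. pids) are printed as
 * Erlang terms via erts_fprintf()'s %T format.
 */
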
static void
print_lock(char *prefix, erts_lc_lock_t *lck, char *suffix)
{
    print_lock2(prefix, lck->id, lck->extra, lck->flags, suffix);
}

static void
print_curr_locks(erts_lc_locked_locks_t *l_lcks)
{
    erts_lc_locked_lock_t *l_lck;
    if (!l_lcks || !l_lcks->locked.first)
        erts_fprintf(stderr,
                     "Currently no locks are locked by the %s thread.\n",
                     /* l_lcks may be NULL here; fall back to "unknown" */
                     l_lcks ? l_lcks->thread_name : "unknown");
    else {
        erts_fprintf(stderr,
                     "Currently these locks are locked by the %s thread:\n",
                     l_lcks->thread_name);
        for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next)
            print_lock2("  ", l_lck->id, l_lck->extra, l_lck->flags, "\n");
    }
}

static void
print_lock_order(void)
{
    int i;
    erts_fprintf(stderr, "Lock order:\n");
    for (i = 1; i < ERTS_LOCK_ORDER_SIZE; i++) {
        if (erts_lock_order[i].internal_order)
            erts_fprintf(stderr,
                         "  %s:%s\n",
                         erts_lock_order[i].name,
                         erts_lock_order[i].internal_order);
        else
            erts_fprintf(stderr, "  %s\n", erts_lock_order[i].name);
    }
}

static void
uninitialized_lock(void)
{
    erts_fprintf(stderr, "Performing operations on uninitialized lock!\n");
    print_curr_locks(get_my_locked_locks());
    abort();
}

static void
lock_twice(char *prefix, erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
           Uint16 op_flags)
{
    erts_fprintf(stderr, "%s%s", prefix, rw_op_str(op_flags));
    print_lock(" ", lck, " lock which is already locked by thread!\n");
    print_curr_locks(l_lcks);
    abort();
}

static void
unlock_op_mismatch(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
                   Uint16 op_flags)
{
    erts_fprintf(stderr, "Unlocking%s ", rw_op_str(op_flags));
    print_lock("", lck, " lock which mismatches previous lock operation!\n");
    print_curr_locks(l_lcks);
    abort();
}

static void
unlock_of_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
    print_lock("Unlocking ", lck, " lock which is not locked by thread!\n");
    print_curr_locks(l_lcks);
    abort();
}

static void
lock_order_violation(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
    print_lock("Lock order violation occurred when locking ", lck, "!\n");
    print_curr_locks(l_lcks);
    print_lock_order();
    abort();
}

static void
type_order_violation(char *op, erts_lc_locked_locks_t *l_lcks,
                     erts_lc_lock_t *lck)
{
    erts_fprintf(stderr, "Lock type order violation occurred when ");
    print_lock(op, lck, "!\n");
    ASSERT(l_lcks);
    print_curr_locks(l_lcks);
    abort();
}

static void
lock_mismatch(erts_lc_locked_locks_t *l_lcks, int exact,
              int failed_have, erts_lc_lock_t *have, int have_len,
              int failed_have_not, erts_lc_lock_t *have_not, int have_not_len)
{
    int i;
    erts_fprintf(stderr, "Lock mismatch found!\n");
    if (failed_have >= 0) {
        ASSERT(have && have_len > failed_have);
        print_lock2("At least the ",
                    have[failed_have].id, have[failed_have].extra, 0,
                    " lock is not locked when it should have been\n");
    }
    else if (failed_have_not >= 0) {
        ASSERT(have_not && have_not_len > failed_have_not);
        print_lock2("At least the ",
                    have_not[failed_have_not].id,
                    have_not[failed_have_not].extra,
                    0,
                    " lock is locked when it should not have been\n");
    }
    if (exact) {
        if (!have || have_len <= 0)
            erts_fprintf(stderr,
                         "Thread should not have any locks locked at all\n");
        else {
            erts_fprintf(stderr,
                         "Thread should have these and only these locks "
                         "locked:\n");
            for (i = 0; i < have_len; i++)
                print_lock2("  ", have[i].id, have[i].extra, 0, "\n");
        }
    }
    else {
        if (have && have_len > 0) {
            erts_fprintf(stderr,
                         "Thread should at least have these locks locked:\n");
            for (i = 0; i < have_len; i++)
                print_lock2("  ", have[i].id, have[i].extra, 0, "\n");
        }
        if (have_not && have_not_len > 0) {
            erts_fprintf(stderr,
                         "Thread should at least not have these locks "
                         "locked:\n");
            for (i = 0; i < have_not_len; i++)
                print_lock2("  ", have_not[i].id, have_not[i].extra, 0, "\n");
        }
    }
    print_curr_locks(l_lcks);
    abort();
}

static void
unlock_of_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
    print_lock("Unlocking required ", lck, " lock!\n");
    print_curr_locks(l_lcks);
    abort();
}

static void
unrequire_of_not_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
    print_lock("Unrequire on ", lck, " lock not required!\n");
    print_curr_locks(l_lcks);
    abort();
}

static void
require_twice(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
    print_lock("Require on ", lck, " lock already required!\n");
    print_curr_locks(l_lcks);
    abort();
}

static void
required_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
    print_lock("Required ", lck, " lock not locked!\n");
    print_curr_locks(l_lcks);
    abort();
}

static void
thread_exit_handler(void)
{
    erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
    if (l_lcks) {
        if (l_lcks->locked.first) {
            erts_fprintf(stderr,
                         "Thread exiting while having locked locks!\n");
            print_curr_locks(l_lcks);
            abort();
        }
        destroy_locked_locks(l_lcks);
        /* erts_tsd_set(locks_key, NULL); */
    }
}

void
erts_lc_set_thread_name(char *thread_name)
{
    erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
    if (!l_lcks)
        (void) create_locked_locks(thread_name);
    else {
        ASSERT(l_lcks->thread_name);
        free((void *) l_lcks->thread_name);
        l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
        if (!l_lcks->thread_name)
            abort();
    }
}

int
erts_lc_assert_failed(char *file, int line, char *assertion)
{
    erts_fprintf(stderr, "%s:%d: Lock check assertion \"%s\" failed!\n",
                 file, line, assertion);
    print_curr_locks(get_my_locked_locks());
    abort();
    return 0;
}

void erts_lc_fail(char *fmt, ...)
{
    va_list args;
    erts_fprintf(stderr, "Lock check failed: ");
    va_start(args, fmt);
    erts_vfprintf(stderr, fmt, args);
    va_end(args);
    erts_fprintf(stderr, "\n");
    print_curr_locks(get_my_locked_locks());
    abort();
}

Sint16
erts_lc_get_lock_order_id(char *name)
{
    int i;

    if (!name || name[0] == '\0')
        erts_fprintf(stderr, "Missing lock name\n");
    else {
        for (i = 0; i < ERTS_LOCK_ORDER_SIZE; i++)
            if (strcmp(erts_lock_order[i].name, name) == 0)
                return i;
        erts_fprintf(stderr,
                     "Lock name '%s' missing in lock order "
                     "(update erl_lock_check.c)\n",
                     name);
    }
    abort();
    return (Sint16) -1;
}
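/*
 * find_lock() and find_id() below search the per-thread lock list, which
 * is kept sorted on (id, extra). *l_lcks acts as a cursor: it is advanced
 * or rewound from its previous position and left at the nearest entry, so
 * a sorted array of queries (as in erts_lc_have_locks()) can be matched
 * in a single pass over the list.
 */
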
static int
find_lock(erts_lc_locked_lock_t **l_lcks, erts_lc_lock_t *lck)
{
    erts_lc_locked_lock_t *l_lck = *l_lcks;

    if (l_lck) {
        if (l_lck->id == lck->id && l_lck->extra == lck->extra) {
            if ((l_lck->flags & lck->flags) == lck->flags)
                return 1;
            return 0;
        }
        else if (l_lck->id < lck->id
                 || (l_lck->id == lck->id
                     && l_lck->extra < lck->extra)) {
            for (l_lck = l_lck->next; l_lck; l_lck = l_lck->next) {
                if (l_lck->id > lck->id
                    || (l_lck->id == lck->id
                        && l_lck->extra >= lck->extra)) {
                    *l_lcks = l_lck;
                    if (l_lck->id == lck->id
                        && l_lck->extra == lck->extra
                        && ((l_lck->flags & lck->flags) == lck->flags))
                        return 1;
                    return 0;
                }
            }
        }
        else {
            for (l_lck = l_lck->prev; l_lck; l_lck = l_lck->prev) {
                if (l_lck->id < lck->id
                    || (l_lck->id == lck->id
                        && l_lck->extra <= lck->extra)) {
                    *l_lcks = l_lck;
                    if (l_lck->id == lck->id
                        && l_lck->extra == lck->extra
                        && ((l_lck->flags & lck->flags) == lck->flags))
                        return 1;
                    return 0;
                }
            }
        }
    }
    return 0;
}

static int
find_id(erts_lc_locked_lock_t **l_lcks, Sint16 id)
{
    erts_lc_locked_lock_t *l_lck = *l_lcks;

    if (l_lck) {
        if (l_lck->id == id)
            return 1;
        else if (l_lck->id < id) {
            for (l_lck = l_lck->next; l_lck; l_lck = l_lck->next) {
                if (l_lck->id >= id) {
                    *l_lcks = l_lck;
                    if (l_lck->id == id)
                        return 1;
                    return 0;
                }
            }
        }
        else {
            for (l_lck = l_lck->prev; l_lck; l_lck = l_lck->prev) {
                if (l_lck->id <= id) {
                    *l_lcks = l_lck;
                    if (l_lck->id == id)
                        return 1;
                    return 0;
                }
            }
        }
    }
    return 0;
}

void
erts_lc_have_locks(int *resv, erts_lc_lock_t *locks, int len)
{
    erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
    int i;

    if (!l_lcks) {
        for (i = 0; i < len; i++)
            resv[i] = 0;
    }
    else {
        erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
        for (i = 0; i < len; i++)
            resv[i] = find_lock(&l_lck, &locks[i]);
    }
}

void
erts_lc_have_lock_ids(int *resv, int *ids, int len)
{
    erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
    int i;

    if (!l_lcks) {
        for (i = 0; i < len; i++)
            resv[i] = 0;
    }
    else {
        erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
        for (i = 0; i < len; i++)
            resv[i] = find_id(&l_lck, ids[i]);
    }
}

void
erts_lc_check(erts_lc_lock_t *have, int have_len,
              erts_lc_lock_t *have_not, int have_not_len)
{
    int i;
    erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
    erts_lc_locked_lock_t *l_lck;

    if (have && have_len > 0) {
        if (!l_lcks)
            lock_mismatch(NULL, 0,
                          -1, have, have_len,
                          -1, have_not, have_not_len);
        l_lck = l_lcks->locked.first;
        for (i = 0; i < have_len; i++) {
            if (!find_lock(&l_lck, &have[i]))
                lock_mismatch(l_lcks, 0,
                              i, have, have_len,
                              -1, have_not, have_not_len);
        }
    }
    if (have_not && have_not_len > 0 && l_lcks) {
        l_lck = l_lcks->locked.first;
        for (i = 0; i < have_not_len; i++) {
            if (find_lock(&l_lck, &have_not[i]))
                lock_mismatch(l_lcks, 0,
                              -1, have, have_len,
                              i, have_not, have_not_len);
        }
    }
}

void
erts_lc_check_exact(erts_lc_lock_t *have, int have_len)
{
    erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
    if (!l_lcks) {
        if (have && have_len > 0)
            lock_mismatch(NULL, 1,
                          -1, have, have_len,
                          -1, NULL, 0);
    }
    else {
        int i;
        erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
        for (i = 0; i < have_len; i++) {
            if (!find_lock(&l_lck, &have[i]))
                lock_mismatch(l_lcks, 1,
                              i, have, have_len,
                              -1, NULL, 0);
        }
        for (i = 0, l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next)
            i++;
        if (i != have_len)
            lock_mismatch(l_lcks, 1,
                          -1, have, have_len,
                          -1, NULL, 0);
    }
}
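/*
 * Sketch of the intended trylock call pattern (an assumption based on
 * the checks below, not a verbatim caller):
 *
 *     if (erts_lc_trylock_force_busy(&lc))
 *         res = EBUSY;                      (pretend the trylock failed)
 *     else {
 *         res = native_trylock(...);        (hypothetical underlying op)
 *         erts_lc_trylock(res == 0, &lc);
 *     }
 *
 * Forcing busy on (some) out-of-order trylocks verifies that callers
 * really can handle a busy trylock in that situation.
 */
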
int
erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
#ifdef ERTS_LC_DO_NOT_FORCE_BUSY_TRYLOCK_ON_LOCK_ORDER_VIOLATION
    return 0;
#else
    /*
     * Force busy trylock if locking doesn't follow the lock order.
     * This is done in order to make sure that the caller can handle
     * the situation without causing a lock order violation.
     */
    erts_lc_locked_locks_t *l_lcks;

    if (lck->inited != ERTS_LC_INITITALIZED)
        uninitialized_lock();

    if (lck->id < 0)
        return 0;

    l_lcks = get_my_locked_locks();

    if (!l_lcks || !l_lcks->locked.first) {
        ASSERT(!l_lcks || !l_lcks->locked.last);
        return 0;
    }
    else {
        erts_lc_locked_lock_t *tl_lck;

        ASSERT(l_lcks->locked.last);

#if 0 /* Ok when trylocking I guess... */
        if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, l_lcks->locked.last->flags))
            type_order_violation("trylocking ", l_lcks, lck);
#endif

        if (l_lcks->locked.last->id < lck->id
            || (l_lcks->locked.last->id == lck->id
                && l_lcks->locked.last->extra < lck->extra))
            return 0;

        /*
         * Lock order violation
         */

        /* Check that we are not trying to lock this lock twice */
        for (tl_lck = l_lcks->locked.last; tl_lck; tl_lck = tl_lck->prev) {
            if (tl_lck->id < lck->id
                || (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
                if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
                    lock_twice("Trylocking", l_lcks, lck, op_flags);
                break;
            }
        }

#ifndef ERTS_LC_ALLWAYS_FORCE_BUSY_TRYLOCK_ON_LOCK_ORDER_VIOLATION
        /* We only force busy if a lock order violation would occur
           and when on an even millisecond. */
        {
            erts_thr_timeval_t time;
            erts_thr_time_now(&time);

            if ((time.tv_nsec / 1000000) & 1)
                return 0;
        }
#endif

        return 1;
    }
#endif
}
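/*
 * Note that trylocks, unlike plain locks, may legitimately be taken out
 * of order; a successful trylock is therefore spliced into its sorted
 * (id, extra) position in the list rather than appended at the end.
 */
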
void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags)
{
    erts_lc_locked_locks_t *l_lcks;
    erts_lc_locked_lock_t *l_lck;

    if (lck->inited != ERTS_LC_INITITALIZED)
        uninitialized_lock();

    if (lck->id < 0)
        return;

    l_lcks = make_my_locked_locks();
    l_lck = locked ? new_locked_lock(lck, op_flags) : NULL;

    if (!l_lcks->locked.last) {
        ASSERT(!l_lcks->locked.first);
        if (locked)
            l_lcks->locked.first = l_lcks->locked.last = l_lck;
    }
    else {
        erts_lc_locked_lock_t *tl_lck;
#if 0 /* Ok when trylocking I guess... */
        if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, l_lcks->locked.last->flags))
            type_order_violation("trylocking ", l_lcks, lck);
#endif

        for (tl_lck = l_lcks->locked.last; tl_lck; tl_lck = tl_lck->prev) {
            if (tl_lck->id < lck->id
                || (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
                if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
                    lock_twice("Trylocking", l_lcks, lck, op_flags);
                if (locked) {
                    l_lck->next = tl_lck->next;
                    l_lck->prev = tl_lck;
                    if (tl_lck->next)
                        tl_lck->next->prev = l_lck;
                    else
                        l_lcks->locked.last = l_lck;
                    tl_lck->next = l_lck;
                }
                return;
            }
        }

        if (locked) {
            l_lck->next = l_lcks->locked.first;
            l_lcks->locked.first->prev = l_lck;
            l_lcks->locked.first = l_lck;
        }
    }
}
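/*
 * The 'required' list records locks that must remain held by the thread:
 * a required lock must already be locked, may be required only once, and
 * unlocking it before it has been unrequired is reported as an error (see
 * unlock_of_required_lock() above). The list uses the same (id, extra)
 * sort order as the 'locked' list.
 */
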
void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
    erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
    erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
    if (!find_lock(&l_lck, lck))
        required_not_locked(l_lcks, lck);
    l_lck = new_locked_lock(lck, op_flags);
    if (!l_lcks->required.last) {
        ASSERT(!l_lcks->required.first);
        l_lck->next = l_lck->prev = NULL;
        l_lcks->required.first = l_lcks->required.last = l_lck;
    }
    else {
        erts_lc_locked_lock_t *l_lck2;
        ASSERT(l_lcks->required.first);
        for (l_lck2 = l_lcks->required.last;
             l_lck2;
             l_lck2 = l_lck2->prev) {
            if (l_lck2->id < lck->id
                || (l_lck2->id == lck->id && l_lck2->extra < lck->extra))
                break;
            else if (l_lck2->id == lck->id && l_lck2->extra == lck->extra)
                require_twice(l_lcks, lck);
        }
        if (!l_lck2) {
            l_lck->next = l_lcks->required.first;
            l_lck->prev = NULL;
            l_lcks->required.first->prev = l_lck;
            l_lcks->required.first = l_lck;
        }
        else {
            l_lck->next = l_lck2->next;
            if (l_lck->next) {
                ASSERT(l_lcks->required.last != l_lck2);
                l_lck->next->prev = l_lck;
            }
            else {
                ASSERT(l_lcks->required.last == l_lck2);
                l_lcks->required.last = l_lck;
            }
            l_lck->prev = l_lck2;
            l_lck2->next = l_lck;
        }
    }
}

void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
    erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
    erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
    if (!find_lock(&l_lck, lck))
        required_not_locked(l_lcks, lck);
    l_lck = l_lcks->required.first;
    if (!find_lock(&l_lck, lck))
        unrequire_of_not_required_lock(l_lcks, lck);
    if (l_lck->prev) {
        ASSERT(l_lcks->required.first != l_lck);
        l_lck->prev->next = l_lck->next;
    }
    else {
        ASSERT(l_lcks->required.first == l_lck);
        l_lcks->required.first = l_lck->next;
    }
    if (l_lck->next) {
        ASSERT(l_lcks->required.last != l_lck);
        l_lck->next->prev = l_lck->prev;
    }
    else {
        ASSERT(l_lcks->required.last == l_lck);
        l_lcks->required.last = l_lck->prev;
    }
    lc_free((void *) l_lck);
}

void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
    erts_lc_locked_locks_t *l_lcks;
    erts_lc_locked_lock_t *l_lck;

    if (lck->inited != ERTS_LC_INITITALIZED)
        uninitialized_lock();

    if (lck->id < 0)
        return;

    l_lcks = make_my_locked_locks();
    l_lck = new_locked_lock(lck, op_flags);

    if (!l_lcks->locked.last) {
        ASSERT(!l_lcks->locked.first);
        l_lcks->locked.last = l_lcks->locked.first = l_lck;
    }
    else if (l_lcks->locked.last->id < lck->id
             || (l_lcks->locked.last->id == lck->id
                 && l_lcks->locked.last->extra < lck->extra)) {
        if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, l_lcks->locked.last->flags))
            type_order_violation("locking ", l_lcks, lck);
        l_lck->prev = l_lcks->locked.last;
        l_lcks->locked.last->next = l_lck;
        l_lcks->locked.last = l_lck;
    }
    else if (l_lcks->locked.last->id == lck->id
             && l_lcks->locked.last->extra == lck->extra)
        lock_twice("Locking", l_lcks, lck, op_flags);
    else
        lock_order_violation(l_lcks, lck);
}
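/*
 * Plain locking must keep the 'locked' list strictly ascending, so
 * erts_lc_lock_flg() only ever appends at the tail. Unlocking may happen
 * in any order, so erts_lc_unlock_flg() searches the list from the tail
 * and unlinks the matching entry wherever it sits.
 */
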
void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
    erts_lc_locked_locks_t *l_lcks;
    erts_lc_locked_lock_t *l_lck;

    if (lck->inited != ERTS_LC_INITITALIZED)
        uninitialized_lock();

    if (lck->id < 0)
        return;

    l_lcks = get_my_locked_locks();

    if (l_lcks) {
        l_lck = l_lcks->required.first;
        if (find_lock(&l_lck, lck))
            unlock_of_required_lock(l_lcks, lck);
    }

    for (l_lck = l_lcks ? l_lcks->locked.last : NULL; l_lck; l_lck = l_lck->prev) {
        if (l_lck->id == lck->id && l_lck->extra == lck->extra) {
            if ((l_lck->flags & ERTS_LC_FLG_LO_ALL) != op_flags)
                unlock_op_mismatch(l_lcks, lck, op_flags);
            if (l_lck->prev)
                l_lck->prev->next = l_lck->next;
            else
                l_lcks->locked.first = l_lck->next;
            if (l_lck->next)
                l_lck->next->prev = l_lck->prev;
            else
                l_lcks->locked.last = l_lck->prev;
            lc_free((void *) l_lck);
            return;
        }
    }

    unlock_of_not_locked(l_lcks, lck);
}

void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
    erts_lc_locked_locks_t *l_lcks;
    erts_lc_locked_lock_t *l_lck;

    if (lck->inited != ERTS_LC_INITITALIZED)
        uninitialized_lock();

    if (lck->id < 0)
        return;

    l_lcks = get_my_locked_locks();

    if (l_lcks) {
        l_lck = l_lcks->required.first;
        if (find_lock(&l_lck, lck))
            unlock_of_required_lock(l_lcks, lck);
    }

    /* Guard against l_lcks being NULL (no locks ever taken by this
       thread); unlock_of_not_locked() will then report the error. */
    l_lck = l_lcks ? l_lcks->locked.first : NULL;
    if (!find_lock(&l_lck, lck))
        unlock_of_not_locked(l_lcks, lck);
}

int
erts_lc_trylock_force_busy(erts_lc_lock_t *lck)
{
    return erts_lc_trylock_force_busy_flg(lck, 0);
}

void
erts_lc_trylock(int locked, erts_lc_lock_t *lck)
{
    erts_lc_trylock_flg(locked, lck, 0);
}

void
erts_lc_lock(erts_lc_lock_t *lck)
{
    erts_lc_lock_flg(lck, 0);
}

void
erts_lc_unlock(erts_lc_lock_t *lck)
{
    erts_lc_unlock_flg(lck, 0);
}

void erts_lc_might_unlock(erts_lc_lock_t *lck)
{
    erts_lc_might_unlock_flg(lck, 0);
}

void erts_lc_require_lock(erts_lc_lock_t *lck)
{
    erts_lc_require_lock_flg(lck, 0);
}

void erts_lc_unrequire_lock(erts_lc_lock_t *lck)
{
    erts_lc_unrequire_lock_flg(lck, 0);
}
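/*
 * erts_lc_init_lock() uses the lock structure's own address (stored as a
 * boxed term in 'extra') for internal order; this matches the "address"
 * entries in the lock order table. erts_lc_init_lock_x() instead takes an
 * immediate term from the caller, e.g. a pid for the "proc_*" locks.
 */
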
void
erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags)
{
    lck->id = erts_lc_get_lock_order_id(name);
    lck->extra = make_boxed(&lck->extra);
    lck->flags = flags;
    lck->inited = ERTS_LC_INITITALIZED;
}

void
erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, Uint16 flags, Eterm extra)
{
    lck->id = erts_lc_get_lock_order_id(name);
    lck->extra = extra;
    lck->flags = flags;
    lck->inited = ERTS_LC_INITITALIZED;
}

void
erts_lc_destroy_lock(erts_lc_lock_t *lck)
{
    if (lck->inited != ERTS_LC_INITITALIZED)
        uninitialized_lock();

    lck->inited = 0;
    lck->id = -1;
    lck->extra = THE_NON_VALUE;
    lck->flags = 0;
}

void
erts_lc_init(void)
{
#ifdef ERTS_LC_STATIC_ALLOC
    int i;
    static erts_lc_free_block_t fbs[ERTS_LC_FB_CHUNK_SIZE];
    for (i = 0; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
        memset((void *) &fbs[i], 0xdf, sizeof(erts_lc_free_block_t));
#endif
        fbs[i].next = &fbs[i+1];
    }
#ifdef DEBUG
    memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1],
           0xdf, sizeof(erts_lc_free_block_t));
#endif
    fbs[ERTS_LC_FB_CHUNK_SIZE-1].next = NULL;
    free_blocks = &fbs[0];
#else /* #ifdef ERTS_LC_STATIC_ALLOC */
    free_blocks = NULL;
#endif /* #ifdef ERTS_LC_STATIC_ALLOC */

#ifdef ETHR_HAVE_NATIVE_LOCKS
    if (ethr_spinlock_init(&free_blocks_lock) != 0)
        abort();
#else
    if (ethr_mutex_init(&free_blocks_lock) != 0)
        abort();
#endif

    erts_tsd_key_create(&locks_key);
}

void
erts_lc_late_init(void)
{
    erts_thr_install_exit_handler(thread_exit_handler);
}

/*
 * erts_lc_pll(): print locked locks...
 */
void
erts_lc_pll(void)
{
    print_curr_locks(get_my_locked_locks());
}

#endif /* #ifdef ERTS_ENABLE_LOCK_CHECK */