
/sys/kern/kern_lock.c

https://bitbucket.org/freebsd/freebsd-head
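
Before the listing, a minimal usage sketch for orientation: the consumer-facing entry points implemented below (lockinit(), lockdestroy(), and the __lockmgr_args() worker) are normally reached through the lockmgr(9) wrapper macros declared in sys/lockmgr.h. The wrapper name lockmgr() and the PVFS priority constant are assumptions about those headers, not something shown in this file.

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/lockmgr.h>

    static struct lock example_lk;

    static void
    example_init(void)
    {
            /* Default priority/timeout, allow recursive exclusive acquires. */
            lockinit(&example_lk, PVFS, "explk", 0, LK_CANRECURSE);
    }

    static void
    example_use(void)
    {
            /* Sleep until the exclusive lock is available; no interlock passed. */
            lockmgr(&example_lk, LK_EXCLUSIVE, NULL);
            /* ... access the data protected by example_lk ... */
            lockmgr(&example_lk, LK_RELEASE, NULL);
    }

    static void
    example_fini(void)
    {
            lockdestroy(&example_lk);
    }
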
  1. /*-
  2. * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
  3. * All rights reserved.
  4. *
  5. * Redistribution and use in source and binary forms, with or without
  6. * modification, are permitted provided that the following conditions
  7. * are met:
  8. * 1. Redistributions of source code must retain the above copyright
  9. * notice(s), this list of conditions and the following disclaimer as
  10. * the first lines of this file unmodified other than the possible
  11. * addition of one or more copyright notices.
  12. * 2. Redistributions in binary form must reproduce the above copyright
  13. * notice(s), this list of conditions and the following disclaimer in the
  14. * documentation and/or other materials provided with the distribution.
  15. *
  16. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
  17. * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  18. * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  19. * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
  20. * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  21. * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  22. * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  23. * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  24. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  25. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
  26. * DAMAGE.
  27. */
  28. #include "opt_adaptive_lockmgrs.h"
  29. #include "opt_ddb.h"
  30. #include "opt_hwpmc_hooks.h"
  31. #include "opt_kdtrace.h"
  32. #include <sys/cdefs.h>
  33. __FBSDID("$FreeBSD$");
  34. #include <sys/param.h>
  35. #include <sys/ktr.h>
  36. #include <sys/lock.h>
  37. #include <sys/lock_profile.h>
  38. #include <sys/lockmgr.h>
  39. #include <sys/mutex.h>
  40. #include <sys/proc.h>
  41. #include <sys/sleepqueue.h>
  42. #ifdef DEBUG_LOCKS
  43. #include <sys/stack.h>
  44. #endif
  45. #include <sys/sysctl.h>
  46. #include <sys/systm.h>
  47. #include <machine/cpu.h>
  48. #ifdef DDB
  49. #include <ddb/ddb.h>
  50. #endif
  51. #ifdef HWPMC_HOOKS
  52. #include <sys/pmckern.h>
  53. PMC_SOFT_DECLARE( , , lock, failed);
  54. #endif
  55. CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
  56. (LK_ADAPTIVE | LK_NOSHARE));
  57. CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
  58. ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
  59. #define SQ_EXCLUSIVE_QUEUE 0
  60. #define SQ_SHARED_QUEUE 1
  61. #ifndef INVARIANTS
  62. #define _lockmgr_assert(lk, what, file, line)
  63. #define TD_LOCKS_INC(td)
  64. #define TD_LOCKS_DEC(td)
  65. #else
  66. #define TD_LOCKS_INC(td) ((td)->td_locks++)
  67. #define TD_LOCKS_DEC(td) ((td)->td_locks--)
  68. #endif
  69. #define TD_SLOCKS_INC(td) ((td)->td_lk_slocks++)
  70. #define TD_SLOCKS_DEC(td) ((td)->td_lk_slocks--)
  71. #ifndef DEBUG_LOCKS
  72. #define STACK_PRINT(lk)
  73. #define STACK_SAVE(lk)
  74. #define STACK_ZERO(lk)
  75. #else
  76. #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
  77. #define STACK_SAVE(lk) stack_save(&(lk)->lk_stack)
  78. #define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack)
  79. #endif
  80. #define LOCK_LOG2(lk, string, arg1, arg2) \
  81. if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
  82. CTR2(KTR_LOCK, (string), (arg1), (arg2))
  83. #define LOCK_LOG3(lk, string, arg1, arg2, arg3) \
  84. if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
  85. CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
  86. #define GIANT_DECLARE \
  87. int _i = 0; \
  88. WITNESS_SAVE_DECL(Giant)
  89. #define GIANT_RESTORE() do { \
  90. if (_i > 0) { \
  91. while (_i--) \
  92. mtx_lock(&Giant); \
  93. WITNESS_RESTORE(&Giant.lock_object, Giant); \
  94. } \
  95. } while (0)
  96. #define GIANT_SAVE() do { \
  97. if (mtx_owned(&Giant)) { \
  98. WITNESS_SAVE(&Giant.lock_object, Giant); \
  99. while (mtx_owned(&Giant)) { \
  100. _i++; \
  101. mtx_unlock(&Giant); \
  102. } \
  103. } \
  104. } while (0)
  105. #define LK_CAN_SHARE(x) \
  106. (((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 || \
  107. ((x) & LK_EXCLUSIVE_SPINNERS) == 0 || \
  108. curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
  109. #define LK_TRYOP(x) \
  110. ((x) & LK_NOWAIT)
  111. #define LK_CAN_WITNESS(x) \
  112. (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
  113. #define LK_TRYWIT(x) \
  114. (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
  115. #define LK_CAN_ADAPT(lk, f) \
  116. (((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 && \
  117. ((f) & LK_SLEEPFAIL) == 0)
  118. #define lockmgr_disowned(lk) \
  119. (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
  120. #define lockmgr_xlocked(lk) \
  121. (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
  122. static void assert_lockmgr(const struct lock_object *lock, int how);
  123. #ifdef DDB
  124. static void db_show_lockmgr(const struct lock_object *lock);
  125. #endif
  126. static void lock_lockmgr(struct lock_object *lock, int how);
  127. #ifdef KDTRACE_HOOKS
  128. static int owner_lockmgr(const struct lock_object *lock,
  129. struct thread **owner);
  130. #endif
  131. static int unlock_lockmgr(struct lock_object *lock);
  132. struct lock_class lock_class_lockmgr = {
  133. .lc_name = "lockmgr",
  134. .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
  135. .lc_assert = assert_lockmgr,
  136. #ifdef DDB
  137. .lc_ddb_show = db_show_lockmgr,
  138. #endif
  139. .lc_lock = lock_lockmgr,
  140. .lc_unlock = unlock_lockmgr,
  141. #ifdef KDTRACE_HOOKS
  142. .lc_owner = owner_lockmgr,
  143. #endif
  144. };
  145. #ifdef ADAPTIVE_LOCKMGRS
  146. static u_int alk_retries = 10;
  147. static u_int alk_loops = 10000;
  148. static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
  149. "lockmgr debugging");
  150. SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
  151. SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
  152. #endif
  153. static __inline struct thread *
  154. lockmgr_xholder(const struct lock *lk)
  155. {
  156. uintptr_t x;
  157. x = lk->lk_lock;
  158. return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
  159. }
  160. /*
  161. * It assumes the sleepq_lock is held on entry and returns with it released.
  162. * It also assumes the generic interlock is sane and has already been checked.
  163. * If LK_INTERLOCK is specified the interlock is not reacquired after the
  164. * sleep.
  165. */
  166. static __inline int
  167. sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
  168. const char *wmesg, int pri, int timo, int queue)
  169. {
  170. GIANT_DECLARE;
  171. struct lock_class *class;
  172. int catch, error;
  173. class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
  174. catch = pri & PCATCH;
  175. pri &= PRIMASK;
  176. error = 0;
  177. LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
  178. (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
  179. if (flags & LK_INTERLOCK)
  180. class->lc_unlock(ilk);
  181. if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
  182. lk->lk_exslpfail++;
  183. GIANT_SAVE();
  184. sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
  185. SLEEPQ_INTERRUPTIBLE : 0), queue);
  186. if ((flags & LK_TIMELOCK) && timo)
  187. sleepq_set_timeout(&lk->lock_object, timo);
  188. /*
  189. * Decide which sleep variant to perform.
  190. */
  191. if ((flags & LK_TIMELOCK) && timo && catch)
  192. error = sleepq_timedwait_sig(&lk->lock_object, pri);
  193. else if ((flags & LK_TIMELOCK) && timo)
  194. error = sleepq_timedwait(&lk->lock_object, pri);
  195. else if (catch)
  196. error = sleepq_wait_sig(&lk->lock_object, pri);
  197. else
  198. sleepq_wait(&lk->lock_object, pri);
  199. GIANT_RESTORE();
  200. if ((flags & LK_SLEEPFAIL) && error == 0)
  201. error = ENOLCK;
  202. return (error);
  203. }
  204. static __inline int
  205. wakeupshlk(struct lock *lk, const char *file, int line)
  206. {
  207. uintptr_t v, x;
  208. u_int realexslp;
  209. int queue, wakeup_swapper;
  210. TD_LOCKS_DEC(curthread);
  211. TD_SLOCKS_DEC(curthread);
  212. WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
  213. LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
  214. wakeup_swapper = 0;
  215. for (;;) {
  216. x = lk->lk_lock;
  217. /*
  218. * If there is more than one shared lock held, just drop one
  219. * and return.
  220. */
  221. if (LK_SHARERS(x) > 1) {
  222. if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
  223. x - LK_ONE_SHARER))
  224. break;
  225. continue;
  226. }
  227. /*
  228. * If there are no waiters on the exclusive queue, drop the
  229. * lock quickly.
  230. */
  231. if ((x & LK_ALL_WAITERS) == 0) {
  232. MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
  233. LK_SHARERS_LOCK(1));
  234. if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
  235. break;
  236. continue;
  237. }
  238. /*
  239. * We should have a sharer with waiters, so enter the hard
  240. * path in order to handle wakeups correctly.
  241. */
  242. sleepq_lock(&lk->lock_object);
  243. x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
  244. v = LK_UNLOCKED;
  245. /*
  246. * If the lock has exclusive waiters, give them preference in
  247. * order to avoid deadlock with shared runners up.
  248. * If interruptible sleeps left the exclusive queue empty
  249. * avoid a starvation for the threads sleeping on the shared
  250. * queue by giving them precedence and cleaning up the
  251. * exclusive waiters bit anyway.
  252. * Please note that lk_exslpfail count may be lying about
  253. * the real number of waiters with the LK_SLEEPFAIL flag on
  254. * because they may be used in conjunction with interruptible
  255. * sleeps so lk_exslpfail might be considered an 'upper limit'
  256. * bound, including the edge cases.
  257. */
  258. realexslp = sleepq_sleepcnt(&lk->lock_object,
  259. SQ_EXCLUSIVE_QUEUE);
  260. if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
  261. if (lk->lk_exslpfail < realexslp) {
  262. lk->lk_exslpfail = 0;
  263. queue = SQ_EXCLUSIVE_QUEUE;
  264. v |= (x & LK_SHARED_WAITERS);
  265. } else {
  266. lk->lk_exslpfail = 0;
  267. LOCK_LOG2(lk,
  268. "%s: %p has only LK_SLEEPFAIL sleepers",
  269. __func__, lk);
  270. LOCK_LOG2(lk,
  271. "%s: %p waking up threads on the exclusive queue",
  272. __func__, lk);
  273. wakeup_swapper =
  274. sleepq_broadcast(&lk->lock_object,
  275. SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
  276. queue = SQ_SHARED_QUEUE;
  277. }
  278. } else {
  279. /*
  280. * Exclusive waiters sleeping with LK_SLEEPFAIL on
  281. * and using interruptible sleeps/timeout may have
  282. * left spurious lk_exslpfail counts on, so clean
  283. * it up anyway.
  284. */
  285. lk->lk_exslpfail = 0;
  286. queue = SQ_SHARED_QUEUE;
  287. }
  288. if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
  289. v)) {
  290. sleepq_release(&lk->lock_object);
  291. continue;
  292. }
  293. LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
  294. __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
  295. "exclusive");
  296. wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
  297. 0, queue);
  298. sleepq_release(&lk->lock_object);
  299. break;
  300. }
  301. lock_profile_release_lock(&lk->lock_object);
  302. return (wakeup_swapper);
  303. }
  304. static void
  305. assert_lockmgr(const struct lock_object *lock, int what)
  306. {
  307. panic("lockmgr locks do not support assertions");
  308. }
  309. static void
  310. lock_lockmgr(struct lock_object *lock, int how)
  311. {
  312. panic("lockmgr locks do not support sleep interlocking");
  313. }
  314. static int
  315. unlock_lockmgr(struct lock_object *lock)
  316. {
  317. panic("lockmgr locks do not support sleep interlocking");
  318. }
  319. #ifdef KDTRACE_HOOKS
  320. static int
  321. owner_lockmgr(const struct lock_object *lock, struct thread **owner)
  322. {
  323. panic("lockmgr locks do not support owner inquiring");
  324. }
  325. #endif
  326. void
  327. lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
  328. {
  329. int iflags;
  330. MPASS((flags & ~LK_INIT_MASK) == 0);
  331. ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
  332. ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
  333. &lk->lk_lock));
  334. iflags = LO_SLEEPABLE | LO_UPGRADABLE;
  335. if (flags & LK_CANRECURSE)
  336. iflags |= LO_RECURSABLE;
  337. if ((flags & LK_NODUP) == 0)
  338. iflags |= LO_DUPOK;
  339. if (flags & LK_NOPROFILE)
  340. iflags |= LO_NOPROFILE;
  341. if ((flags & LK_NOWITNESS) == 0)
  342. iflags |= LO_WITNESS;
  343. if (flags & LK_QUIET)
  344. iflags |= LO_QUIET;
  345. iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
  346. lk->lk_lock = LK_UNLOCKED;
  347. lk->lk_recurse = 0;
  348. lk->lk_exslpfail = 0;
  349. lk->lk_timo = timo;
  350. lk->lk_pri = pri;
  351. lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
  352. STACK_ZERO(lk);
  353. }
  354. /*
  355. * XXX: Gross hacks to manipulate external lock flags after
  356. * initialization. Used for certain vnode and buf locks.
  357. */
  358. void
  359. lockallowshare(struct lock *lk)
  360. {
  361. lockmgr_assert(lk, KA_XLOCKED);
  362. lk->lock_object.lo_flags &= ~LK_NOSHARE;
  363. }
  364. void
  365. lockallowrecurse(struct lock *lk)
  366. {
  367. lockmgr_assert(lk, KA_XLOCKED);
  368. lk->lock_object.lo_flags |= LO_RECURSABLE;
  369. }
  370. void
  371. lockdisablerecurse(struct lock *lk)
  372. {
  373. lockmgr_assert(lk, KA_XLOCKED);
  374. lk->lock_object.lo_flags &= ~LO_RECURSABLE;
  375. }
  376. void
  377. lockdestroy(struct lock *lk)
  378. {
  379. KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
  380. KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
  381. KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
  382. lock_destroy(&lk->lock_object);
  383. }
  384. int
  385. __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
  386. const char *wmesg, int pri, int timo, const char *file, int line)
  387. {
  388. GIANT_DECLARE;
  389. struct lock_class *class;
  390. const char *iwmesg;
  391. uintptr_t tid, v, x;
  392. u_int op, realexslp;
  393. int error, ipri, itimo, queue, wakeup_swapper;
  394. #ifdef LOCK_PROFILING
  395. uint64_t waittime = 0;
  396. int contested = 0;
  397. #endif
  398. #ifdef ADAPTIVE_LOCKMGRS
  399. volatile struct thread *owner;
  400. u_int i, spintries = 0;
  401. #endif
  402. error = 0;
  403. tid = (uintptr_t)curthread;
  404. op = (flags & LK_TYPE_MASK);
  405. iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
  406. ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
  407. itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
  408. MPASS((flags & ~LK_TOTAL_MASK) == 0);
  409. KASSERT((op & (op - 1)) == 0,
  410. ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
  411. KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
  412. (op != LK_DOWNGRADE && op != LK_RELEASE),
  413. ("%s: Invalid flags in regard of the operation desired @ %s:%d",
  414. __func__, file, line));
  415. KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
  416. ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
  417. __func__, file, line));
  418. class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
  419. if (panicstr != NULL) {
  420. if (flags & LK_INTERLOCK)
  421. class->lc_unlock(ilk);
  422. return (0);
  423. }
  424. if (lk->lock_object.lo_flags & LK_NOSHARE) {
  425. switch (op) {
  426. case LK_SHARED:
  427. op = LK_EXCLUSIVE;
  428. break;
  429. case LK_UPGRADE:
  430. case LK_DOWNGRADE:
  431. _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
  432. file, line);
  433. return (0);
  434. }
  435. }
  436. wakeup_swapper = 0;
  437. switch (op) {
  438. case LK_SHARED:
  439. if (LK_CAN_WITNESS(flags))
  440. WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
  441. file, line, ilk);
  442. for (;;) {
  443. x = lk->lk_lock;
  444. /*
  445. * If no other thread has an exclusive lock, or
  446. * no exclusive waiter is present, bump the count of
  447. * sharers. Since we have to preserve the state of
  448. * waiters, if we fail to acquire the shared lock
  449. * loop back and retry.
  450. */
  451. if (LK_CAN_SHARE(x)) {
  452. if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
  453. x + LK_ONE_SHARER))
  454. break;
  455. continue;
  456. }
  457. #ifdef HWPMC_HOOKS
  458. PMC_SOFT_CALL( , , lock, failed);
  459. #endif
  460. lock_profile_obtain_lock_failed(&lk->lock_object,
  461. &contested, &waittime);
  462. /*
  463. * If the lock is already held by curthread in
  464. * exclusive way avoid a deadlock.
  465. */
  466. if (LK_HOLDER(x) == tid) {
  467. LOCK_LOG2(lk,
  468. "%s: %p already held in exclusive mode",
  469. __func__, lk);
  470. error = EDEADLK;
  471. break;
  472. }
  473. /*
  474. * If the operation is not allowed to sleep, just give up
  475. * and return.
  476. */
  477. if (LK_TRYOP(flags)) {
  478. LOCK_LOG2(lk, "%s: %p fails the try operation",
  479. __func__, lk);
  480. error = EBUSY;
  481. break;
  482. }
  483. #ifdef ADAPTIVE_LOCKMGRS
  484. /*
  485. * If the owner is running on another CPU, spin until
  486. * the owner stops running or the state of the lock
  487. * changes. We need a double-state handle here
  488. * because for a failed acquisition the lock can be
  489. * either held in exclusive mode or shared mode
  490. * (for the writer starvation avoidance technique).
  491. */
  492. if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  493. LK_HOLDER(x) != LK_KERNPROC) {
  494. owner = (struct thread *)LK_HOLDER(x);
  495. if (LOCK_LOG_TEST(&lk->lock_object, 0))
  496. CTR3(KTR_LOCK,
  497. "%s: spinning on %p held by %p",
  498. __func__, lk, owner);
  499. /*
  500. * If we are holding also an interlock drop it
  501. * in order to avoid a deadlock if the lockmgr
  502. * owner is adaptively spinning on the
  503. * interlock itself.
  504. */
  505. if (flags & LK_INTERLOCK) {
  506. class->lc_unlock(ilk);
  507. flags &= ~LK_INTERLOCK;
  508. }
  509. GIANT_SAVE();
  510. while (LK_HOLDER(lk->lk_lock) ==
  511. (uintptr_t)owner && TD_IS_RUNNING(owner))
  512. cpu_spinwait();
  513. GIANT_RESTORE();
  514. continue;
  515. } else if (LK_CAN_ADAPT(lk, flags) &&
  516. (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
  517. spintries < alk_retries) {
  518. if (flags & LK_INTERLOCK) {
  519. class->lc_unlock(ilk);
  520. flags &= ~LK_INTERLOCK;
  521. }
  522. GIANT_SAVE();
  523. spintries++;
  524. for (i = 0; i < alk_loops; i++) {
  525. if (LOCK_LOG_TEST(&lk->lock_object, 0))
  526. CTR4(KTR_LOCK,
  527. "%s: shared spinning on %p with %u and %u",
  528. __func__, lk, spintries, i);
  529. x = lk->lk_lock;
  530. if ((x & LK_SHARE) == 0 ||
  531. LK_CAN_SHARE(x) != 0)
  532. break;
  533. cpu_spinwait();
  534. }
  535. GIANT_RESTORE();
  536. if (i != alk_loops)
  537. continue;
  538. }
  539. #endif
  540. /*
  541. * Acquire the sleepqueue chain lock because we
  542. * probably will need to manipulate waiters flags.
  543. */
  544. sleepq_lock(&lk->lock_object);
  545. x = lk->lk_lock;
  546. /*
  547. * if the lock can be acquired in shared mode, try
  548. * again.
  549. */
  550. if (LK_CAN_SHARE(x)) {
  551. sleepq_release(&lk->lock_object);
  552. continue;
  553. }
  554. #ifdef ADAPTIVE_LOCKMGRS
  555. /*
  556. * The current lock owner might have started executing
  557. * on another CPU (or the lock could have changed
  558. * owner) while we were waiting on the turnstile
  559. * chain lock. If so, drop the turnstile lock and try
  560. * again.
  561. */
  562. if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  563. LK_HOLDER(x) != LK_KERNPROC) {
  564. owner = (struct thread *)LK_HOLDER(x);
  565. if (TD_IS_RUNNING(owner)) {
  566. sleepq_release(&lk->lock_object);
  567. continue;
  568. }
  569. }
  570. #endif
  571. /*
  572. * Try to set the LK_SHARED_WAITERS flag. If we fail,
  573. * loop back and retry.
  574. */
  575. if ((x & LK_SHARED_WAITERS) == 0) {
  576. if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
  577. x | LK_SHARED_WAITERS)) {
  578. sleepq_release(&lk->lock_object);
  579. continue;
  580. }
  581. LOCK_LOG2(lk, "%s: %p set shared waiters flag",
  582. __func__, lk);
  583. }
  584. /*
  585. * Since we have been unable to acquire the
  586. * shared lock and the shared waiters flag is set,
  587. * we will sleep.
  588. */
  589. error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
  590. SQ_SHARED_QUEUE);
  591. flags &= ~LK_INTERLOCK;
  592. if (error) {
  593. LOCK_LOG3(lk,
  594. "%s: interrupted sleep for %p with %d",
  595. __func__, lk, error);
  596. break;
  597. }
  598. LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
  599. __func__, lk);
  600. }
  601. if (error == 0) {
  602. lock_profile_obtain_lock_success(&lk->lock_object,
  603. contested, waittime, file, line);
  604. LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
  605. line);
  606. WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
  607. line);
  608. TD_LOCKS_INC(curthread);
  609. TD_SLOCKS_INC(curthread);
  610. STACK_SAVE(lk);
  611. }
  612. break;
  613. case LK_UPGRADE:
  614. _lockmgr_assert(lk, KA_SLOCKED, file, line);
  615. v = lk->lk_lock;
  616. x = v & LK_ALL_WAITERS;
  617. v &= LK_EXCLUSIVE_SPINNERS;
  618. /*
  619. * Try to switch from one shared lock to an exclusive one.
  620. * We need to preserve waiters flags during the operation.
  621. */
  622. if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
  623. tid | x)) {
  624. LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
  625. line);
  626. WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
  627. LK_TRYWIT(flags), file, line);
  628. TD_SLOCKS_DEC(curthread);
  629. break;
  630. }
  631. /*
  632. * We have been unable to succeed in upgrading, so just
  633. * give up the shared lock.
  634. */
  635. wakeup_swapper |= wakeupshlk(lk, file, line);
  636. /* FALLTHROUGH */
  637. case LK_EXCLUSIVE:
  638. if (LK_CAN_WITNESS(flags))
  639. WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
  640. LOP_EXCLUSIVE, file, line, ilk);
  641. /*
  642. * If curthread already holds the lock and this one is
  643. * allowed to recurse, simply recurse on it.
  644. */
  645. if (lockmgr_xlocked(lk)) {
  646. if ((flags & LK_CANRECURSE) == 0 &&
  647. (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
  648. /*
  649. * If this is a try operation, give up and return
  650. * instead of panicking.
  651. */
  652. if (LK_TRYOP(flags)) {
  653. LOCK_LOG2(lk,
  654. "%s: %p fails the try operation",
  655. __func__, lk);
  656. error = EBUSY;
  657. break;
  658. }
  659. if (flags & LK_INTERLOCK)
  660. class->lc_unlock(ilk);
  661. panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
  662. __func__, iwmesg, file, line);
  663. }
  664. lk->lk_recurse++;
  665. LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
  666. LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
  667. lk->lk_recurse, file, line);
  668. WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
  669. LK_TRYWIT(flags), file, line);
  670. TD_LOCKS_INC(curthread);
  671. break;
  672. }
  673. while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
  674. tid)) {
  675. #ifdef HWPMC_HOOKS
  676. PMC_SOFT_CALL( , , lock, failed);
  677. #endif
  678. lock_profile_obtain_lock_failed(&lk->lock_object,
  679. &contested, &waittime);
  680. /*
  681. * If the operation is not allowed to sleep, just give up
  682. * and return.
  683. */
  684. if (LK_TRYOP(flags)) {
  685. LOCK_LOG2(lk, "%s: %p fails the try operation",
  686. __func__, lk);
  687. error = EBUSY;
  688. break;
  689. }
  690. #ifdef ADAPTIVE_LOCKMGRS
  691. /*
  692. * If the owner is running on another CPU, spin until
  693. * the owner stops running or the state of the lock
  694. * changes.
  695. */
  696. x = lk->lk_lock;
  697. if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  698. LK_HOLDER(x) != LK_KERNPROC) {
  699. owner = (struct thread *)LK_HOLDER(x);
  700. if (LOCK_LOG_TEST(&lk->lock_object, 0))
  701. CTR3(KTR_LOCK,
  702. "%s: spinning on %p held by %p",
  703. __func__, lk, owner);
  704. /*
  705. * If we are holding also an interlock drop it
  706. * in order to avoid a deadlock if the lockmgr
  707. * owner is adaptively spinning on the
  708. * interlock itself.
  709. */
  710. if (flags & LK_INTERLOCK) {
  711. class->lc_unlock(ilk);
  712. flags &= ~LK_INTERLOCK;
  713. }
  714. GIANT_SAVE();
  715. while (LK_HOLDER(lk->lk_lock) ==
  716. (uintptr_t)owner && TD_IS_RUNNING(owner))
  717. cpu_spinwait();
  718. GIANT_RESTORE();
  719. continue;
  720. } else if (LK_CAN_ADAPT(lk, flags) &&
  721. (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
  722. spintries < alk_retries) {
  723. if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
  724. !atomic_cmpset_ptr(&lk->lk_lock, x,
  725. x | LK_EXCLUSIVE_SPINNERS))
  726. continue;
  727. if (flags & LK_INTERLOCK) {
  728. class->lc_unlock(ilk);
  729. flags &= ~LK_INTERLOCK;
  730. }
  731. GIANT_SAVE();
  732. spintries++;
  733. for (i = 0; i < alk_loops; i++) {
  734. if (LOCK_LOG_TEST(&lk->lock_object, 0))
  735. CTR4(KTR_LOCK,
  736. "%s: shared spinning on %p with %u and %u",
  737. __func__, lk, spintries, i);
  738. if ((lk->lk_lock &
  739. LK_EXCLUSIVE_SPINNERS) == 0)
  740. break;
  741. cpu_spinwait();
  742. }
  743. GIANT_RESTORE();
  744. if (i != alk_loops)
  745. continue;
  746. }
  747. #endif
  748. /*
  749. * Acquire the sleepqueue chain lock because we
  750. * probably will need to manipulate waiters flags.
  751. */
  752. sleepq_lock(&lk->lock_object);
  753. x = lk->lk_lock;
  754. /*
  755. * if the lock has been released while we spun on
  756. * the sleepqueue chain lock just try again.
  757. */
  758. if (x == LK_UNLOCKED) {
  759. sleepq_release(&lk->lock_object);
  760. continue;
  761. }
  762. #ifdef ADAPTIVE_LOCKMGRS
  763. /*
  764. * The current lock owner might have started executing
  765. * on another CPU (or the lock could have changed
  766. * owner) while we were waiting on the turnstile
  767. * chain lock. If so, drop the turnstile lock and try
  768. * again.
  769. */
  770. if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  771. LK_HOLDER(x) != LK_KERNPROC) {
  772. owner = (struct thread *)LK_HOLDER(x);
  773. if (TD_IS_RUNNING(owner)) {
  774. sleepq_release(&lk->lock_object);
  775. continue;
  776. }
  777. }
  778. #endif
  779. /*
  780. * The lock can be in the state where there is a
  781. * pending queue of waiters, but still no owner.
  782. * This happens when the lock is contested and an
  783. * owner is going to claim the lock.
  784. * If curthread is the one successfully acquiring it
  785. * claim lock ownership and return, preserving waiters
  786. * flags.
  787. */
  788. v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
  789. if ((x & ~v) == LK_UNLOCKED) {
  790. v &= ~LK_EXCLUSIVE_SPINNERS;
  791. if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
  792. tid | v)) {
  793. sleepq_release(&lk->lock_object);
  794. LOCK_LOG2(lk,
  795. "%s: %p claimed by a new writer",
  796. __func__, lk);
  797. break;
  798. }
  799. sleepq_release(&lk->lock_object);
  800. continue;
  801. }
  802. /*
  803. * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
  804. * fail, loop back and retry.
  805. */
  806. if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
  807. if (!atomic_cmpset_ptr(&lk->lk_lock, x,
  808. x | LK_EXCLUSIVE_WAITERS)) {
  809. sleepq_release(&lk->lock_object);
  810. continue;
  811. }
  812. LOCK_LOG2(lk, "%s: %p set excl waiters flag",
  813. __func__, lk);
  814. }
  815. /*
  816. * Since we have been unable to acquire the
  817. * exclusive lock and the exclusive waiters flag
  818. * is set, we will sleep.
  819. */
  820. error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
  821. SQ_EXCLUSIVE_QUEUE);
  822. flags &= ~LK_INTERLOCK;
  823. if (error) {
  824. LOCK_LOG3(lk,
  825. "%s: interrupted sleep for %p with %d",
  826. __func__, lk, error);
  827. break;
  828. }
  829. LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
  830. __func__, lk);
  831. }
  832. if (error == 0) {
  833. lock_profile_obtain_lock_success(&lk->lock_object,
  834. contested, waittime, file, line);
  835. LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
  836. lk->lk_recurse, file, line);
  837. WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
  838. LK_TRYWIT(flags), file, line);
  839. TD_LOCKS_INC(curthread);
  840. STACK_SAVE(lk);
  841. }
  842. break;
  843. case LK_DOWNGRADE:
  844. _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
  845. LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
  846. WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
  847. TD_SLOCKS_INC(curthread);
  848. /*
  849. * In order to preserve waiters flags, just spin.
  850. */
  851. for (;;) {
  852. x = lk->lk_lock;
  853. MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
  854. x &= LK_ALL_WAITERS;
  855. if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
  856. LK_SHARERS_LOCK(1) | x))
  857. break;
  858. cpu_spinwait();
  859. }
  860. break;
  861. case LK_RELEASE:
  862. _lockmgr_assert(lk, KA_LOCKED, file, line);
  863. x = lk->lk_lock;
  864. if ((x & LK_SHARE) == 0) {
  865. /*
  866. * As a first option, treat the lock as if it has no
  867. * waiters.
  868. * Fix-up the tid var if the lock has been disowned.
  869. */
  870. if (LK_HOLDER(x) == LK_KERNPROC)
  871. tid = LK_KERNPROC;
  872. else {
  873. WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
  874. file, line);
  875. TD_LOCKS_DEC(curthread);
  876. }
  877. LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
  878. lk->lk_recurse, file, line);
  879. /*
  880. * The lock is held in exclusive mode.
  881. * If the lock is recursed also, then unrecurse it.
  882. */
  883. if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
  884. LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
  885. lk);
  886. lk->lk_recurse--;
  887. break;
  888. }
  889. if (tid != LK_KERNPROC)
  890. lock_profile_release_lock(&lk->lock_object);
  891. if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
  892. LK_UNLOCKED))
  893. break;
  894. sleepq_lock(&lk->lock_object);
  895. x = lk->lk_lock;
  896. v = LK_UNLOCKED;
  897. /*
  898. * If the lock has exclusive waiters, give them
  899. * preference in order to avoid deadlock with
  900. * shared runners up.
  901. * If interruptible sleeps left the exclusive queue
  902. * empty avoid a starvation for the threads sleeping
  903. * on the shared queue by giving them precedence
  904. * and cleaning up the exclusive waiters bit anyway.
  905. * Please note that lk_exslpfail count may be lying
  906. * about the real number of waiters with the
  907. * LK_SLEEPFAIL flag on because they may be used in
  908. * conjunction with interruptible sleeps so
  909. * lk_exslpfail might be considered an 'upper limit'
  910. * bound, including the edge cases.
  911. */
  912. MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
  913. realexslp = sleepq_sleepcnt(&lk->lock_object,
  914. SQ_EXCLUSIVE_QUEUE);
  915. if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
  916. if (lk->lk_exslpfail < realexslp) {
  917. lk->lk_exslpfail = 0;
  918. queue = SQ_EXCLUSIVE_QUEUE;
  919. v |= (x & LK_SHARED_WAITERS);
  920. } else {
  921. lk->lk_exslpfail = 0;
  922. LOCK_LOG2(lk,
  923. "%s: %p has only LK_SLEEPFAIL sleepers",
  924. __func__, lk);
  925. LOCK_LOG2(lk,
  926. "%s: %p waking up threads on the exclusive queue",
  927. __func__, lk);
  928. wakeup_swapper =
  929. sleepq_broadcast(&lk->lock_object,
  930. SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
  931. queue = SQ_SHARED_QUEUE;
  932. }
  933. } else {
  934. /*
  935. * Exclusive waiters sleeping with LK_SLEEPFAIL
  936. * on and using interruptible sleeps/timeout
  937. * may have left spurious lk_exslpfail counts
  938. * on, so clean it up anyway.
  939. */
  940. lk->lk_exslpfail = 0;
  941. queue = SQ_SHARED_QUEUE;
  942. }
  943. LOCK_LOG3(lk,
  944. "%s: %p waking up threads on the %s queue",
  945. __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
  946. "exclusive");
  947. atomic_store_rel_ptr(&lk->lk_lock, v);
  948. wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
  949. SLEEPQ_LK, 0, queue);
  950. sleepq_release(&lk->lock_object);
  951. break;
  952. } else
  953. wakeup_swapper = wakeupshlk(lk, file, line);
  954. break;
  955. case LK_DRAIN:
  956. if (LK_CAN_WITNESS(flags))
  957. WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
  958. LOP_EXCLUSIVE, file, line, ilk);
  959. /*
  960. * Trying to drain a lock we already own will result in a
  961. * deadlock.
  962. */
  963. if (lockmgr_xlocked(lk)) {
  964. if (flags & LK_INTERLOCK)
  965. class->lc_unlock(ilk);
  966. panic("%s: draining %s with the lock held @ %s:%d\n",
  967. __func__, iwmesg, file, line);
  968. }
  969. while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
  970. #ifdef HWPMC_HOOKS
  971. PMC_SOFT_CALL( , , lock, failed);
  972. #endif
  973. lock_profile_obtain_lock_failed(&lk->lock_object,
  974. &contested, &waittime);
  975. /*
  976. * If the operation is not allowed to sleep, just give up
  977. * and return.
  978. */
  979. if (LK_TRYOP(flags)) {
  980. LOCK_LOG2(lk, "%s: %p fails the try operation",
  981. __func__, lk);
  982. error = EBUSY;
  983. break;
  984. }
  985. /*
  986. * Acquire the sleepqueue chain lock because we
  987. * probably will need to manipulate waiters flags.
  988. */
  989. sleepq_lock(&lk->lock_object);
  990. x = lk->lk_lock;
  991. /*
  992. * if the lock has been released while we spun on
  993. * the sleepqueue chain lock just try again.
  994. */
  995. if (x == LK_UNLOCKED) {
  996. sleepq_release(&lk->lock_object);
  997. continue;
  998. }
  999. v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
  1000. if ((x & ~v) == LK_UNLOCKED) {
  1001. v = (x & ~LK_EXCLUSIVE_SPINNERS);
  1002. /*
  1003. * If interruptible sleeps left the exclusive
  1004. * queue empty avoid a starvation for the
  1005. * threads sleeping on the shared queue by
  1006. * giving them precedence and cleaning up the
  1007. * exclusive waiters bit anyway.
  1008. * Please note that lk_exslpfail count may be
  1009. * lying about the real number of waiters with
  1010. * the LK_SLEEPFAIL flag on because they may
  1011. * be used in conjunction with interruptible
  1012. * sleeps so lk_exslpfail might be considered
  1013. * an 'upper limit' bound, including the edge
  1014. * cases.
  1015. */
  1016. if (v & LK_EXCLUSIVE_WAITERS) {
  1017. queue = SQ_EXCLUSIVE_QUEUE;
  1018. v &= ~LK_EXCLUSIVE_WAITERS;
  1019. } else {
  1020. /*
  1021. * Exclusive waiters sleeping with
  1022. * LK_SLEEPFAIL on and using
  1023. * interruptible sleeps/timeout may
  1024. * have left spurious lk_exslpfail
  1025. * counts on, so clean it up anyway.
  1026. */
  1027. MPASS(v & LK_SHARED_WAITERS);
  1028. lk->lk_exslpfail = 0;
  1029. queue = SQ_SHARED_QUEUE;
  1030. v &= ~LK_SHARED_WAITERS;
  1031. }
  1032. if (queue == SQ_EXCLUSIVE_QUEUE) {
  1033. realexslp =
  1034. sleepq_sleepcnt(&lk->lock_object,
  1035. SQ_EXCLUSIVE_QUEUE);
  1036. if (lk->lk_exslpfail >= realexslp) {
  1037. lk->lk_exslpfail = 0;
  1038. queue = SQ_SHARED_QUEUE;
  1039. v &= ~LK_SHARED_WAITERS;
  1040. if (realexslp != 0) {
  1041. LOCK_LOG2(lk,
  1042. "%s: %p has only LK_SLEEPFAIL sleepers",
  1043. __func__, lk);
  1044. LOCK_LOG2(lk,
  1045. "%s: %p waking up threads on the exclusive queue",
  1046. __func__, lk);
  1047. wakeup_swapper =
  1048. sleepq_broadcast(
  1049. &lk->lock_object,
  1050. SLEEPQ_LK, 0,
  1051. SQ_EXCLUSIVE_QUEUE);
  1052. }
  1053. } else
  1054. lk->lk_exslpfail = 0;
  1055. }
  1056. if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
  1057. sleepq_release(&lk->lock_object);
  1058. continue;
  1059. }
  1060. LOCK_LOG3(lk,
  1061. "%s: %p waking up all threads on the %s queue",
  1062. __func__, lk, queue == SQ_SHARED_QUEUE ?
  1063. "shared" : "exclusive");
  1064. wakeup_swapper |= sleepq_broadcast(
  1065. &lk->lock_object, SLEEPQ_LK, 0, queue);
  1066. /*
  1067. * If shared waiters have been woken up we need
  1068. * to wait for one of them to acquire the lock
  1069. * before setting the exclusive waiters in
  1070. * order to avoid a deadlock.
  1071. */
  1072. if (queue == SQ_SHARED_QUEUE) {
  1073. for (v = lk->lk_lock;
  1074. (v & LK_SHARE) && !LK_SHARERS(v);
  1075. v = lk->lk_lock)
  1076. cpu_spinwait();
  1077. }
  1078. }
  1079. /*
  1080. * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
  1081. * fail, loop back and retry.
  1082. */
  1083. if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
  1084. if (!atomic_cmpset_ptr(&lk->lk_lock, x,
  1085. x | LK_EXCLUSIVE_WAITERS)) {
  1086. sleepq_release(&lk->lock_object);
  1087. continue;
  1088. }
  1089. LOCK_LOG2(lk, "%s: %p set drain waiters flag",
  1090. __func__, lk);
  1091. }
  1092. /*
  1093. * Since we have been unable to acquire the
  1094. * exclusive lock and the exclusive waiters flag
  1095. * is set, we will sleep.
  1096. */
  1097. if (flags & LK_INTERLOCK) {
  1098. class->lc_unlock(ilk);
  1099. flags &= ~LK_INTERLOCK;
  1100. }
  1101. GIANT_SAVE();
  1102. sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
  1103. SQ_EXCLUSIVE_QUEUE);
  1104. sleepq_wait(&lk->lock_object, ipri & PRIMASK);
  1105. GIANT_RESTORE();
  1106. LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
  1107. __func__, lk);
  1108. }
  1109. if (error == 0) {
  1110. lock_profile_obtain_lock_success(&lk->lock_object,
  1111. contested, waittime, file, line);
  1112. LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
  1113. lk->lk_recurse, file, line);
  1114. WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
  1115. LK_TRYWIT(flags), file, line);
  1116. TD_LOCKS_INC(curthread);
  1117. STACK_SAVE(lk);
  1118. }
  1119. break;
  1120. default:
  1121. if (flags & LK_INTERLOCK)
  1122. class->lc_unlock(ilk);
  1123. panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
  1124. }
  1125. if (flags & LK_INTERLOCK)
  1126. class->lc_unlock(ilk);
  1127. if (wakeup_swapper)
  1128. kick_proc0();
  1129. return (error);
  1130. }
  1131. void
  1132. _lockmgr_disown(struct lock *lk, const char *file, int line)
  1133. {
  1134. uintptr_t tid, x;
  1135. if (SCHEDULER_STOPPED())
  1136. return;
  1137. tid = (uintptr_t)curthread;
  1138. _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
  1139. /*
  1140. * If the owner is already LK_KERNPROC just skip the whole operation.
  1141. */
  1142. if (LK_HOLDER(lk->lk_lock) != tid)
  1143. return;
  1144. lock_profile_release_lock(&lk->lock_object);
  1145. LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
  1146. WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
  1147. TD_LOCKS_DEC(curthread);
  1148. STACK_SAVE(lk);
  1149. /*
  1150. * In order to preserve waiters flags, just spin.
  1151. */
  1152. for (;;) {
  1153. x = lk->lk_lock;
  1154. MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
  1155. x &= LK_ALL_WAITERS;
  1156. if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
  1157. LK_KERNPROC | x))
  1158. return;
  1159. cpu_spinwait();
  1160. }
  1161. }
  1162. void
  1163. lockmgr_printinfo(const struct lock *lk)
  1164. {
  1165. struct thread *td;
  1166. uintptr_t x;
  1167. if (lk->lk_lock == LK_UNLOCKED)
  1168. printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
  1169. else if (lk->lk_lock & LK_SHARE)
  1170. printf("lock type %s: SHARED (count %ju)\n",
  1171. lk->lock_object.lo_name,
  1172. (uintmax_t)LK_SHARERS(lk->lk_lock));
  1173. else {
  1174. td = lockmgr_xholder(lk);
  1175. printf("lock type %s: EXCL by thread %p "
  1176. "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name, td,
  1177. td->td_proc->p_pid, td->td_proc->p_comm, td->td_tid);
  1178. }
  1179. x = lk->lk_lock;
  1180. if (x & LK_EXCLUSIVE_WAITERS)
  1181. printf(" with exclusive waiters pending\n");
  1182. if (x & LK_SHARED_WAITERS)
  1183. printf(" with shared waiters pending\n");
  1184. if (x & LK_EXCLUSIVE_SPINNERS)
  1185. printf(" with exclusive spinners pending\n");
  1186. STACK_PRINT(lk);
  1187. }
  1188. int
  1189. lockstatus(const struct lock *lk)
  1190. {
  1191. uintptr_t v, x;
  1192. int ret;
  1193. ret = LK_SHARED;
  1194. x = lk->lk_lock;
  1195. v = LK_HOLDER(x);
  1196. if ((x & LK_SHARE) == 0) {
  1197. if (v == (uintptr_t)curthread || v == LK_KERNPROC)
  1198. ret = LK_EXCLUSIVE;
  1199. else
  1200. ret = LK_EXCLOTHER;
  1201. } else if (x == LK_UNLOCKED)
  1202. ret = 0;
  1203. return (ret);
  1204. }
  1205. #ifdef INVARIANT_SUPPORT
  1206. FEATURE(invariant_support,
  1207. "Support for modules compiled with INVARIANTS option");
  1208. #ifndef INVARIANTS
  1209. #undef _lockmgr_assert
  1210. #endif
  1211. void
  1212. _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
  1213. {
  1214. int slocked = 0;
  1215. if (panicstr != NULL)
  1216. return;
  1217. switch (what) {
  1218. case KA_SLOCKED:
  1219. case KA_SLOCKED | KA_NOTRECURSED:
  1220. case KA_SLOCKED | KA_RECURSED:
  1221. slocked = 1;
  1222. case KA_LOCKED:
  1223. case KA_LOCKED | KA_NOTRECURSED:
  1224. case KA_LOCKED | KA_RECURSED:
  1225. #ifdef WITNESS
  1226. /*
  1227. * We cannot trust WITNESS if the lock is held in exclusive
  1228. * mode and a call to lockmgr_disown() happened.
  1229. * Work around this by skipping the check if the lock is held in
  1230. * exclusive mode even for the KA_LOCKED case.
  1231. */
  1232. if (slocked || (lk->lk_lock & LK_SHARE)) {
  1233. witness_assert(&lk->lock_object, what, file, line);
  1234. break;
  1235. }
  1236. #endif
  1237. if (lk->lk_lock == LK_UNLOCKED ||
  1238. ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
  1239. (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
  1240. panic("Lock %s not %slocked @ %s:%d\n",
  1241. lk->lock_object.lo_name, slocked ? "share" : "",
  1242. file, line);
  1243. if ((lk->lk_lock & LK_SHARE) == 0) {
  1244. if (lockmgr_recursed(lk)) {
  1245. if (what & KA_NOTRECURSED)
  1246. panic("Lock %s recursed @ %s:%d\n",
  1247. lk->lock_object.lo_name, file,
  1248. line);
  1249. } else if (what & KA_RECURSED)
  1250. panic("Lock %s not recursed @ %s:%d\n",
  1251. lk->lock_object.lo_name, file, line);
  1252. }
  1253. break;
  1254. case KA_XLOCKED:
  1255. case KA_XLOCKED | KA_NOTRECURSED:
  1256. case KA_XLOCKED | KA_RECURSED:
  1257. if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
  1258. panic("Lock %s not exclusively locked @ %s:%d\n",
  1259. lk->lock_object.lo_name, file, line);
  1260. if (lockmgr_recursed(lk)) {
  1261. if (what & KA_NOTRECURSED)
  1262. panic("Lock %s recursed @ %s:%d\n",
  1263. lk->lock_object.lo_name, file, line);
  1264. } else if (what & KA_RECURSED)
  1265. panic("Lock %s not recursed @ %s:%d\n",
  1266. lk->lock_object.lo_name, file, line);
  1267. break;
  1268. case KA_UNLOCKED:
  1269. if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
  1270. panic("Lock %s exclusively locked @ %s:%d\n",
  1271. lk->lock_object.lo_name, file, line);
  1272. break;
  1273. default:
  1274. panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
  1275. line);
  1276. }
  1277. }
  1278. #endif
  1279. #ifdef DDB
  1280. int
  1281. lockmgr_chain(struct thread *td, struct thread **ownerp)
  1282. {
  1283. struct lock *lk;
  1284. lk = td->td_wchan;
  1285. if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
  1286. return (0);
  1287. db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
  1288. if (lk->lk_lock & LK_SHARE)
  1289. db_printf("SHARED (count %ju)\n",
  1290. (uintmax_t)LK_SHARERS(lk->lk_lock));
  1291. else
  1292. db_printf("EXCL\n");
  1293. *ownerp = lockmgr_xholder(lk);
  1294. return (1);
  1295. }
  1296. static void
  1297. db_show_lockmgr(const struct lock_object *lock)
  1298. {
  1299. struct thread *td;
  1300. const struct lock *lk;
  1301. lk = (const struct lock *)lock;
  1302. db_printf(" state: ");
  1303. if (lk->lk_lock == LK_UNLOCKED)
  1304. db_printf("UNLOCKED\n");
  1305. else if (lk->lk_lock & LK_SHARE)
  1306. db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
  1307. else {
  1308. td = lockmgr_xholder(lk);
  1309. if (td == (struct thread *)LK_KERNPROC)
  1310. db_printf("XLOCK: LK_KERNPROC\n");
  1311. else
  1312. db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
  1313. td->td_tid, td->td_proc->p_pid,
  1314. td->td_proc->p_comm);
  1315. if (lockmgr_recursed(lk))
  1316. db_printf(" recursed: %d\n", lk->lk_recurse);
  1317. }
  1318. db_printf(" waiters: ");
  1319. switch (lk->lk_lock & LK_ALL_WAITERS) {
  1320. case LK_SHARED_WAITERS:
  1321. db_printf("shared\n");
  1322. break;
  1323. case LK_EXCLUSIVE_WAITERS:
  1324. db_printf("exclusive\n");
  1325. break;
  1326. case LK_ALL_WAITERS:
  1327. db_printf("shared and exclusive\n");
  1328. break;
  1329. default:
  1330. db_printf("none\n");
  1331. }
  1332. db_printf(" spinners: ");
  1333. if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
  1334. db_printf("exclusive\n");
  1335. else
  1336. db_printf("none\n");
  1337. }
  1338. #endif
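
As a follow-up to the assertion and status helpers above, a short sketch of how other subsystems typically consult them (lockmgr_assert() is the wrapper already used by lockallowshare() in this file; example_lk refers to the sketch before the listing):

    /* Under INVARIANTS, panic unless curthread holds example_lk exclusively. */
    lockmgr_assert(&example_lk, KA_XLOCKED);

    /* Query the state without asserting; values match lockstatus() above. */
    switch (lockstatus(&example_lk)) {
    case LK_EXCLUSIVE:      /* held exclusively by curthread (or disowned) */
            break;
    case LK_EXCLOTHER:      /* held exclusively by another thread */
            break;
    case LK_SHARED:         /* held in shared mode */
            break;
    case 0:                 /* unlocked */
            break;
    }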