
/src/freebsd/sys/kern/kern_lock.c

https://bitbucket.org/killerpenguinassassins/open_distrib_devel

/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_adaptive_lockmgrs.h"
#include "opt_ddb.h"
#include "opt_kdtrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
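
/*
 * The GIANT_* macros temporarily drop the Giant mutex (recording in '_i'
 * how many times it was recursively held) before a lockmgr sleep or spin
 * and reacquire it the same number of times afterwards.
 */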
#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)
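
/*
 * LK_CAN_SHARE() is true when a new shared lock may be granted for lock
 * state 'x': the lock is currently share-locked and either no exclusive
 * waiter is queued, no exclusive spinner is active, the caller already
 * holds shared lockmgr locks, or deadlock avoidance (TDP_DEADLKTREAT) is
 * in effect.
 */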
#define	LK_CAN_SHARE(x)							\
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	LK_CAN_ADAPT(lk, f)						\
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
	((f) & LK_SLEEPFAIL) == 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

static void	assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(const struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
#endif
static int	unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};
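
/*
 * Tunables for adaptive spinning: alk_retries bounds the number of spin
 * rounds attempted against a share-locked lock and alk_loops bounds the
 * iterations of each round.  Both are exported as the debug.lockmgr.retries
 * and debug.lockmgr.loops sysctls.
 */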
#ifdef ADAPTIVE_LOCKMGRS
static u_int alk_retries = 10;
static u_int alk_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
    "lockmgr debugging");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
#endif

static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * Assumes the sleepqueue chain lock is held on entry and returns with it
 * released.  It also assumes the generic interlock is valid and has already
 * been checked.  If LK_INTERLOCK is specified, the interlock is not
 * reacquired after the sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
		lk->lk_exslpfail++;
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Decide which flavour of sleep is actually needed.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}
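
/*
 * Release one shared hold.  When the last sharer is dropped and waiters are
 * queued, wake them up, preferring the exclusive queue unless it holds only
 * LK_SLEEPFAIL sleepers.  Returns non-zero if the swapper must be kicked.
 */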
static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	u_int realexslp;
	int queue, wakeup_swapper;

	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners-up.
		 * If interruptible sleeps left the exclusive queue empty,
		 * avoid starving the threads sleeping on the shared queue by
		 * giving them precedence and cleaning up the exclusive
		 * waiters bit anyway.
		 * Please note that the lk_exslpfail count may overstate the
		 * real number of waiters with the LK_SLEEPFAIL flag on
		 * because they may be used in conjunction with interruptible
		 * sleeps, so lk_exslpfail should be considered an upper
		 * bound, including the edge cases.
		 */
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		    SQ_EXCLUSIVE_QUEUE);
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				lk->lk_exslpfail = 0;
				LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    __func__, lk);
				LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
				    __func__, lk);
				wakeup_swapper =
				    sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			}
		} else {

			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts on, so clean
			 * it up anyway.
			 */
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
	return (wakeup_swapper);
}

static void
assert_lockmgr(const struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
{

	panic("lockmgr locks do not support owner inquiring");
}
#endif
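
/*
 * Initialize a lockmgr lock: translate the LK_* initialization flags into
 * lock_object flags, record the default priority and timeout used for
 * sleeps, and hand the lock to lock_init() for registration.
 */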
void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
	    &lk->lk_lock));

	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_exslpfail = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	STACK_ZERO(lk);
}

/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
}
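
/*
 * Central lock/unlock engine.  'flags' selects the operation (LK_SHARED,
 * LK_EXCLUSIVE, LK_UPGRADE, LK_DOWNGRADE, LK_RELEASE or LK_DRAIN) plus its
 * modifiers; 'ilk' is an optional interlock released while sleeping.
 * Returns 0 on success or an errno value such as EBUSY, EDEADLK or ENOLCK.
 */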
int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op, realexslp;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef ADAPTIVE_LOCKMGRS
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags for the requested operation @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}

	if (lk->lock_object.lo_flags & LK_NOSHARE) {
		switch (op) {
		case LK_SHARED:
			op = LK_EXCLUSIVE;
			break;
		case LK_UPGRADE:
		case LK_DOWNGRADE:
			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
			    file, line);
			return (0);
		}
	}

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, ilk);
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread has an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * waiters, if we fail to acquire the shared lock
			 * loop back and retry.
			 */
			if (LK_CAN_SHARE(x)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the lock is expected not to sleep just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.  We need a double-state handle here
			 * because for a failed acquisition the lock can be
			 * either held in exclusive mode or shared mode
			 * (for the writer starvation avoidance technique).
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					x = lk->lk_lock;
					if ((x & LK_SHARE) == 0 ||
					    LK_CAN_SHARE(x) != 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_UPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		v = lk->lk_lock;
		x = v & LK_ALL_WAITERS;
		v &= LK_EXCLUSIVE_SPINNERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * We have been unable to succeed in upgrading, so just
		 * give up the shared lock.
		 */
		wakeup_swapper |= wakeupshlk(lk, file, line);

		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {

				/*
				 * If the caller asked for a try operation,
				 * just give up and return instead of
				 * panicking.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
				panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		    tid)) {
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is expected not to sleep just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.
			 */
			x = lk->lk_lock;
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
				    !atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_SPINNERS))
					continue;
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: exclusive spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					if ((lk->lk_lock &
					    LK_EXCLUSIVE_SPINNERS) == 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one successfully acquiring it,
			 * claim lock ownership and return, preserving waiters
			 * flags.
			 */
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v &= ~LK_EXCLUSIVE_SPINNERS;
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the exclusive
			 * lock and the exclusive waiters flag is set, we
			 * will sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock;
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * As a first option, treat the lock as if it has no
			 * waiters.
			 * Fix up the tid var if the lock has been disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else {
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				    file, line);
				TD_LOCKS_DEC(curthread);
			}
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is recursed also, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			if (tid != LK_KERNPROC)
				lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;

			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = LK_UNLOCKED;

			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * shared runners-up.
			 * If interruptible sleeps left the exclusive queue
			 * empty, avoid starving the threads sleeping on the
			 * shared queue by giving them precedence and
			 * cleaning up the exclusive waiters bit anyway.
			 * Please note that the lk_exslpfail count may
			 * overstate the real number of waiters with the
			 * LK_SLEEPFAIL flag on because they may be used in
			 * conjunction with interruptible sleeps, so
			 * lk_exslpfail should be considered an upper bound,
			 * including the edge cases.
			 */
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			realexslp = sleepq_sleepcnt(&lk->lock_object,
			    SQ_EXCLUSIVE_QUEUE);
			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
				if (lk->lk_exslpfail < realexslp) {
					lk->lk_exslpfail = 0;
					queue = SQ_EXCLUSIVE_QUEUE;
					v |= (x & LK_SHARED_WAITERS);
				} else {
					lk->lk_exslpfail = 0;
					LOCK_LOG2(lk,
					    "%s: %p has only LK_SLEEPFAIL sleepers",
					    __func__, lk);
					LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
					    __func__, lk);
					wakeup_swapper =
					    sleepq_broadcast(&lk->lock_object,
					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
					queue = SQ_SHARED_QUEUE;
				}
			} else {

				/*
				 * Exclusive waiters sleeping with LK_SLEEPFAIL
				 * on and using interruptible sleeps/timeout
				 * may have left spurious lk_exslpfail counts
				 * on, so clean it up anyway.
				 */
				lk->lk_exslpfail = 0;
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			    "exclusive");
			atomic_store_rel_ptr(&lk->lk_lock, v);
			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			break;
		} else
			wakeup_swapper = wakeupshlk(lk, file, line);
		break;
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is expected not to sleep just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);

				/*
				 * If interruptible sleeps left the exclusive
				 * queue empty, avoid starving the threads
				 * sleeping on the shared queue by giving them
				 * precedence and cleaning up the exclusive
				 * waiters bit anyway.
				 * Please note that the lk_exslpfail count may
				 * overstate the real number of waiters with
				 * the LK_SLEEPFAIL flag on because they may
				 * be used in conjunction with interruptible
				 * sleeps, so lk_exslpfail should be considered
				 * an upper bound, including the edge cases.
				 */
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {

					/*
					 * Exclusive waiters sleeping with
					 * LK_SLEEPFAIL on and using
					 * interruptible sleeps/timeout may
					 * have left spurious lk_exslpfail
					 * counts on, so clean it up anyway.
					 */
					MPASS(v & LK_SHARED_WAITERS);
					lk->lk_exslpfail = 0;
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (queue == SQ_EXCLUSIVE_QUEUE) {
					realexslp =
					    sleepq_sleepcnt(&lk->lock_object,
					    SQ_EXCLUSIVE_QUEUE);
					if (lk->lk_exslpfail >= realexslp) {
						lk->lk_exslpfail = 0;
						queue = SQ_SHARED_QUEUE;
						v &= ~LK_SHARED_WAITERS;
						if (realexslp != 0) {
							LOCK_LOG2(lk,
					"%s: %p has only LK_SLEEPFAIL sleepers",
							    __func__, lk);
							LOCK_LOG2(lk,
			"%s: %p waking up threads on the exclusive queue",
							    __func__, lk);
							wakeup_swapper =
							    sleepq_broadcast(
							    &lk->lock_object,
							    SLEEPQ_LK, 0,
							    SQ_EXCLUSIVE_QUEUE);
						}
					} else
						lk->lk_exslpfail = 0;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				    "%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				/*
				 * If shared waiters have been woken up we need
				 * to wait for one of them to acquire the lock
				 * before setting the exclusive waiters bit in
				 * order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the exclusive
			 * lock and the exclusive waiters flag is set, we
			 * will sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (wakeup_swapper)
		kick_proc0();

	return (error);
}
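
/*
 * Disown an exclusively held lock: ownership is handed over to the
 * fictitious LK_KERNPROC owner so that any thread may later release it,
 * while the waiters flags are preserved.
 */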
void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);

	/*
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);
	STACK_SAVE(lk);

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock;
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}
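
/*
 * Dump the state of a lockmgr lock to the console: owner or sharer count
 * plus any pending waiters and spinners.  Intended for informational and
 * debugging output.
 */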
void
lockmgr_printinfo(const struct lock *lk)
{
	struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		printf("lock type %s: EXCL by thread %p (pid %d)\n",
		    lk->lock_object.lo_name, td, td->td_proc->p_pid);
	}

	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");
	if (x & LK_EXCLUSIVE_SPINNERS)
		printf(" with exclusive spinners pending\n");

	STACK_PRINT(lk);
}
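
/*
 * Report how the lock is held: LK_EXCLUSIVE if owned exclusively by the
 * caller (or disowned to LK_KERNPROC), LK_EXCLOTHER if another thread owns
 * it exclusively, LK_SHARED if share-locked, or 0 if unlocked.
 */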
int
lockstatus(const struct lock *lk)
{
	uintptr_t v, x;
	int ret;

	ret = LK_SHARED;
	x = lk->lk_lock;
	v = LK_HOLDER(x);

	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
			ret = LK_EXCLUSIVE;
		else
			ret = LK_EXCLOTHER;
	} else if (x == LK_UNLOCKED)
		ret = 0;

	return (ret);
}

#ifdef INVARIANT_SUPPORT

FEATURE(invariant_support,
    "Support for modules compiled with INVARIANTS option");

#ifndef INVARIANTS
#undef	_lockmgr_assert
#endif
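
/*
 * Assertion helper backing lockmgr_assert(): panic unless the lock state
 * matches the KA_* condition requested by the caller.
 */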
void
_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	if (panicstr != NULL)
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
		/* FALLTHROUGH */
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

		/*
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is
		 * held in exclusive mode even for the KA_LOCKED case.
		 */
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
			break;
		}
#endif
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
#endif

#ifdef DDB
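/*
 * DDB helper: if the given thread is blocked on a lockmgr lock, print the
 * lock it is waiting for, report its exclusive owner (if any) through
 * 'ownerp' and return non-zero.
 */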
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lk;

	lk = td->td_wchan;

	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
		return (0);
	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else
		db_printf("EXCL\n");
	*ownerp = lockmgr_xholder(lk);

	return (1);
}

static void
db_show_lockmgr(const struct lock_object *lock)
{
	struct thread *td;
	const struct lock *lk;

	lk = (const struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
		else
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
		if (lockmgr_recursed(lk))
			db_printf(" recursed: %d\n", lk->lk_recurse);
	}
	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		break;
	default:
		db_printf("none\n");
	}
	db_printf(" spinners: ");
	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
		db_printf("exclusive\n");
	else
		db_printf("none\n");
}
#endif
  1323. #endif