/*
 * contrib/bind9/lib/isc/rwlock.c
 * Source: https://bitbucket.org/freebsd/freebsd-head/
 */

/*
 * Copyright (C) 2004, 2005, 2007, 2009, 2011, 2012 Internet Systems Consortium, Inc. ("ISC")
 * Copyright (C) 1998-2001, 2003 Internet Software Consortium.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/* $Id$ */

/*! \file */
  19. #include <config.h>
  20. #include <stddef.h>
  21. #include <isc/atomic.h>
  22. #include <isc/magic.h>
  23. #include <isc/msgs.h>
  24. #include <isc/platform.h>
  25. #include <isc/rwlock.h>
  26. #include <isc/util.h>
  27. #define RWLOCK_MAGIC ISC_MAGIC('R', 'W', 'L', 'k')
  28. #define VALID_RWLOCK(rwl) ISC_MAGIC_VALID(rwl, RWLOCK_MAGIC)
  29. #ifdef ISC_PLATFORM_USETHREADS
  30. #ifndef RWLOCK_DEFAULT_READ_QUOTA
  31. #define RWLOCK_DEFAULT_READ_QUOTA 4
  32. #endif
  33. #ifndef RWLOCK_DEFAULT_WRITE_QUOTA
  34. #define RWLOCK_DEFAULT_WRITE_QUOTA 4
  35. #endif
  36. #ifdef ISC_RWLOCK_TRACE
  37. #include <stdio.h> /* Required for fprintf/stderr. */
  38. #include <isc/thread.h> /* Required for isc_thread_self(). */
  39. static void
  40. print_lock(const char *operation, isc_rwlock_t *rwl, isc_rwlocktype_t type) {
  41. fprintf(stderr,
  42. isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
  43. ISC_MSG_PRINTLOCK,
  44. "rwlock %p thread %lu %s(%s): %s, %u active, "
  45. "%u granted, %u rwaiting, %u wwaiting\n"),
  46. rwl, isc_thread_self(), operation,
  47. (type == isc_rwlocktype_read ?
  48. isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
  49. ISC_MSG_READ, "read") :
  50. isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
  51. ISC_MSG_WRITE, "write")),
  52. (rwl->type == isc_rwlocktype_read ?
  53. isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
  54. ISC_MSG_READING, "reading") :
  55. isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
  56. ISC_MSG_WRITING, "writing")),
  57. rwl->active, rwl->granted, rwl->readers_waiting,
  58. rwl->writers_waiting);
  59. }
  60. #endif
  61. isc_result_t
  62. isc_rwlock_init(isc_rwlock_t *rwl, unsigned int read_quota,
  63. unsigned int write_quota)
  64. {
  65. isc_result_t result;
  66. REQUIRE(rwl != NULL);
  67. /*
  68. * In case there's trouble initializing, we zero magic now. If all
  69. * goes well, we'll set it to RWLOCK_MAGIC.
  70. */
  71. rwl->magic = 0;
  72. #if defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG)
  73. rwl->write_requests = 0;
  74. rwl->write_completions = 0;
  75. rwl->cnt_and_flag = 0;
  76. rwl->readers_waiting = 0;
  77. rwl->write_granted = 0;
  78. if (read_quota != 0) {
  79. UNEXPECTED_ERROR(__FILE__, __LINE__,
  80. "read quota is not supported");
  81. }
  82. if (write_quota == 0)
  83. write_quota = RWLOCK_DEFAULT_WRITE_QUOTA;
  84. rwl->write_quota = write_quota;
  85. #else
  86. rwl->type = isc_rwlocktype_read;
  87. rwl->original = isc_rwlocktype_none;
  88. rwl->active = 0;
  89. rwl->granted = 0;
  90. rwl->readers_waiting = 0;
  91. rwl->writers_waiting = 0;
  92. if (read_quota == 0)
  93. read_quota = RWLOCK_DEFAULT_READ_QUOTA;
  94. rwl->read_quota = read_quota;
  95. if (write_quota == 0)
  96. write_quota = RWLOCK_DEFAULT_WRITE_QUOTA;
  97. rwl->write_quota = write_quota;
  98. #endif
  99. result = isc_mutex_init(&rwl->lock);
  100. if (result != ISC_R_SUCCESS)
  101. return (result);
  102. result = isc_condition_init(&rwl->readable);
  103. if (result != ISC_R_SUCCESS) {
  104. UNEXPECTED_ERROR(__FILE__, __LINE__,
  105. "isc_condition_init(readable) %s: %s",
  106. isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
  107. ISC_MSG_FAILED, "failed"),
  108. isc_result_totext(result));
  109. result = ISC_R_UNEXPECTED;
  110. goto destroy_lock;
  111. }
  112. result = isc_condition_init(&rwl->writeable);
  113. if (result != ISC_R_SUCCESS) {
  114. UNEXPECTED_ERROR(__FILE__, __LINE__,
  115. "isc_condition_init(writeable) %s: %s",
  116. isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
  117. ISC_MSG_FAILED, "failed"),
  118. isc_result_totext(result));
  119. result = ISC_R_UNEXPECTED;
  120. goto destroy_rcond;
  121. }
  122. rwl->magic = RWLOCK_MAGIC;
  123. return (ISC_R_SUCCESS);
  124. destroy_rcond:
  125. (void)isc_condition_destroy(&rwl->readable);
  126. destroy_lock:
  127. DESTROYLOCK(&rwl->lock);
  128. return (result);
  129. }
  130. void
  131. isc_rwlock_destroy(isc_rwlock_t *rwl) {
  132. REQUIRE(VALID_RWLOCK(rwl));
  133. #if defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG)
  134. REQUIRE(rwl->write_requests == rwl->write_completions &&
  135. rwl->cnt_and_flag == 0 && rwl->readers_waiting == 0);
  136. #else
  137. LOCK(&rwl->lock);
  138. REQUIRE(rwl->active == 0 &&
  139. rwl->readers_waiting == 0 &&
  140. rwl->writers_waiting == 0);
  141. UNLOCK(&rwl->lock);
  142. #endif
  143. rwl->magic = 0;
  144. (void)isc_condition_destroy(&rwl->readable);
  145. (void)isc_condition_destroy(&rwl->writeable);
  146. DESTROYLOCK(&rwl->lock);
  147. }
  148. #if defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG)
  149. /*
  150. * When some architecture-dependent atomic operations are available,
  151. * rwlock can be more efficient than the generic algorithm defined below.
  152. * The basic algorithm is described in the following URL:
  153. * http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/rw.html
  154. *
  155. * The key is to use the following integer variables modified atomically:
  156. * write_requests, write_completions, and cnt_and_flag.
  157. *
  158. * write_requests and write_completions act as a waiting queue for writers
  159. * in order to ensure the FIFO order. Both variables begin with the initial
  160. * value of 0. When a new writer tries to get a write lock, it increments
  161. * write_requests and gets the previous value of the variable as a "ticket".
  162. * When write_completions reaches the ticket number, the new writer can start
  163. * writing. When the writer completes its work, it increments
  164. * write_completions so that another new writer can start working. If the
  165. * write_requests is not equal to write_completions, it means a writer is now
  166. * working or waiting. In this case, a new readers cannot start reading, or
  167. * in other words, this algorithm basically prefers writers.
  168. *
  169. * cnt_and_flag is a "lock" shared by all readers and writers. This integer
  170. * variable is a kind of structure with two members: writer_flag (1 bit) and
  171. * reader_count (31 bits). The writer_flag shows whether a writer is working,
  172. * and the reader_count shows the number of readers currently working or almost
  173. * ready for working. A writer who has the current "ticket" tries to get the
  174. * lock by exclusively setting the writer_flag to 1, provided that the whole
  175. * 32-bit is 0 (meaning no readers or writers working). On the other hand,
  176. * a new reader tries to increment the "reader_count" field provided that
  177. * the writer_flag is 0 (meaning there is no writer working).
  178. *
  179. * If some of the above operations fail, the reader or the writer sleeps
  180. * until the related condition changes. When a working reader or writer
  181. * completes its work, some readers or writers are sleeping, and the condition
  182. * that suspended the reader or writer has changed, it wakes up the sleeping
  183. * readers or writers.
  184. *
  185. * As already noted, this algorithm basically prefers writers. In order to
  186. * prevent readers from starving, however, the algorithm also introduces the
  187. * "writer quota" (Q). When Q consecutive writers have completed their work,
  188. * suspending readers, the last writer will wake up the readers, even if a new
  189. * writer is waiting.
  190. *
  191. * Implementation specific note: due to the combination of atomic operations
  192. * and a mutex lock, ordering between the atomic operation and locks can be
  193. * very sensitive in some cases. In particular, it is generally very important
  194. * to check the atomic variable that requires a reader or writer to sleep after
  195. * locking the mutex and before actually sleeping; otherwise, it could be very
  196. * likely to cause a deadlock. For example, assume "var" is a variable
  197. * atomically modified, then the corresponding code would be:
  198. * if (var == need_sleep) {
  199. * LOCK(lock);
  200. * if (var == need_sleep)
  201. * WAIT(cond, lock);
  202. * UNLOCK(lock);
  203. * }
  204. * The second check is important, since "var" is protected by the atomic
  205. * operation, not by the mutex, and can be changed just before sleeping.
  206. * (The first "if" could be omitted, but this is also important in order to
  207. * make the code efficient by avoiding the use of the mutex unless it is
  208. * really necessary.)
  209. */
  210. #define WRITER_ACTIVE 0x1
  211. #define READER_INCR 0x2
  212. isc_result_t
  213. isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
  214. isc_int32_t cntflag;
  215. REQUIRE(VALID_RWLOCK(rwl));
  216. #ifdef ISC_RWLOCK_TRACE
  217. print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
  218. ISC_MSG_PRELOCK, "prelock"), rwl, type);
  219. #endif
  220. if (type == isc_rwlocktype_read) {
  221. if (rwl->write_requests != rwl->write_completions) {
  222. /* there is a waiting or active writer */
  223. LOCK(&rwl->lock);
  224. if (rwl->write_requests != rwl->write_completions) {
  225. rwl->readers_waiting++;
  226. WAIT(&rwl->readable, &rwl->lock);
  227. rwl->readers_waiting--;
  228. }
  229. UNLOCK(&rwl->lock);
  230. }
  231. cntflag = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
  232. POST(cntflag);
  233. while (1) {
  234. if ((rwl->cnt_and_flag & WRITER_ACTIVE) == 0)
  235. break;
  236. /* A writer is still working */
  237. LOCK(&rwl->lock);
  238. rwl->readers_waiting++;
  239. if ((rwl->cnt_and_flag & WRITER_ACTIVE) != 0)
  240. WAIT(&rwl->readable, &rwl->lock);
  241. rwl->readers_waiting--;
  242. UNLOCK(&rwl->lock);
  243. /*
  244. * Typically, the reader should be able to get a lock
  245. * at this stage:
  246. * (1) there should have been no pending writer when
  247. * the reader was trying to increment the
  248. * counter; otherwise, the writer should be in
  249. * the waiting queue, preventing the reader from
  250. * proceeding to this point.
  251. * (2) once the reader increments the counter, no
  252. * more writer can get a lock.
  253. * Still, it is possible another writer can work at
  254. * this point, e.g. in the following scenario:
  255. * A previous writer unlocks the writer lock.
  256. * This reader proceeds to point (1).
  257. * A new writer appears, and gets a new lock before
  258. * the reader increments the counter.
  259. * The reader then increments the counter.
  260. * The previous writer notices there is a waiting
  261. * reader who is almost ready, and wakes it up.
  262. * So, the reader needs to confirm whether it can now
  263. * read explicitly (thus we loop). Note that this is
  264. * not an infinite process, since the reader has
  265. * incremented the counter at this point.
  266. */
  267. }
  268. /*
  269. * If we are temporarily preferred to writers due to the writer
  270. * quota, reset the condition (race among readers doesn't
  271. * matter).
  272. */
  273. rwl->write_granted = 0;
  274. } else {
  275. isc_int32_t prev_writer;
  276. /* enter the waiting queue, and wait for our turn */
  277. prev_writer = isc_atomic_xadd(&rwl->write_requests, 1);
  278. while (rwl->write_completions != prev_writer) {
  279. LOCK(&rwl->lock);
  280. if (rwl->write_completions != prev_writer) {
  281. WAIT(&rwl->writeable, &rwl->lock);
  282. UNLOCK(&rwl->lock);
  283. continue;
  284. }
  285. UNLOCK(&rwl->lock);
  286. break;
  287. }
  288. while (1) {
  289. cntflag = isc_atomic_cmpxchg(&rwl->cnt_and_flag, 0,
  290. WRITER_ACTIVE);
  291. if (cntflag == 0)
  292. break;
  293. /* Another active reader or writer is working. */
  294. LOCK(&rwl->lock);
  295. if (rwl->cnt_and_flag != 0)
  296. WAIT(&rwl->writeable, &rwl->lock);
  297. UNLOCK(&rwl->lock);
  298. }
  299. INSIST((rwl->cnt_and_flag & WRITER_ACTIVE) != 0);
  300. rwl->write_granted++;
  301. }
  302. #ifdef ISC_RWLOCK_TRACE
  303. print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
  304. ISC_MSG_POSTLOCK, "postlock"), rwl, type);
  305. #endif
  306. return (ISC_R_SUCCESS);
  307. }
  308. isc_result_t
  309. isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
  310. isc_int32_t cntflag;
  311. REQUIRE(VALID_RWLOCK(rwl));
  312. #ifdef ISC_RWLOCK_TRACE
  313. print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
  314. ISC_MSG_PRELOCK, "prelock"), rwl, type);
  315. #endif
  316. if (type == isc_rwlocktype_read) {
  317. /* If a writer is waiting or working, we fail. */
  318. if (rwl->write_requests != rwl->write_completions)
  319. return (ISC_R_LOCKBUSY);
  320. /* Otherwise, be ready for reading. */
  321. cntflag = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
  322. if ((cntflag & WRITER_ACTIVE) != 0) {
  323. /*
  324. * A writer is working. We lose, and cancel the read
  325. * request.
  326. */
  327. cntflag = isc_atomic_xadd(&rwl->cnt_and_flag,
  328. -READER_INCR);
  329. /*
  330. * If no other readers are waiting and we've suspended
  331. * new writers in this short period, wake them up.
  332. */
  333. if (cntflag == READER_INCR &&
  334. rwl->write_completions != rwl->write_requests) {
  335. LOCK(&rwl->lock);
  336. BROADCAST(&rwl->writeable);
  337. UNLOCK(&rwl->lock);
  338. }
  339. return (ISC_R_LOCKBUSY);
  340. }
  341. } else {
  342. /* Try locking without entering the waiting queue. */
  343. cntflag = isc_atomic_cmpxchg(&rwl->cnt_and_flag, 0,
  344. WRITER_ACTIVE);
  345. if (cntflag != 0)
  346. return (ISC_R_LOCKBUSY);
  347. /*
  348. * XXXJT: jump into the queue, possibly breaking the writer
  349. * order.
  350. */
  351. (void)isc_atomic_xadd(&rwl->write_completions, -1);
  352. rwl->write_granted++;
  353. }
  354. #ifdef ISC_RWLOCK_TRACE
  355. print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
  356. ISC_MSG_POSTLOCK, "postlock"), rwl, type);
  357. #endif
  358. return (ISC_R_SUCCESS);
  359. }
  360. isc_result_t
  361. isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
  362. isc_int32_t prevcnt;
  363. REQUIRE(VALID_RWLOCK(rwl));
  364. /* Try to acquire write access. */
  365. prevcnt = isc_atomic_cmpxchg(&rwl->cnt_and_flag,
  366. READER_INCR, WRITER_ACTIVE);
  367. /*
  368. * There must have been no writer, and there must have been at least
  369. * one reader.
  370. */
  371. INSIST((prevcnt & WRITER_ACTIVE) == 0 &&
  372. (prevcnt & ~WRITER_ACTIVE) != 0);
  373. if (prevcnt == READER_INCR) {
  374. /*
  375. * We are the only reader and have been upgraded.
  376. * Now jump into the head of the writer waiting queue.
  377. */
  378. (void)isc_atomic_xadd(&rwl->write_completions, -1);
  379. } else
  380. return (ISC_R_LOCKBUSY);
  381. return (ISC_R_SUCCESS);
  382. }
  383. void
  384. isc_rwlock_downgrade(isc_rwlock_t *rwl) {
  385. isc_int32_t prev_readers;
  386. REQUIRE(VALID_RWLOCK(rwl));
  387. /* Become an active reader. */
  388. prev_readers = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
  389. /* We must have been a writer. */
  390. INSIST((prev_readers & WRITER_ACTIVE) != 0);
  391. /* Complete write */
  392. (void)isc_atomic_xadd(&rwl->cnt_and_flag, -WRITER_ACTIVE);
  393. (void)isc_atomic_xadd(&rwl->write_completions, 1);
  394. /* Resume other readers */
  395. LOCK(&rwl->lock);
  396. if (rwl->readers_waiting > 0)
  397. BROADCAST(&rwl->readable);
  398. UNLOCK(&rwl->lock);
  399. }
  400. isc_result_t
  401. isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
  402. isc_int32_t prev_cnt;
  403. REQUIRE(VALID_RWLOCK(rwl));
  404. #ifdef ISC_RWLOCK_TRACE
  405. print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
  406. ISC_MSG_PREUNLOCK, "preunlock"), rwl, type);
  407. #endif
  408. if (type == isc_rwlocktype_read) {
  409. prev_cnt = isc_atomic_xadd(&rwl->cnt_and_flag, -READER_INCR);
  410. /*
  411. * If we're the last reader and any writers are waiting, wake
  412. * them up. We need to wake up all of them to ensure the
  413. * FIFO order.
  414. */
  415. if (prev_cnt == READER_INCR &&
  416. rwl->write_completions != rwl->write_requests) {
  417. LOCK(&rwl->lock);
  418. BROADCAST(&rwl->writeable);
  419. UNLOCK(&rwl->lock);
  420. }
  421. } else {
  422. isc_boolean_t wakeup_writers = ISC_TRUE;
  423. /*
  424. * Reset the flag, and (implicitly) tell other writers
  425. * we are done.
  426. */
  427. (void)isc_atomic_xadd(&rwl->cnt_and_flag, -WRITER_ACTIVE);
  428. (void)isc_atomic_xadd(&rwl->write_completions, 1);
  429. if (rwl->write_granted >= rwl->write_quota ||
  430. rwl->write_requests == rwl->write_completions ||
  431. (rwl->cnt_and_flag & ~WRITER_ACTIVE) != 0) {
  432. /*
  433. * We have passed the write quota, no writer is
  434. * waiting, or some readers are almost ready, pending
  435. * possible writers. Note that the last case can
  436. * happen even if write_requests != write_completions
  437. * (which means a new writer in the queue), so we need
  438. * to catch the case explicitly.
  439. */
  440. LOCK(&rwl->lock);
  441. if (rwl->readers_waiting > 0) {
  442. wakeup_writers = ISC_FALSE;
  443. BROADCAST(&rwl->readable);
  444. }
  445. UNLOCK(&rwl->lock);
  446. }
  447. if (rwl->write_requests != rwl->write_completions &&
  448. wakeup_writers) {
  449. LOCK(&rwl->lock);
  450. BROADCAST(&rwl->writeable);
  451. UNLOCK(&rwl->lock);
  452. }
  453. }
  454. #ifdef ISC_RWLOCK_TRACE
  455. print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
  456. ISC_MSG_POSTUNLOCK, "postunlock"),
  457. rwl, type);
  458. #endif
  459. return (ISC_R_SUCCESS);
  460. }
  461. #else /* ISC_PLATFORM_HAVEXADD && ISC_PLATFORM_HAVECMPXCHG */
  462. static isc_result_t
  463. doit(isc_rwlock_t *rwl, isc_rwlocktype_t type, isc_boolean_t nonblock) {
  464. isc_boolean_t skip = ISC_FALSE;
  465. isc_boolean_t done = ISC_FALSE;
  466. isc_result_t result = ISC_R_SUCCESS;
  467. REQUIRE(VALID_RWLOCK(rwl));
  468. LOCK(&rwl->lock);
  469. #ifdef ISC_RWLOCK_TRACE
  470. print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
  471. ISC_MSG_PRELOCK, "prelock"), rwl, type);
  472. #endif
  473. if (type == isc_rwlocktype_read) {
  474. if (rwl->readers_waiting != 0)
  475. skip = ISC_TRUE;
  476. while (!done) {
  477. if (!skip &&
  478. ((rwl->active == 0 ||
  479. (rwl->type == isc_rwlocktype_read &&
  480. (rwl->writers_waiting == 0 ||
  481. rwl->granted < rwl->read_quota)))))
  482. {
  483. rwl->type = isc_rwlocktype_read;
  484. rwl->active++;
  485. rwl->granted++;
  486. done = ISC_TRUE;
  487. } else if (nonblock) {
  488. result = ISC_R_LOCKBUSY;
  489. done = ISC_TRUE;
  490. } else {
  491. skip = ISC_FALSE;
  492. rwl->readers_waiting++;
  493. WAIT(&rwl->readable, &rwl->lock);
  494. rwl->readers_waiting--;
  495. }
  496. }
  497. } else {
  498. if (rwl->writers_waiting != 0)
  499. skip = ISC_TRUE;
  500. while (!done) {
  501. if (!skip && rwl->active == 0) {
  502. rwl->type = isc_rwlocktype_write;
  503. rwl->active = 1;
  504. rwl->granted++;
  505. done = ISC_TRUE;
  506. } else if (nonblock) {
  507. result = ISC_R_LOCKBUSY;
  508. done = ISC_TRUE;
  509. } else {
  510. skip = ISC_FALSE;
  511. rwl->writers_waiting++;
  512. WAIT(&rwl->writeable, &rwl->lock);
  513. rwl->writers_waiting--;
  514. }
  515. }
  516. }
  517. #ifdef ISC_RWLOCK_TRACE
  518. print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
  519. ISC_MSG_POSTLOCK, "postlock"), rwl, type);
  520. #endif
  521. UNLOCK(&rwl->lock);
  522. return (result);
  523. }
  524. isc_result_t
  525. isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
  526. return (doit(rwl, type, ISC_FALSE));
  527. }
  528. isc_result_t
  529. isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
  530. return (doit(rwl, type, ISC_TRUE));
  531. }
  532. isc_result_t
  533. isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
  534. isc_result_t result = ISC_R_SUCCESS;
  535. REQUIRE(VALID_RWLOCK(rwl));
  536. LOCK(&rwl->lock);
  537. REQUIRE(rwl->type == isc_rwlocktype_read);
  538. REQUIRE(rwl->active != 0);
  539. /* If we are the only reader then succeed. */
  540. if (rwl->active == 1) {
  541. rwl->original = (rwl->original == isc_rwlocktype_none) ?
  542. isc_rwlocktype_read : isc_rwlocktype_none;
  543. rwl->type = isc_rwlocktype_write;
  544. } else
  545. result = ISC_R_LOCKBUSY;
  546. UNLOCK(&rwl->lock);
  547. return (result);
  548. }
  549. void
  550. isc_rwlock_downgrade(isc_rwlock_t *rwl) {
  551. REQUIRE(VALID_RWLOCK(rwl));
  552. LOCK(&rwl->lock);
  553. REQUIRE(rwl->type == isc_rwlocktype_write);
  554. REQUIRE(rwl->active == 1);
  555. rwl->type = isc_rwlocktype_read;
  556. rwl->original = (rwl->original == isc_rwlocktype_none) ?
  557. isc_rwlocktype_write : isc_rwlocktype_none;
  558. /*
  559. * Resume processing any read request that were blocked when
  560. * we upgraded.
  561. */
  562. if (rwl->original == isc_rwlocktype_none &&
  563. (rwl->writers_waiting == 0 || rwl->granted < rwl->read_quota) &&
  564. rwl->readers_waiting > 0)
  565. BROADCAST(&rwl->readable);
  566. UNLOCK(&rwl->lock);
  567. }
  568. isc_result_t
  569. isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
  570. REQUIRE(VALID_RWLOCK(rwl));
  571. LOCK(&rwl->lock);
  572. REQUIRE(rwl->type == type);
  573. UNUSED(type);
  574. #ifdef ISC_RWLOCK_TRACE
  575. print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
  576. ISC_MSG_PREUNLOCK, "preunlock"), rwl, type);
  577. #endif
  578. INSIST(rwl->active > 0);
  579. rwl->active--;
  580. if (rwl->active == 0) {
  581. if (rwl->original != isc_rwlocktype_none) {
  582. rwl->type = rwl->original;
  583. rwl->original = isc_rwlocktype_none;
  584. }
  585. if (rwl->type == isc_rwlocktype_read) {
  586. rwl->granted = 0;
  587. if (rwl->writers_waiting > 0) {
  588. rwl->type = isc_rwlocktype_write;
  589. SIGNAL(&rwl->writeable);
  590. } else if (rwl->readers_waiting > 0) {
  591. /* Does this case ever happen? */
  592. BROADCAST(&rwl->readable);
  593. }
  594. } else {
  595. if (rwl->readers_waiting > 0) {
  596. if (rwl->writers_waiting > 0 &&
  597. rwl->granted < rwl->write_quota) {
  598. SIGNAL(&rwl->writeable);
  599. } else {
  600. rwl->granted = 0;
  601. rwl->type = isc_rwlocktype_read;
  602. BROADCAST(&rwl->readable);
  603. }
  604. } else if (rwl->writers_waiting > 0) {
  605. rwl->granted = 0;
  606. SIGNAL(&rwl->writeable);
  607. } else {
  608. rwl->granted = 0;
  609. }
  610. }
  611. }
  612. INSIST(rwl->original == isc_rwlocktype_none);
  613. #ifdef ISC_RWLOCK_TRACE
  614. print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
  615. ISC_MSG_POSTUNLOCK, "postunlock"),
  616. rwl, type);
  617. #endif
  618. UNLOCK(&rwl->lock);
  619. return (ISC_R_SUCCESS);
  620. }
  621. #endif /* ISC_PLATFORM_HAVEXADD && ISC_PLATFORM_HAVECMPXCHG */
  622. #else /* ISC_PLATFORM_USETHREADS */
  623. isc_result_t
  624. isc_rwlock_init(isc_rwlock_t *rwl, unsigned int read_quota,
  625. unsigned int write_quota)
  626. {
  627. REQUIRE(rwl != NULL);
  628. UNUSED(read_quota);
  629. UNUSED(write_quota);
  630. rwl->type = isc_rwlocktype_read;
  631. rwl->active = 0;
  632. rwl->magic = RWLOCK_MAGIC;
  633. return (ISC_R_SUCCESS);
  634. }
  635. isc_result_t
  636. isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
  637. REQUIRE(VALID_RWLOCK(rwl));
  638. if (type == isc_rwlocktype_read) {
  639. if (rwl->type != isc_rwlocktype_read && rwl->active != 0)
  640. return (ISC_R_LOCKBUSY);
  641. rwl->type = isc_rwlocktype_read;
  642. rwl->active++;
  643. } else {
  644. if (rwl->active != 0)
  645. return (ISC_R_LOCKBUSY);
  646. rwl->type = isc_rwlocktype_write;
  647. rwl->active = 1;
  648. }
  649. return (ISC_R_SUCCESS);
  650. }
  651. isc_result_t
  652. isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
  653. return (isc_rwlock_lock(rwl, type));
  654. }
  655. isc_result_t
  656. isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
  657. isc_result_t result = ISC_R_SUCCESS;
  658. REQUIRE(VALID_RWLOCK(rwl));
  659. REQUIRE(rwl->type == isc_rwlocktype_read);
  660. REQUIRE(rwl->active != 0);
  661. /* If we are the only reader then succeed. */
  662. if (rwl->active == 1)
  663. rwl->type = isc_rwlocktype_write;
  664. else
  665. result = ISC_R_LOCKBUSY;
  666. return (result);
  667. }
  668. void
  669. isc_rwlock_downgrade(isc_rwlock_t *rwl) {
  670. REQUIRE(VALID_RWLOCK(rwl));
  671. REQUIRE(rwl->type == isc_rwlocktype_write);
  672. REQUIRE(rwl->active == 1);
  673. rwl->type = isc_rwlocktype_read;
  674. }
  675. isc_result_t
  676. isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
  677. REQUIRE(VALID_RWLOCK(rwl));
  678. REQUIRE(rwl->type == type);
  679. UNUSED(type);
  680. INSIST(rwl->active > 0);
  681. rwl->active--;
  682. return (ISC_R_SUCCESS);
  683. }
  684. void
  685. isc_rwlock_destroy(isc_rwlock_t *rwl) {
  686. REQUIRE(rwl != NULL);
  687. REQUIRE(rwl->active == 0);
  688. rwl->magic = 0;
  689. }
  690. #endif /* ISC_PLATFORM_USETHREADS */