/contrib/bind9/lib/isc/task.c

https://bitbucket.org/freebsd/freebsd-head/ · C · 1578 lines · 1005 code · 236 blank · 337 comment · 144 complexity · 3cac71d8f1789171d51e47337a728d82 MD5 · raw file

  1. /*
  2. * Copyright (C) 2004-2012 Internet Systems Consortium, Inc. ("ISC")
  3. * Copyright (C) 1998-2003 Internet Software Consortium.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for any
  6. * purpose with or without fee is hereby granted, provided that the above
  7. * copyright notice and this permission notice appear in all copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
  10. * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
  11. * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
  12. * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
  13. * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
  14. * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  15. * PERFORMANCE OF THIS SOFTWARE.
  16. */
  17. /* $Id$ */
  18. /*! \file
  19. * \author Principal Author: Bob Halley
  20. */
  21. /*
  22. * XXXRTH Need to document the states a task can be in, and the rules
  23. * for changing states.
  24. */
  25. #include <config.h>
  26. #include <isc/condition.h>
  27. #include <isc/event.h>
  28. #include <isc/magic.h>
  29. #include <isc/mem.h>
  30. #include <isc/msgs.h>
  31. #include <isc/platform.h>
  32. #include <isc/string.h>
  33. #include <isc/task.h>
  34. #include <isc/thread.h>
  35. #include <isc/util.h>
  36. #include <isc/xml.h>
  37. #ifdef OPENSSL_LEAKS
  38. #include <openssl/err.h>
  39. #endif
  40. /*%
  41. * For BIND9 internal applications:
  42. * when built with threads we use multiple worker threads shared by the whole
  43. * application.
  44. * when built without threads we share a single global task manager and use
  45. * an integrated event loop for socket, timer, and other generic task events.
  46. * For generic library:
  47. * we don't use either of them: an application can have multiple task managers
  48. * whether or not it's threaded, and if the application is threaded each thread
  49. * is expected to have a separate manager; no "worker threads" are shared by
  50. * the application threads.
  51. */
  52. #ifdef BIND9
  53. #ifdef ISC_PLATFORM_USETHREADS
  54. #define USE_WORKER_THREADS
  55. #else
  56. #define USE_SHARED_MANAGER
  57. #endif /* ISC_PLATFORM_USETHREADS */
  58. #endif /* BIND9 */
  59. #ifndef USE_WORKER_THREADS
  60. #include "task_p.h"
  61. #endif /* USE_WORKER_THREADS */
  62. #ifdef ISC_TASK_TRACE
  63. #define XTRACE(m) fprintf(stderr, "task %p thread %lu: %s\n", \
  64. task, isc_thread_self(), (m))
  65. #define XTTRACE(t, m) fprintf(stderr, "task %p thread %lu: %s\n", \
  66. (t), isc_thread_self(), (m))
  67. #define XTHREADTRACE(m) fprintf(stderr, "thread %lu: %s\n", \
  68. isc_thread_self(), (m))
  69. #else
  70. #define XTRACE(m)
  71. #define XTTRACE(t, m)
  72. #define XTHREADTRACE(m)
  73. #endif
  74. /***
  75. *** Types.
  76. ***/
/*
 * Lifecycle states of a task.  XXXRTH (see header comment): the legal
 * state transitions are not formally documented yet.
 */
typedef enum {
	task_state_idle,	/* no pending events; not on the ready queue */
	task_state_ready,	/* has runnable work; queued on the manager */
	task_state_running,	/* currently being dispatched */
	task_state_done		/* shut down; awaiting final destruction */
} task_state_t;

#if defined(HAVE_LIBXML2) && defined(BIND9)
/*
 * Printable names indexed by task_state_t — presumably consumed by the
 * XML statistics renderer later in this file; verify against that code.
 */
static const char *statenames[] = {
	"idle", "ready", "running", "done",
};
#endif
#define TASK_MAGIC			ISC_MAGIC('T', 'A', 'S', 'K')
#define VALID_TASK(t)			ISC_MAGIC_VALID(t, TASK_MAGIC)

typedef struct isc__task isc__task_t;
typedef struct isc__taskmgr isc__taskmgr_t;

/*
 * A task: a queue of events executed serially.  Fields are grouped by
 * the lock that protects them; see the inline section comments.
 */
struct isc__task {
	/* Not locked. */
	isc_task_t		common;		/* generic header (magic + method table) */
	isc__taskmgr_t *	manager;	/* owning manager; set once at creation */
	isc_mutex_t		lock;		/* guards the task-locked section below */
	/* Locked by task lock. */
	task_state_t		state;		/* current lifecycle state */
	unsigned int		references;	/* attach count; task may finish at 0 */
	isc_eventlist_t		events;		/* pending events, dispatched FIFO */
	isc_eventlist_t		on_shutdown;	/* events posted (LIFO) when shutdown starts */
	unsigned int		quantum;	/* max events run per scheduling slice */
	unsigned int		flags;		/* TASK_F_* bits */
	isc_stdtime_t		now;		/* time sampled when the task last ran */
	char			name[16];	/* debugging label; kept NUL-terminated */
	void *			tag;		/* opaque user pointer set with the name */
	/* Locked by task manager lock. */
	LINK(isc__task_t)	link;		/* membership in manager->tasks */
	LINK(isc__task_t)	ready_link;	/* membership in manager->ready_tasks */
};

#define TASK_F_SHUTTINGDOWN	0x01	/* shutdown events have been posted */
#define TASK_SHUTTINGDOWN(t)	(((t)->flags & TASK_F_SHUTTINGDOWN) \
				 != 0)

#define TASK_MANAGER_MAGIC	ISC_MAGIC('T', 'S', 'K', 'M')
#define VALID_MANAGER(m)	ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC)

typedef ISC_LIST(isc__task_t) isc__tasklist_t;
/*
 * The task manager: owns all tasks and the ready queue, and (when built
 * with threads) the worker pool that drains it.
 */
struct isc__taskmgr {
	/* Not locked. */
	isc_taskmgr_t		common;		/* generic header (magic + method table) */
	isc_mem_t *		mctx;		/* memory context tasks are allocated from */
	isc_mutex_t		lock;		/* guards the manager-locked section below */
#ifdef ISC_PLATFORM_USETHREADS
	unsigned int		workers;	/* number of worker threads */
	isc_thread_t *		threads;	/* worker thread handles */
#endif /* ISC_PLATFORM_USETHREADS */
	/* Locked by task manager lock. */
	unsigned int		default_quantum; /* quantum for tasks created with 0 */
	LIST(isc__task_t)	tasks;		/* every live task */
	isc__tasklist_t		ready_tasks;	/* tasks with runnable events */
#ifdef ISC_PLATFORM_USETHREADS
	isc_condition_t		work_available;	/* signalled when ready work appears */
	isc_condition_t		exclusive_granted; /* signalled when exclusive mode may begin */
#endif /* ISC_PLATFORM_USETHREADS */
	unsigned int		tasks_running;	/* tasks currently in dispatch */
	isc_boolean_t		exclusive_requested; /* a task asked for sole execution */
	isc_boolean_t		exiting;	/* manager shutdown has begun */
#ifdef USE_SHARED_MANAGER
	unsigned int		refs;		/* references to the shared manager */
#endif /* USE_SHARED_MANAGER */
};

#define DEFAULT_TASKMGR_QUANTUM		10	/* events per dispatch() pass (non-threaded) */
#define DEFAULT_DEFAULT_QUANTUM		5	/* per-task quantum when none configured */
/* True once shutdown was requested and the last task has been unlinked. */
#define FINISHED(m)			((m)->exiting && EMPTY((m)->tasks))

#ifdef USE_SHARED_MANAGER
/* The single application-wide manager used in non-threaded BIND9 builds. */
static isc__taskmgr_t *taskmgr = NULL;
#endif /* USE_SHARED_MANAGER */
  145. /*%
  146. * The following can be either static or public, depending on build environment.
  147. */
#ifdef BIND9
#define ISC_TASKFUNC_SCOPE
#else
#define ISC_TASKFUNC_SCOPE static
#endif

/*
 * Forward declarations for the implementation functions defined below;
 * they must precede the method tables that reference them.
 */
ISC_TASKFUNC_SCOPE isc_result_t
isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum,
		 isc_task_t **taskp);
ISC_TASKFUNC_SCOPE void
isc__task_attach(isc_task_t *source0, isc_task_t **targetp);
ISC_TASKFUNC_SCOPE void
isc__task_detach(isc_task_t **taskp);
ISC_TASKFUNC_SCOPE void
isc__task_send(isc_task_t *task0, isc_event_t **eventp);
ISC_TASKFUNC_SCOPE void
isc__task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp);
ISC_TASKFUNC_SCOPE unsigned int
isc__task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
		     isc_eventtype_t last, void *tag);
ISC_TASKFUNC_SCOPE unsigned int
isc__task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
		void *tag);
ISC_TASKFUNC_SCOPE isc_boolean_t
isc__task_purgeevent(isc_task_t *task0, isc_event_t *event);
ISC_TASKFUNC_SCOPE unsigned int
isc__task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
		      isc_eventtype_t last, void *tag,
		      isc_eventlist_t *events);
ISC_TASKFUNC_SCOPE unsigned int
isc__task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
		 void *tag, isc_eventlist_t *events);
ISC_TASKFUNC_SCOPE isc_result_t
isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
		     const void *arg);
ISC_TASKFUNC_SCOPE void
isc__task_shutdown(isc_task_t *task0);
ISC_TASKFUNC_SCOPE void
isc__task_destroy(isc_task_t **taskp);
ISC_TASKFUNC_SCOPE void
isc__task_setname(isc_task_t *task0, const char *name, void *tag);
ISC_TASKFUNC_SCOPE const char *
isc__task_getname(isc_task_t *task0);
ISC_TASKFUNC_SCOPE void *
isc__task_gettag(isc_task_t *task0);
ISC_TASKFUNC_SCOPE void
isc__task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t);
ISC_TASKFUNC_SCOPE isc_result_t
isc__taskmgr_create(isc_mem_t *mctx, unsigned int workers,
		    unsigned int default_quantum, isc_taskmgr_t **managerp);
ISC_TASKFUNC_SCOPE void
isc__taskmgr_destroy(isc_taskmgr_t **managerp);
ISC_TASKFUNC_SCOPE isc_result_t
isc__task_beginexclusive(isc_task_t *task);
ISC_TASKFUNC_SCOPE void
isc__task_endexclusive(isc_task_t *task0);

/*
 * Method table installed into every task's generic header
 * (task->common.methods); the generic isc_task_* wrappers dispatch
 * through it.
 */
static struct isc__taskmethods {
	isc_taskmethods_t methods;

	/*%
	 * The following are defined just for avoiding unused static functions.
	 */
#ifndef BIND9
	void *purgeevent, *unsendrange, *getname, *gettag, *getcurrenttime;
#endif
} taskmethods = {
	{
		isc__task_attach,
		isc__task_detach,
		isc__task_destroy,
		isc__task_send,
		isc__task_sendanddetach,
		isc__task_unsend,
		isc__task_onshutdown,
		isc__task_shutdown,
		isc__task_setname,
		isc__task_purge,
		isc__task_purgerange,
		isc__task_beginexclusive,
		isc__task_endexclusive
	}
#ifndef BIND9
	,
	(void *)isc__task_purgeevent, (void *)isc__task_unsendrange,
	(void *)isc__task_getname, (void *)isc__task_gettag,
	(void *)isc__task_getcurrenttime
#endif
};

/* Manager-level method table for the generic isc_taskmgr_t wrapper. */
static isc_taskmgrmethods_t taskmgrmethods = {
	isc__taskmgr_destroy,
	isc__task_create
};
  238. /***
  239. *** Tasks.
  240. ***/
/*
 * Final teardown of a fully quiesced task: unlink it from the manager,
 * destroy its lock, and free its memory.
 *
 * Requires: no pending events, no shutdown events, zero references,
 * state == done.  Caller must hold neither the task nor manager lock;
 * the manager lock is taken here.
 */
static void
task_finished(isc__task_t *task) {
	isc__taskmgr_t *manager = task->manager;

	REQUIRE(EMPTY(task->events));
	REQUIRE(EMPTY(task->on_shutdown));
	REQUIRE(task->references == 0);
	REQUIRE(task->state == task_state_done);

	XTRACE("task_finished");

	LOCK(&manager->lock);
	UNLINK(manager->tasks, task, link);
#ifdef USE_WORKER_THREADS
	if (FINISHED(manager)) {
		/*
		 * All tasks have completed and the
		 * task manager is exiting.  Wake up
		 * any idle worker threads so they
		 * can exit.
		 */
		BROADCAST(&manager->work_available);
	}
#endif /* USE_WORKER_THREADS */
	UNLOCK(&manager->lock);

	DESTROYLOCK(&task->lock);
	/* Invalidate the magic numbers so stale pointers fail VALID_TASK(). */
	task->common.impmagic = 0;
	task->common.magic = 0;
	isc_mem_put(manager->mctx, task, sizeof(*task));
}
/*
 * Create a task with one reference in state idle and attach it to
 * 'manager0'.  'quantum' of 0 selects the manager's default quantum.
 *
 * Returns ISC_R_NOMEMORY on allocation failure, ISC_R_SHUTTINGDOWN if
 * the manager is already exiting (the task is rolled back and freed),
 * a mutex-init error, or ISC_R_SUCCESS with *taskp set.
 */
ISC_TASKFUNC_SCOPE isc_result_t
isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum,
		 isc_task_t **taskp)
{
	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
	isc__task_t *task;
	isc_boolean_t exiting;
	isc_result_t result;

	REQUIRE(VALID_MANAGER(manager));
	REQUIRE(taskp != NULL && *taskp == NULL);

	task = isc_mem_get(manager->mctx, sizeof(*task));
	if (task == NULL)
		return (ISC_R_NOMEMORY);
	XTRACE("isc_task_create");
	task->manager = manager;
	result = isc_mutex_init(&task->lock);
	if (result != ISC_R_SUCCESS) {
		/* Roll back the allocation; nothing else is initialized yet. */
		isc_mem_put(manager->mctx, task, sizeof(*task));
		return (result);
	}
	task->state = task_state_idle;
	task->references = 1;
	INIT_LIST(task->events);
	INIT_LIST(task->on_shutdown);
	task->quantum = quantum;
	task->flags = 0;
	task->now = 0;
	memset(task->name, 0, sizeof(task->name));
	task->tag = NULL;
	INIT_LINK(task, link);
	INIT_LINK(task, ready_link);

	exiting = ISC_FALSE;
	LOCK(&manager->lock);
	if (!manager->exiting) {
		if (task->quantum == 0)
			task->quantum = manager->default_quantum;
		APPEND(manager->tasks, task, link);
	} else
		exiting = ISC_TRUE;
	UNLOCK(&manager->lock);

	if (exiting) {
		/* Manager refused the task; undo everything we built. */
		DESTROYLOCK(&task->lock);
		isc_mem_put(manager->mctx, task, sizeof(*task));
		return (ISC_R_SHUTTINGDOWN);
	}

	/* Magic numbers go in last, once the task is fully constructed. */
	task->common.methods = (isc_taskmethods_t *)&taskmethods;
	task->common.magic = ISCAPI_TASK_MAGIC;
	task->common.impmagic = TASK_MAGIC;
	*taskp = (isc_task_t *)task;

	return (ISC_R_SUCCESS);
}
  319. ISC_TASKFUNC_SCOPE void
  320. isc__task_attach(isc_task_t *source0, isc_task_t **targetp) {
  321. isc__task_t *source = (isc__task_t *)source0;
  322. /*
  323. * Attach *targetp to source.
  324. */
  325. REQUIRE(VALID_TASK(source));
  326. REQUIRE(targetp != NULL && *targetp == NULL);
  327. XTTRACE(source, "isc_task_attach");
  328. LOCK(&source->lock);
  329. source->references++;
  330. UNLOCK(&source->lock);
  331. *targetp = (isc_task_t *)source;
  332. }
/*
 * Begin shutting down 'task': set TASK_F_SHUTTINGDOWN and move all
 * on-shutdown events (in LIFO order) onto the regular event queue.
 * Idempotent — does nothing if shutdown has already started.
 *
 * Caller must be holding the task's lock.
 *
 * Returns ISC_TRUE if the task was idle and is now ready, in which
 * case the caller must queue it with task_ready() after unlocking.
 */
static inline isc_boolean_t
task_shutdown(isc__task_t *task) {
	isc_boolean_t was_idle = ISC_FALSE;
	isc_event_t *event, *prev;

	/*
	 * Caller must be holding the task's lock.
	 */

	XTRACE("task_shutdown");

	if (! TASK_SHUTTINGDOWN(task)) {
		XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
				      ISC_MSG_SHUTTINGDOWN, "shutting down"));
		task->flags |= TASK_F_SHUTTINGDOWN;
		if (task->state == task_state_idle) {
			INSIST(EMPTY(task->events));
			task->state = task_state_ready;
			was_idle = ISC_TRUE;
		}
		INSIST(task->state == task_state_ready ||
		       task->state == task_state_running);
		/*
		 * Note that we post shutdown events LIFO.
		 */
		for (event = TAIL(task->on_shutdown);
		     event != NULL;
		     event = prev) {
			prev = PREV(event, ev_link);
			DEQUEUE(task->on_shutdown, event, ev_link);
			ENQUEUE(task->events, event, ev_link);
		}
	}

	return (was_idle);
}
/*
 * Put 'task' (which must already be in state ready) on the manager's
 * ready queue and, in threaded builds, wake one worker.
 *
 * Caller must NOT hold the task lock (the manager lock is taken here;
 * task lock -> manager lock nesting would risk deadlock elsewhere).
 */
static inline void
task_ready(isc__task_t *task) {
	isc__taskmgr_t *manager = task->manager;

	REQUIRE(VALID_MANAGER(manager));
	REQUIRE(task->state == task_state_ready);

	XTRACE("task_ready");

	LOCK(&manager->lock);

	ENQUEUE(manager->ready_tasks, task, ready_link);
#ifdef USE_WORKER_THREADS
	SIGNAL(&manager->work_available);
#endif /* USE_WORKER_THREADS */

	UNLOCK(&manager->lock);
}
/*
 * Drop one reference from 'task'.  If that was the last reference and
 * the task is idle, promote it to ready so the run loop can observe the
 * zero-reference state and shut it down.
 *
 * Caller must be holding the task lock.
 *
 * Returns ISC_TRUE when the caller must queue the task with
 * task_ready() after releasing the task lock.
 */
static inline isc_boolean_t
task_detach(isc__task_t *task) {

	/*
	 * Caller must be holding the task lock.
	 */

	REQUIRE(task->references > 0);

	XTRACE("detach");

	task->references--;
	if (task->references == 0 && task->state == task_state_idle) {
		INSIST(EMPTY(task->events));
		/*
		 * There are no references to this task, and no
		 * pending events.  We could try to optimize and
		 * either initiate shutdown or clean up the task,
		 * depending on its state, but it's easier to just
		 * make the task ready and allow run() or the event
		 * loop to deal with shutting down and termination.
		 */
		task->state = task_state_ready;
		return (ISC_TRUE);
	}

	return (ISC_FALSE);
}
/*
 * Detach *taskp from its task: release one reference and NULL the
 * caller's pointer.  If this was the last reference on an idle task,
 * the task is queued so the dispatcher can finish it off.
 */
ISC_TASKFUNC_SCOPE void
isc__task_detach(isc_task_t **taskp) {
	isc__task_t *task;
	isc_boolean_t was_idle;

	/*
	 * Detach *taskp from its task.
	 */

	REQUIRE(taskp != NULL);
	task = (isc__task_t *)*taskp;
	REQUIRE(VALID_TASK(task));

	XTRACE("isc_task_detach");

	LOCK(&task->lock);
	was_idle = task_detach(task);
	UNLOCK(&task->lock);

	/* task_ready() needs the manager lock, so call it unlocked. */
	if (was_idle)
		task_ready(task);

	*taskp = NULL;
}
/*
 * Enqueue *eventp on 'task' and NULL the caller's pointer (ownership
 * of the event moves to the task).
 *
 * Caller must be holding the task lock.
 *
 * Returns ISC_TRUE if the task was idle (now ready): the caller must
 * add it to the manager's ready queue with task_ready() after
 * releasing the task lock.
 */
static inline isc_boolean_t
task_send(isc__task_t *task, isc_event_t **eventp) {
	isc_boolean_t was_idle = ISC_FALSE;
	isc_event_t *event;

	/*
	 * Caller must be holding the task lock.
	 */

	REQUIRE(eventp != NULL);
	event = *eventp;
	REQUIRE(event != NULL);
	REQUIRE(event->ev_type > 0);
	REQUIRE(task->state != task_state_done);

	XTRACE("task_send");

	if (task->state == task_state_idle) {
		was_idle = ISC_TRUE;
		INSIST(EMPTY(task->events));
		task->state = task_state_ready;
	}
	INSIST(task->state == task_state_ready ||
	       task->state == task_state_running);
	ENQUEUE(task->events, event, ev_link);
	*eventp = NULL;

	return (was_idle);
}
/*
 * Send '*eventp' to 'task'.  Ownership of the event passes to the task;
 * *eventp is NULLed.  If the task was idle it is queued for dispatch.
 */
ISC_TASKFUNC_SCOPE void
isc__task_send(isc_task_t *task0, isc_event_t **eventp) {
	isc__task_t *task = (isc__task_t *)task0;
	isc_boolean_t was_idle;

	/*
	 * Send '*event' to 'task'.
	 */

	REQUIRE(VALID_TASK(task));

	XTRACE("isc_task_send");

	/*
	 * We're trying hard to hold locks for as short a time as possible.
	 * We're also trying to hold as few locks as possible.  This is why
	 * some processing is deferred until after the lock is released.
	 */
	LOCK(&task->lock);
	was_idle = task_send(task, eventp);
	UNLOCK(&task->lock);

	if (was_idle) {
		/*
		 * We need to add this task to the ready queue.
		 *
		 * We've waited until now to do it because making a task
		 * ready requires locking the manager.  If we tried to do
		 * this while holding the task lock, we could deadlock.
		 *
		 * We've changed the state to ready, so no one else will
		 * be trying to add this task to the ready queue.  The
		 * only way to leave the ready state is by executing the
		 * task.  It thus doesn't matter if events are added,
		 * removed, or a shutdown is started in the interval
		 * between the time we released the task lock, and the time
		 * we add the task to the ready queue.
		 */
		task_ready(task);
	}
}
/*
 * Send '*eventp' to '*taskp' and then release the caller's reference,
 * all under a single acquisition of the task lock.  *taskp is NULLed.
 */
ISC_TASKFUNC_SCOPE void
isc__task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
	isc_boolean_t idle1, idle2;
	isc__task_t *task;

	/*
	 * Send '*event' to '*taskp' and then detach '*taskp' from its
	 * task.
	 */

	REQUIRE(taskp != NULL);
	task = (isc__task_t *)*taskp;
	REQUIRE(VALID_TASK(task));

	XTRACE("isc_task_sendanddetach");

	LOCK(&task->lock);
	idle1 = task_send(task, eventp);
	idle2 = task_detach(task);
	UNLOCK(&task->lock);

	/*
	 * If idle1, then idle2 shouldn't be true as well since we're holding
	 * the task lock, and thus the task cannot switch from ready back to
	 * idle.
	 */
	INSIST(!(idle1 && idle2));

	if (idle1 || idle2)
		task_ready(task);

	*taskp = NULL;
}
/* An event may be purged unless it carries ISC_EVENTATTR_NOPURGE. */
#define PURGE_OK(event)	(((event)->ev_attributes & ISC_EVENTATTR_NOPURGE) == 0)

/*
 * Move matching events from 'task''s queue onto '*events' and return
 * how many were moved.  Matching: ev_type in [first, last], ev_sender
 * equals 'sender' (NULL = any), ev_tag equals 'tag' (NULL = any); if
 * 'purging', unpurgeable events are skipped.  The task's state is not
 * changed.  Takes and releases the task lock.
 */
static unsigned int
dequeue_events(isc__task_t *task, void *sender, isc_eventtype_t first,
	       isc_eventtype_t last, void *tag,
	       isc_eventlist_t *events, isc_boolean_t purging)
{
	isc_event_t *event, *next_event;
	unsigned int count = 0;

	REQUIRE(VALID_TASK(task));
	REQUIRE(last >= first);

	XTRACE("dequeue_events");

	/*
	 * Events matching 'sender', whose type is >= first and <= last, and
	 * whose tag is 'tag' will be dequeued.  If 'purging', matching events
	 * which are marked as unpurgable will not be dequeued.
	 *
	 * sender == NULL means "any sender", and tag == NULL means "any tag".
	 */

	LOCK(&task->lock);

	for (event = HEAD(task->events); event != NULL; event = next_event) {
		/* Save the successor first: a match unlinks 'event'. */
		next_event = NEXT(event, ev_link);
		if (event->ev_type >= first && event->ev_type <= last &&
		    (sender == NULL || event->ev_sender == sender) &&
		    (tag == NULL || event->ev_tag == tag) &&
		    (!purging || PURGE_OK(event))) {
			DEQUEUE(task->events, event, ev_link);
			ENQUEUE(*events, event, ev_link);
			count++;
		}
	}

	UNLOCK(&task->lock);

	return (count);
}
  538. ISC_TASKFUNC_SCOPE unsigned int
  539. isc__task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
  540. isc_eventtype_t last, void *tag)
  541. {
  542. isc__task_t *task = (isc__task_t *)task0;
  543. unsigned int count;
  544. isc_eventlist_t events;
  545. isc_event_t *event, *next_event;
  546. /*
  547. * Purge events from a task's event queue.
  548. */
  549. XTRACE("isc_task_purgerange");
  550. ISC_LIST_INIT(events);
  551. count = dequeue_events(task, sender, first, last, tag, &events,
  552. ISC_TRUE);
  553. for (event = HEAD(events); event != NULL; event = next_event) {
  554. next_event = NEXT(event, ev_link);
  555. isc_event_free(&event);
  556. }
  557. /*
  558. * Note that purging never changes the state of the task.
  559. */
  560. return (count);
  561. }
  562. ISC_TASKFUNC_SCOPE unsigned int
  563. isc__task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
  564. void *tag)
  565. {
  566. /*
  567. * Purge events from a task's event queue.
  568. */
  569. XTRACE("isc_task_purge");
  570. return (isc__task_purgerange(task, sender, type, type, tag));
  571. }
/*
 * Purge a specific 'event' from a task's event queue, if present and
 * purgeable.  Returns ISC_TRUE if the event was found and freed.
 *
 * XXXRTH: WARNING: This method may be removed before beta.
 */
ISC_TASKFUNC_SCOPE isc_boolean_t
isc__task_purgeevent(isc_task_t *task0, isc_event_t *event) {
	isc__task_t *task = (isc__task_t *)task0;
	isc_event_t *curr_event, *next_event;

	/*
	 * Purge 'event' from a task's event queue.
	 */

	REQUIRE(VALID_TASK(task));

	/*
	 * If 'event' is on the task's event queue, it will be purged,
	 * unless it is marked as unpurgeable.  'event' does not have to be
	 * on the task's event queue; in fact, it can even be an invalid
	 * pointer.  Purging only occurs if the event is actually on the task's
	 * event queue.
	 *
	 * Purging never changes the state of the task.
	 */

	LOCK(&task->lock);
	for (curr_event = HEAD(task->events);
	     curr_event != NULL;
	     curr_event = next_event) {
		next_event = NEXT(curr_event, ev_link);
		/* Match by identity: only the exact event pointer counts. */
		if (curr_event == event && PURGE_OK(event)) {
			DEQUEUE(task->events, curr_event, ev_link);
			break;
		}
	}
	UNLOCK(&task->lock);

	if (curr_event == NULL)
		return (ISC_FALSE);

	isc_event_free(&curr_event);

	return (ISC_TRUE);
}
  607. ISC_TASKFUNC_SCOPE unsigned int
  608. isc__task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
  609. isc_eventtype_t last, void *tag,
  610. isc_eventlist_t *events)
  611. {
  612. /*
  613. * Remove events from a task's event queue.
  614. */
  615. XTRACE("isc_task_unsendrange");
  616. return (dequeue_events((isc__task_t *)task, sender, first,
  617. last, tag, events, ISC_FALSE));
  618. }
  619. ISC_TASKFUNC_SCOPE unsigned int
  620. isc__task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
  621. void *tag, isc_eventlist_t *events)
  622. {
  623. /*
  624. * Remove events from a task's event queue.
  625. */
  626. XTRACE("isc_task_unsend");
  627. return (dequeue_events((isc__task_t *)task, sender, type,
  628. type, tag, events, ISC_FALSE));
  629. }
/*
 * Register a shutdown callback: allocate an ISC_TASKEVENT_SHUTDOWN
 * event with 'action'/'arg' and queue it on the task's on_shutdown
 * list, to be posted (LIFO) when the task shuts down.
 *
 * Returns ISC_R_NOMEMORY on allocation failure, ISC_R_SHUTTINGDOWN if
 * shutdown has already started (the event is discarded), else
 * ISC_R_SUCCESS.
 */
ISC_TASKFUNC_SCOPE isc_result_t
isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
		     const void *arg)
{
	isc__task_t *task = (isc__task_t *)task0;
	isc_boolean_t disallowed = ISC_FALSE;
	isc_result_t result = ISC_R_SUCCESS;
	isc_event_t *event;

	/*
	 * Send a shutdown event with action 'action' and argument 'arg' when
	 * 'task' is shutdown.
	 */

	REQUIRE(VALID_TASK(task));
	REQUIRE(action != NULL);

	event = isc_event_allocate(task->manager->mctx,
				   NULL,
				   ISC_TASKEVENT_SHUTDOWN,
				   action,
				   arg,
				   sizeof(*event));
	if (event == NULL)
		return (ISC_R_NOMEMORY);

	LOCK(&task->lock);
	if (TASK_SHUTTINGDOWN(task)) {
		disallowed = ISC_TRUE;
		result = ISC_R_SHUTTINGDOWN;
	} else
		ENQUEUE(task->on_shutdown, event, ev_link);
	UNLOCK(&task->lock);

	/*
	 * NOTE(review): the rejected event is released with a raw
	 * isc_mem_put rather than isc_event_free — presumably safe
	 * because the event was just allocated here; confirm against
	 * isc_event_allocate's documentation.
	 */
	if (disallowed)
		isc_mem_put(task->manager->mctx, event, sizeof(*event));

	return (result);
}
/*
 * Shutdown 'task': post its shutdown events and, if it was idle, queue
 * it for dispatch so the shutdown events actually run.
 */
ISC_TASKFUNC_SCOPE void
isc__task_shutdown(isc_task_t *task0) {
	isc__task_t *task = (isc__task_t *)task0;
	isc_boolean_t was_idle;

	/*
	 * Shutdown 'task'.
	 */

	REQUIRE(VALID_TASK(task));

	LOCK(&task->lock);
	was_idle = task_shutdown(task);
	UNLOCK(&task->lock);

	/* task_ready() takes the manager lock, so call it unlocked. */
	if (was_idle)
		task_ready(task);
}
  677. ISC_TASKFUNC_SCOPE void
  678. isc__task_destroy(isc_task_t **taskp) {
  679. /*
  680. * Destroy '*taskp'.
  681. */
  682. REQUIRE(taskp != NULL);
  683. isc_task_shutdown(*taskp);
  684. isc_task_detach(taskp);
  685. }
/*
 * Set the task's debugging name and opaque tag.  The name is truncated
 * to fit; the memset-then-strncpy sequence guarantees the stored name
 * is always NUL-terminated.
 */
ISC_TASKFUNC_SCOPE void
isc__task_setname(isc_task_t *task0, const char *name, void *tag) {
	isc__task_t *task = (isc__task_t *)task0;

	/*
	 * Name 'task'.
	 */

	REQUIRE(VALID_TASK(task));

	LOCK(&task->lock);
	memset(task->name, 0, sizeof(task->name));
	strncpy(task->name, name, sizeof(task->name) - 1);
	task->tag = tag;
	UNLOCK(&task->lock);
}
  699. ISC_TASKFUNC_SCOPE const char *
  700. isc__task_getname(isc_task_t *task0) {
  701. isc__task_t *task = (isc__task_t *)task0;
  702. REQUIRE(VALID_TASK(task));
  703. return (task->name);
  704. }
  705. ISC_TASKFUNC_SCOPE void *
  706. isc__task_gettag(isc_task_t *task0) {
  707. isc__task_t *task = (isc__task_t *)task0;
  708. REQUIRE(VALID_TASK(task));
  709. return (task->tag);
  710. }
/*
 * Store in '*t' the time sampled when the task most recently started
 * running (task->now, set by the dispatcher); 0 if it has never run.
 */
ISC_TASKFUNC_SCOPE void
isc__task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t) {
	isc__task_t *task = (isc__task_t *)task0;

	REQUIRE(VALID_TASK(task));
	REQUIRE(t != NULL);

	LOCK(&task->lock);
	*t = task->now;
	UNLOCK(&task->lock);
}
  720. /***
  721. *** Task Manager.
  722. ***/
/*
 * The task dispatcher.  In threaded builds each worker thread runs this
 * until the manager is finished; in non-threaded builds a single pass
 * dispatches at most DEFAULT_TASKMGR_QUANTUM events and returns.
 *
 * Lock protocol: the manager lock guards the ready queue and counters;
 * the task lock guards the task's state and event queue; NO lock is
 * held while an event action runs.  See the long comment below for why
 * the loops acquire/release in this particular shape.
 */
static void
dispatch(isc__taskmgr_t *manager) {
	isc__task_t *task;
#ifndef USE_WORKER_THREADS
	unsigned int total_dispatch_count = 0;
	isc__tasklist_t ready_tasks;
#endif /* USE_WORKER_THREADS */

	REQUIRE(VALID_MANAGER(manager));

	/*
	 * Again we're trying to hold the lock for as short a time as possible
	 * and to do as little locking and unlocking as possible.
	 *
	 * In both while loops, the appropriate lock must be held before the
	 * while body starts.  Code which acquired the lock at the top of
	 * the loop would be more readable, but would result in a lot of
	 * extra locking.  Compare:
	 *
	 * Straightforward:
	 *
	 *	LOCK();
	 *	...
	 *	UNLOCK();
	 *	while (expression) {
	 *		LOCK();
	 *		...
	 *		UNLOCK();
	 *
	 *		Unlocked part here...
	 *
	 *		LOCK();
	 *		...
	 *		UNLOCK();
	 *	}
	 *
	 * Note how if the loop continues we unlock and then immediately lock.
	 * For N iterations of the loop, this code does 2N+1 locks and 2N+1
	 * unlocks.  Also note that the lock is not held when the while
	 * condition is tested, which may or may not be important, depending
	 * on the expression.
	 *
	 * As written:
	 *
	 *	LOCK();
	 *	while (expression) {
	 *		...
	 *		UNLOCK();
	 *
	 *		Unlocked part here...
	 *
	 *		LOCK();
	 *		...
	 *	}
	 *	UNLOCK();
	 *
	 * For N iterations of the loop, this code does N+1 locks and N+1
	 * unlocks.  The while expression is always protected by the lock.
	 */
#ifndef USE_WORKER_THREADS
	ISC_LIST_INIT(ready_tasks);
#endif
	LOCK(&manager->lock);
	while (!FINISHED(manager)) {
#ifdef USE_WORKER_THREADS
		/*
		 * For reasons similar to those given in the comment in
		 * isc_task_send() above, it is safe for us to dequeue
		 * the task while only holding the manager lock, and then
		 * change the task to running state while only holding the
		 * task lock.
		 */
		while ((EMPTY(manager->ready_tasks) ||
			manager->exclusive_requested) &&
		       !FINISHED(manager))
		{
			XTHREADTRACE(isc_msgcat_get(isc_msgcat,
						    ISC_MSGSET_GENERAL,
						    ISC_MSG_WAIT, "wait"));
			WAIT(&manager->work_available, &manager->lock);
			XTHREADTRACE(isc_msgcat_get(isc_msgcat,
						    ISC_MSGSET_TASK,
						    ISC_MSG_AWAKE, "awake"));
		}
#else /* USE_WORKER_THREADS */
		/* Non-threaded: bound the work done in one pass. */
		if (total_dispatch_count >= DEFAULT_TASKMGR_QUANTUM ||
		    EMPTY(manager->ready_tasks))
			break;
#endif /* USE_WORKER_THREADS */
		XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK,
					    ISC_MSG_WORKING, "working"));

		task = HEAD(manager->ready_tasks);
		if (task != NULL) {
			unsigned int dispatch_count = 0;
			isc_boolean_t done = ISC_FALSE;
			isc_boolean_t requeue = ISC_FALSE;
			isc_boolean_t finished = ISC_FALSE;
			isc_event_t *event;

			INSIST(VALID_TASK(task));

			/*
			 * Note we only unlock the manager lock if we actually
			 * have a task to do.  We must reacquire the manager
			 * lock before exiting the 'if (task != NULL)' block.
			 */
			DEQUEUE(manager->ready_tasks, task, ready_link);
			manager->tasks_running++;
			UNLOCK(&manager->lock);

			LOCK(&task->lock);
			INSIST(task->state == task_state_ready);
			task->state = task_state_running;
			XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
					      ISC_MSG_RUNNING, "running"));
			isc_stdtime_get(&task->now);
			do {
				if (!EMPTY(task->events)) {
					event = HEAD(task->events);
					DEQUEUE(task->events, event, ev_link);

					/*
					 * Execute the event action.
					 */
					XTRACE(isc_msgcat_get(isc_msgcat,
							      ISC_MSGSET_TASK,
							      ISC_MSG_EXECUTE,
							      "execute action"));
					if (event->ev_action != NULL) {
						/*
						 * Run the action with no locks
						 * held; it may send events or
						 * detach tasks itself.
						 */
						UNLOCK(&task->lock);
						(event->ev_action)(
							(isc_task_t *)task,
							event);
						LOCK(&task->lock);
					}
					dispatch_count++;
#ifndef USE_WORKER_THREADS
					total_dispatch_count++;
#endif /* USE_WORKER_THREADS */
				}

				if (task->references == 0 &&
				    EMPTY(task->events) &&
				    !TASK_SHUTTINGDOWN(task)) {
					isc_boolean_t was_idle;

					/*
					 * There are no references and no
					 * pending events for this task,
					 * which means it will not become
					 * runnable again via an external
					 * action (such as sending an event
					 * or detaching).
					 *
					 * We initiate shutdown to prevent
					 * it from becoming a zombie.
					 *
					 * We do this here instead of in
					 * the "if EMPTY(task->events)" block
					 * below because:
					 *
					 *	If we post no shutdown events,
					 *	we want the task to finish.
					 *
					 *	If we did post shutdown events,
					 *	will still want the task's
					 *	quantum to be applied.
					 */
					was_idle = task_shutdown(task);
					INSIST(!was_idle);
				}

				if (EMPTY(task->events)) {
					/*
					 * Nothing else to do for this task
					 * right now.
					 */
					XTRACE(isc_msgcat_get(isc_msgcat,
							      ISC_MSGSET_TASK,
							      ISC_MSG_EMPTY,
							      "empty"));
					if (task->references == 0 &&
					    TASK_SHUTTINGDOWN(task)) {
						/*
						 * The task is done.
						 */
						XTRACE(isc_msgcat_get(
							       isc_msgcat,
							       ISC_MSGSET_TASK,
							       ISC_MSG_DONE,
							       "done"));
						finished = ISC_TRUE;
						task->state = task_state_done;
					} else
						task->state = task_state_idle;
					done = ISC_TRUE;
				} else if (dispatch_count >= task->quantum) {
					/*
					 * Our quantum has expired, but
					 * there is more work to be done.
					 * We'll requeue it to the ready
					 * queue later.
					 *
					 * We don't check quantum until
					 * dispatching at least one event,
					 * so the minimum quantum is one.
					 */
					XTRACE(isc_msgcat_get(isc_msgcat,
							      ISC_MSGSET_TASK,
							      ISC_MSG_QUANTUM,
							      "quantum"));
					task->state = task_state_ready;
					requeue = ISC_TRUE;
					done = ISC_TRUE;
				}
			} while (!done);
			UNLOCK(&task->lock);

			if (finished)
				task_finished(task);

			LOCK(&manager->lock);
			manager->tasks_running--;
#ifdef USE_WORKER_THREADS
			if (manager->exclusive_requested &&
			    manager->tasks_running == 1) {
				/* Only the exclusive requester remains. */
				SIGNAL(&manager->exclusive_granted);
			}
#endif /* USE_WORKER_THREADS */
			if (requeue) {
				/*
				 * We know we're awake, so we don't have
				 * to wakeup any sleeping threads if the
				 * ready queue is empty before we requeue.
				 *
				 * A possible optimization if the queue is
				 * empty is to 'goto' the 'if (task != NULL)'
				 * block, avoiding the ENQUEUE of the task
				 * and the subsequent immediate DEQUEUE
				 * (since it is the only executable task).
				 * We don't do this because then we'd be
				 * skipping the exit_requested check.  The
				 * cost of ENQUEUE is low anyway, especially
				 * when you consider that we'd have to do
				 * an extra EMPTY check to see if we could
				 * do the optimization.  If the ready queue
				 * were usually nonempty, the 'optimization'
				 * might even hurt rather than help.
				 */
#ifdef USE_WORKER_THREADS
				ENQUEUE(manager->ready_tasks, task,
					ready_link);
#else
				ENQUEUE(ready_tasks, task, ready_link);
#endif
			}
		}
	}
#ifndef USE_WORKER_THREADS
	/* Hand requeued tasks back to the manager for the next pass. */
	ISC_LIST_APPENDLIST(manager->ready_tasks, ready_tasks, ready_link);
#endif
	UNLOCK(&manager->lock);
}
  975. #ifdef USE_WORKER_THREADS
  976. static isc_threadresult_t
  977. #ifdef _WIN32
  978. WINAPI
  979. #endif
  980. run(void *uap) {
  981. isc__taskmgr_t *manager = uap;
  982. XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
  983. ISC_MSG_STARTING, "starting"));
  984. dispatch(manager);
  985. XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
  986. ISC_MSG_EXITING, "exiting"));
  987. #ifdef OPENSSL_LEAKS
  988. ERR_remove_state(0);
  989. #endif
  990. return ((isc_threadresult_t)0);
  991. }
  992. #endif /* USE_WORKER_THREADS */
  993. static void
  994. manager_free(isc__taskmgr_t *manager) {
  995. isc_mem_t *mctx;
  996. #ifdef USE_WORKER_THREADS
  997. (void)isc_condition_destroy(&manager->exclusive_granted);
  998. (void)isc_condition_destroy(&manager->work_available);
  999. isc_mem_free(manager->mctx, manager->threads);
  1000. #endif /* USE_WORKER_THREADS */
  1001. DESTROYLOCK(&manager->lock);
  1002. manager->common.impmagic = 0;
  1003. manager->common.magic = 0;
  1004. mctx = manager->mctx;
  1005. isc_mem_put(mctx, manager, sizeof(*manager));
  1006. isc_mem_detach(&mctx);
  1007. #ifdef USE_SHARED_MANAGER
  1008. taskmgr = NULL;
  1009. #endif /* USE_SHARED_MANAGER */
  1010. }
ISC_TASKFUNC_SCOPE isc_result_t
isc__taskmgr_create(isc_mem_t *mctx, unsigned int workers,
		    unsigned int default_quantum, isc_taskmgr_t **managerp)
{
	isc_result_t result;
	unsigned int i, started = 0;
	isc__taskmgr_t *manager;

	/*
	 * Create a new task manager.
	 *
	 * 'workers' is the number of worker threads to start (must be > 0;
	 * unused in the non-threaded build).  'default_quantum' is the
	 * per-task event quantum applied to tasks that do not set their
	 * own; 0 selects DEFAULT_DEFAULT_QUANTUM.  On success the manager
	 * is returned via '*managerp'.
	 */

	REQUIRE(workers > 0);
	REQUIRE(managerp != NULL && *managerp == NULL);

#ifndef USE_WORKER_THREADS
	UNUSED(i);
	UNUSED(started);
#endif

#ifdef USE_SHARED_MANAGER
	/*
	 * Single global manager build: hand out another reference to the
	 * existing manager instead of creating a new one.
	 */
	if (taskmgr != NULL) {
		if (taskmgr->refs == 0)
			return (ISC_R_SHUTTINGDOWN);
		taskmgr->refs++;
		*managerp = (isc_taskmgr_t *)taskmgr;
		return (ISC_R_SUCCESS);
	}
#endif /* USE_SHARED_MANAGER */

	manager = isc_mem_get(mctx, sizeof(*manager));
	if (manager == NULL)
		return (ISC_R_NOMEMORY);
	manager->common.methods = &taskmgrmethods;
	manager->common.impmagic = TASK_MANAGER_MAGIC;
	manager->common.magic = ISCAPI_TASKMGR_MAGIC;
	/* mctx is attached only after all fallible setup succeeds. */
	manager->mctx = NULL;
	result = isc_mutex_init(&manager->lock);
	if (result != ISC_R_SUCCESS)
		goto cleanup_mgr;

#ifdef USE_WORKER_THREADS
	manager->workers = 0;
	manager->threads = isc_mem_allocate(mctx,
					    workers * sizeof(isc_thread_t));
	if (manager->threads == NULL) {
		result = ISC_R_NOMEMORY;
		goto cleanup_lock;
	}
	if (isc_condition_init(&manager->work_available) != ISC_R_SUCCESS) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "isc_condition_init() %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"));
		result = ISC_R_UNEXPECTED;
		goto cleanup_threads;
	}
	if (isc_condition_init(&manager->exclusive_granted) != ISC_R_SUCCESS) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "isc_condition_init() %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"));
		result = ISC_R_UNEXPECTED;
		goto cleanup_workavailable;
	}
#endif /* USE_WORKER_THREADS */
	if (default_quantum == 0)
		default_quantum = DEFAULT_DEFAULT_QUANTUM;
	manager->default_quantum = default_quantum;
	INIT_LIST(manager->tasks);
	INIT_LIST(manager->ready_tasks);
	manager->tasks_running = 0;
	manager->exclusive_requested = ISC_FALSE;
	manager->exiting = ISC_FALSE;

	isc_mem_attach(mctx, &manager->mctx);

#ifdef USE_WORKER_THREADS
	LOCK(&manager->lock);
	/*
	 * Start workers.  Individual thread-creation failures are
	 * tolerated; only a total failure (started == 0) is fatal.
	 */
	for (i = 0; i < workers; i++) {
		if (isc_thread_create(run, manager,
				      &manager->threads[manager->workers]) ==
		    ISC_R_SUCCESS) {
			manager->workers++;
			started++;
		}
	}
	UNLOCK(&manager->lock);

	if (started == 0) {
		manager_free(manager);
		return (ISC_R_NOTHREADS);
	}
	isc_thread_setconcurrency(workers);
#endif /* USE_WORKER_THREADS */
#ifdef USE_SHARED_MANAGER
	manager->refs = 1;
	taskmgr = manager;
#endif /* USE_SHARED_MANAGER */

	*managerp = (isc_taskmgr_t *)manager;

	return (ISC_R_SUCCESS);

	/*
	 * Error paths: unwind resources in reverse order of acquisition.
	 * Note these all use the caller's 'mctx' since manager->mctx is
	 * not yet attached on any of these paths.
	 */
#ifdef USE_WORKER_THREADS
 cleanup_workavailable:
	(void)isc_condition_destroy(&manager->work_available);
 cleanup_threads:
	isc_mem_free(mctx, manager->threads);
 cleanup_lock:
	DESTROYLOCK(&manager->lock);
#endif
 cleanup_mgr:
	isc_mem_put(mctx, manager, sizeof(*manager));
	return (result);
}
ISC_TASKFUNC_SCOPE void
isc__taskmgr_destroy(isc_taskmgr_t **managerp) {
	isc__taskmgr_t *manager;
	isc__task_t *task;
	unsigned int i;

	/*
	 * Destroy '*managerp': post shutdown events to every task, wait
	 * for the workers to drain them (or drain them inline in the
	 * non-threaded build), then free the manager.  '*managerp' is
	 * set to NULL on return.
	 */

	REQUIRE(managerp != NULL);
	manager = (isc__taskmgr_t *)*managerp;
	REQUIRE(VALID_MANAGER(manager));

#ifndef USE_WORKER_THREADS
	UNUSED(i);
#endif /* USE_WORKER_THREADS */

#ifdef USE_SHARED_MANAGER
	/* Drop one reference; only the last one tears the manager down. */
	manager->refs--;
	if (manager->refs > 0) {
		*managerp = NULL;
		return;
	}
#endif

	XTHREADTRACE("isc_taskmgr_destroy");
	/*
	 * Only one non-worker thread may ever call this routine.
	 * If a worker thread wants to initiate shutdown of the
	 * task manager, it should ask some non-worker thread to call
	 * isc_taskmgr_destroy(), e.g. by signalling a condition variable
	 * that the startup thread is sleeping on.
	 */

	/*
	 * Unlike elsewhere, we're going to hold this lock a long time.
	 * We need to do so, because otherwise the list of tasks could
	 * change while we were traversing it.
	 *
	 * This is also the only function where we will hold both the
	 * task manager lock and a task lock at the same time.
	 */

	LOCK(&manager->lock);

	/*
	 * Make sure we only get called once.
	 */
	INSIST(!manager->exiting);
	manager->exiting = ISC_TRUE;

	/*
	 * Post shutdown event(s) to every task (if they haven't already been
	 * posted).  task_shutdown() returns true if the task became
	 * runnable as a result, in which case it must be queued.
	 */
	for (task = HEAD(manager->tasks);
	     task != NULL;
	     task = NEXT(task, link)) {
		LOCK(&task->lock);
		if (task_shutdown(task))
			ENQUEUE(manager->ready_tasks, task, ready_link);
		UNLOCK(&task->lock);
	}
#ifdef USE_WORKER_THREADS
	/*
	 * Wake up any sleeping workers.  This ensures we get work done if
	 * there's work left to do, and if there are already no tasks left
	 * it will cause the workers to see manager->exiting.
	 */
	BROADCAST(&manager->work_available);
	UNLOCK(&manager->lock);

	/*
	 * Wait for all the worker threads to exit.
	 */
	for (i = 0; i < manager->workers; i++)
		(void)isc_thread_join(manager->threads[i], NULL);
#else /* USE_WORKER_THREADS */
	/*
	 * Dispatch the shutdown events.
	 */
	UNLOCK(&manager->lock);
	while (isc__taskmgr_ready((isc_taskmgr_t *)manager))
		(void)isc__taskmgr_dispatch((isc_taskmgr_t *)manager);
#ifdef BIND9
	/* Debug aid: dump live allocations if any tasks failed to die. */
	if (!ISC_LIST_EMPTY(manager->tasks))
		isc_mem_printallactive(stderr);
#endif
	INSIST(ISC_LIST_EMPTY(manager->tasks));
#ifdef USE_SHARED_MANAGER
	taskmgr = NULL;
#endif
#endif /* USE_WORKER_THREADS */

	manager_free(manager);

	*managerp = NULL;
}
  1205. #ifndef USE_WORKER_THREADS
  1206. isc_boolean_t
  1207. isc__taskmgr_ready(isc_taskmgr_t *manager0) {
  1208. isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
  1209. #ifdef USE_SHARED_MANAGER
  1210. if (manager == NULL)
  1211. manager = taskmgr;
  1212. #endif
  1213. if (manager == NULL)
  1214. return (ISC_FALSE);
  1215. return (ISC_TF(!ISC_LIST_EMPTY(manager->ready_tasks)));
  1216. }
  1217. isc_result_t
  1218. isc__taskmgr_dispatch(isc_taskmgr_t *manager0) {
  1219. isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
  1220. #ifdef USE_SHARED_MANAGER
  1221. if (manager == NULL)
  1222. manager = taskmgr;
  1223. #endif
  1224. if (manager == NULL)
  1225. return (ISC_R_NOTFOUND);
  1226. dispatch(manager);
  1227. return (ISC_R_SUCCESS);
  1228. }
  1229. #endif /* USE_WORKER_THREADS */
  1230. ISC_TASKFUNC_SCOPE isc_result_t
  1231. isc__task_beginexclusive(isc_task_t *task0) {
  1232. #ifdef USE_WORKER_THREADS
  1233. isc__task_t *task = (isc__task_t *)task0;
  1234. isc__taskmgr_t *manager = task->manager;
  1235. REQUIRE(task->state == task_state_running);
  1236. LOCK(&manager->lock);
  1237. if (manager->exclusive_requested) {
  1238. UNLOCK(&manager->lock);
  1239. return (ISC_R_LOCKBUSY);
  1240. }
  1241. manager->exclusive_requested = ISC_TRUE;
  1242. while (manager->tasks_running > 1) {
  1243. WAIT(&manager->exclusive_granted, &manager->lock);
  1244. }
  1245. UNLOCK(&manager->lock);
  1246. #else
  1247. UNUSED(task0);
  1248. #endif
  1249. return (ISC_R_SUCCESS);
  1250. }
ISC_TASKFUNC_SCOPE void
isc__task_endexclusive(isc_task_t *task0) {
#ifdef USE_WORKER_THREADS
	isc__task_t *task = (isc__task_t *)task0;
	isc__taskmgr_t *manager = task->manager;

	/*
	 * Release the exclusive access previously obtained via
	 * isc__task_beginexclusive() and let the other workers resume.
	 */
	REQUIRE(task->state == task_state_running);
	LOCK(&manager->lock);
	REQUIRE(manager->exclusive_requested);
	manager->exclusive_requested = ISC_FALSE;
	/* Wake all workers blocked waiting for work. */
	BROADCAST(&manager->work_available);
	UNLOCK(&manager->lock);
#else
	UNUSED(task0);
#endif
}
  1266. #ifdef USE_SOCKETIMPREGISTER
  1267. isc_result_t
  1268. isc__task_register() {
  1269. return (isc_task_register(isc__taskmgr_create));
  1270. }
  1271. #endif
  1272. isc_boolean_t
  1273. isc_task_exiting(isc_task_t *t) {
  1274. isc__task_t *task = (isc__task_t *)t;
  1275. REQUIRE(VALID_TASK(task));
  1276. return (TASK_SHUTTINGDOWN(task));
  1277. }
#if defined(HAVE_LIBXML2) && defined(BIND9)
/*
 * Render the task manager's state as XML on 'writer': the thread
 * model, manager-wide settings, and one <task> element per task.
 * The manager lock is held for the whole traversal, and each task's
 * lock is held while that task's fields are written.
 *
 * NOTE(review): xmlTextWriter* return codes are ignored throughout,
 * so a writer error is not reported to the caller.
 */
void
isc_taskmgr_renderxml(isc_taskmgr_t *mgr0, xmlTextWriterPtr writer) {
	isc__taskmgr_t *mgr = (isc__taskmgr_t *)mgr0;
	isc__task_t *task;

	LOCK(&mgr->lock);

	/*
	 * Write out the thread-model, and some details about each depending
	 * on which type is enabled.
	 */
	xmlTextWriterStartElement(writer, ISC_XMLCHAR "thread-model");
#ifdef ISC_PLATFORM_USETHREADS
	xmlTextWriterStartElement(writer, ISC_XMLCHAR "type");
	xmlTextWriterWriteString(writer, ISC_XMLCHAR "threaded");
	xmlTextWriterEndElement(writer); /* type */

	xmlTextWriterStartElement(writer, ISC_XMLCHAR "worker-threads");
	xmlTextWriterWriteFormatString(writer, "%d", mgr->workers);
	xmlTextWriterEndElement(writer); /* worker-threads */
#else /* ISC_PLATFORM_USETHREADS */
	xmlTextWriterStartElement(writer, ISC_XMLCHAR "type");
	xmlTextWriterWriteString(writer, ISC_XMLCHAR "non-threaded");
	xmlTextWriterEndElement(writer); /* type */

	xmlTextWriterStartElement(writer, ISC_XMLCHAR "references");
	xmlTextWriterWriteFormatString(writer, "%d", mgr->refs);
	xmlTextWriterEndElement(writer); /* references */
#endif /* ISC_PLATFORM_USETHREADS */

	xmlTextWriterStartElement(writer, ISC_XMLCHAR "default-quantum");
	xmlTextWriterWriteFormatString(writer, "%d", mgr->default_quantum);
	xmlTextWriterEndElement(writer); /* default-quantum */

	xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-running");
	xmlTextWriterWriteFormatString(writer, "%d", mgr->tasks_running);
	xmlTextWriterEndElement(writer); /* tasks-running */

	xmlTextWriterEndElement(writer); /* thread-model */

	xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks");
	task = ISC_LIST_HEAD(mgr->tasks);
	while (task != NULL) {
		LOCK(&task->lock);
		xmlTextWriterStartElement(writer, ISC_XMLCHAR "task");

		/* <name> is emitted only for tasks that have been named. */
		if (task->name[0] != 0) {
			xmlTextWriterStartElement(writer, ISC_XMLCHAR "name");
			xmlTextWriterWriteFormatString(writer, "%s",
						       task->name);
			xmlTextWriterEndElement(writer); /* name */
		}

		xmlTextWriterStartElement(writer, ISC_XMLCHAR "references");
		xmlTextWriterWriteFormatString(writer, "%d", task->references);
		xmlTextWriterEndElement(writer); /* references */

		/* The task's address doubles as its unique id. */
		xmlTextWriterStartElement(writer, ISC_XMLCHAR "id");
		xmlTextWriterWriteFormatString(writer, "%p", task);
		xmlTextWriterEndElement(writer); /* id */

		xmlTextWriterStartElement(writer, ISC_XMLCHAR "state");
		xmlTextWriterWriteFormatString(writer, "%s",
					       statenames[task->state]);
		xmlTextWriterEndElement(writer); /* state */

		xmlTextWriterStartElement(writer, ISC_XMLCHAR "quantum");
		xmlTextWriterWriteFormatString(writer, "%d", task->quantum);
		xmlTextWriterEndElement(writer); /* quantum */

		xmlTextWriterEndElement(writer); /* task */
		UNLOCK(&task->lock);
		task = ISC_LIST_NEXT(task, link);
	}
	xmlTextWriterEndElement(writer); /* tasks */

	UNLOCK(&mgr->lock);
}
#endif /* HAVE_LIBXML2 && BIND9 */