
/contrib/bind9/lib/isc/task.c

https://bitbucket.org/freebsd/freebsd-head/
C | 1578 lines | 1005 code | 236 blank | 337 comment
   1/*
   2 * Copyright (C) 2004-2012  Internet Systems Consortium, Inc. ("ISC")
   3 * Copyright (C) 1998-2003  Internet Software Consortium.
   4 *
   5 * Permission to use, copy, modify, and/or distribute this software for any
   6 * purpose with or without fee is hereby granted, provided that the above
   7 * copyright notice and this permission notice appear in all copies.
   8 *
   9 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
  10 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
  11 * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
  12 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
  13 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
  14 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  15 * PERFORMANCE OF THIS SOFTWARE.
  16 */
  17
  18/* $Id$ */
  19
  20/*! \file
  21 * \author Principal Author: Bob Halley
  22 */
  23
  24/*
  25 * XXXRTH  Need to document the states a task can be in, and the rules
  26 * for changing states.
  27 */
  28
  29#include <config.h>
  30
  31#include <isc/condition.h>
  32#include <isc/event.h>
  33#include <isc/magic.h>
  34#include <isc/mem.h>
  35#include <isc/msgs.h>
  36#include <isc/platform.h>
  37#include <isc/string.h>
  38#include <isc/task.h>
  39#include <isc/thread.h>
  40#include <isc/util.h>
  41#include <isc/xml.h>
  42
  43#ifdef OPENSSL_LEAKS
  44#include <openssl/err.h>
  45#endif
  46
   47/*%
   48 * For BIND9 internal applications:
   49 * when built with threads, we use multiple worker threads shared by the
   50 * whole application;
   51 * when built without threads, we share a single global task manager and use
   52 * an integrated event loop for socket, timer, and other generic task events.
   53 * For the generic library:
   54 * we use neither; an application can have multiple task managers whether or
   55 * not it is threaded, and if the application is threaded, each thread is
   56 * expected to have a separate manager; no "worker threads" are shared by
   57 * the application threads.
   58 */
  59#ifdef BIND9
  60#ifdef ISC_PLATFORM_USETHREADS
  61#define USE_WORKER_THREADS
  62#else
  63#define USE_SHARED_MANAGER
  64#endif	/* ISC_PLATFORM_USETHREADS */
  65#endif	/* BIND9 */
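
/*
 * To make the selection above concrete: a BIND9 build with
 * ISC_PLATFORM_USETHREADS defined gets USE_WORKER_THREADS (a pool of worker
 * threads shared by the whole application); a BIND9 build without threads
 * gets USE_SHARED_MANAGER (one global manager driven by an integrated event
 * loop); a generic-library build defines neither, and every manager the
 * application creates is driven independently.
 */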
  66
  67#ifndef USE_WORKER_THREADS
  68#include "task_p.h"
  69#endif /* USE_WORKER_THREADS */
  70
  71#ifdef ISC_TASK_TRACE
  72#define XTRACE(m)		fprintf(stderr, "task %p thread %lu: %s\n", \
  73				       task, isc_thread_self(), (m))
  74#define XTTRACE(t, m)		fprintf(stderr, "task %p thread %lu: %s\n", \
  75				       (t), isc_thread_self(), (m))
  76#define XTHREADTRACE(m)		fprintf(stderr, "thread %lu: %s\n", \
  77				       isc_thread_self(), (m))
  78#else
  79#define XTRACE(m)
  80#define XTTRACE(t, m)
  81#define XTHREADTRACE(m)
  82#endif
  83
  84/***
  85 *** Types.
  86 ***/
  87
  88typedef enum {
  89	task_state_idle, task_state_ready, task_state_running,
  90	task_state_done
  91} task_state_t;
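
/*
 * Informal summary of the state transitions as implemented by task_send(),
 * task_shutdown(), task_detach() and dispatch() below:
 *
 *	idle    -> ready	an event is posted, shutdown is started, or
 *				the last reference is dropped
 *	ready   -> running	a worker thread (or the integrated dispatcher)
 *				takes the task off the manager's ready queue
 *	running -> idle		the event queue drains and the task is not
 *				yet finished
 *	running -> ready	the task's quantum expires with events still
 *				queued
 *	running -> done		no references, no events, and shutdown has
 *				been posted; task_finished() then frees it
 */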
  92
  93#if defined(HAVE_LIBXML2) && defined(BIND9)
  94static const char *statenames[] = {
  95	"idle", "ready", "running", "done",
  96};
  97#endif
  98
  99#define TASK_MAGIC			ISC_MAGIC('T', 'A', 'S', 'K')
 100#define VALID_TASK(t)			ISC_MAGIC_VALID(t, TASK_MAGIC)
 101
 102typedef struct isc__task isc__task_t;
 103typedef struct isc__taskmgr isc__taskmgr_t;
 104
 105struct isc__task {
 106	/* Not locked. */
 107	isc_task_t			common;
 108	isc__taskmgr_t *		manager;
 109	isc_mutex_t			lock;
 110	/* Locked by task lock. */
 111	task_state_t			state;
 112	unsigned int			references;
 113	isc_eventlist_t			events;
 114	isc_eventlist_t			on_shutdown;
 115	unsigned int			quantum;
 116	unsigned int			flags;
 117	isc_stdtime_t			now;
 118	char				name[16];
 119	void *				tag;
 120	/* Locked by task manager lock. */
 121	LINK(isc__task_t)		link;
 122	LINK(isc__task_t)		ready_link;
 123};
 124
 125#define TASK_F_SHUTTINGDOWN		0x01
 126
 127#define TASK_SHUTTINGDOWN(t)		(((t)->flags & TASK_F_SHUTTINGDOWN) \
 128					 != 0)
 129
 130#define TASK_MANAGER_MAGIC		ISC_MAGIC('T', 'S', 'K', 'M')
 131#define VALID_MANAGER(m)		ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC)
 132
 133typedef ISC_LIST(isc__task_t)	isc__tasklist_t;
 134
 135struct isc__taskmgr {
 136	/* Not locked. */
 137	isc_taskmgr_t			common;
 138	isc_mem_t *			mctx;
 139	isc_mutex_t			lock;
 140#ifdef ISC_PLATFORM_USETHREADS
 141	unsigned int			workers;
 142	isc_thread_t *			threads;
 143#endif /* ISC_PLATFORM_USETHREADS */
 144	/* Locked by task manager lock. */
 145	unsigned int			default_quantum;
 146	LIST(isc__task_t)		tasks;
 147	isc__tasklist_t			ready_tasks;
 148#ifdef ISC_PLATFORM_USETHREADS
 149	isc_condition_t			work_available;
 150	isc_condition_t			exclusive_granted;
 151#endif /* ISC_PLATFORM_USETHREADS */
 152	unsigned int			tasks_running;
 153	isc_boolean_t			exclusive_requested;
 154	isc_boolean_t			exiting;
 155#ifdef USE_SHARED_MANAGER
 156	unsigned int			refs;
 157#endif /* USE_SHARED_MANAGER */
 158};
 159
 160#define DEFAULT_TASKMGR_QUANTUM		10
 161#define DEFAULT_DEFAULT_QUANTUM		5
 162#define FINISHED(m)			((m)->exiting && EMPTY((m)->tasks))
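
/*
 * DEFAULT_TASKMGR_QUANTUM caps how many events a single
 * isc__taskmgr_dispatch() pass may run when there are no worker threads;
 * DEFAULT_DEFAULT_QUANTUM is the per-task quantum used when
 * isc_taskmgr_create() is passed a default_quantum of zero; FINISHED() is
 * true once the manager is exiting and its task list is empty.
 */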
 163
 164#ifdef USE_SHARED_MANAGER
 165static isc__taskmgr_t *taskmgr = NULL;
 166#endif /* USE_SHARED_MANAGER */
 167
 168/*%
 169 * The following can be either static or public, depending on the build environment.
 170 */
 171
 172#ifdef BIND9
 173#define ISC_TASKFUNC_SCOPE
 174#else
 175#define ISC_TASKFUNC_SCOPE static
 176#endif
 177
 178ISC_TASKFUNC_SCOPE isc_result_t
 179isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum,
 180		 isc_task_t **taskp);
 181ISC_TASKFUNC_SCOPE void
 182isc__task_attach(isc_task_t *source0, isc_task_t **targetp);
 183ISC_TASKFUNC_SCOPE void
 184isc__task_detach(isc_task_t **taskp);
 185ISC_TASKFUNC_SCOPE void
 186isc__task_send(isc_task_t *task0, isc_event_t **eventp);
 187ISC_TASKFUNC_SCOPE void
 188isc__task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp);
 189ISC_TASKFUNC_SCOPE unsigned int
 190isc__task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
 191		     isc_eventtype_t last, void *tag);
 192ISC_TASKFUNC_SCOPE unsigned int
 193isc__task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
 194		void *tag);
 195ISC_TASKFUNC_SCOPE isc_boolean_t
 196isc__task_purgeevent(isc_task_t *task0, isc_event_t *event);
 197ISC_TASKFUNC_SCOPE unsigned int
 198isc__task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
 199		      isc_eventtype_t last, void *tag,
 200		      isc_eventlist_t *events);
 201ISC_TASKFUNC_SCOPE unsigned int
 202isc__task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
 203		 void *tag, isc_eventlist_t *events);
 204ISC_TASKFUNC_SCOPE isc_result_t
 205isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
 206		     const void *arg);
 207ISC_TASKFUNC_SCOPE void
 208isc__task_shutdown(isc_task_t *task0);
 209ISC_TASKFUNC_SCOPE void
 210isc__task_destroy(isc_task_t **taskp);
 211ISC_TASKFUNC_SCOPE void
 212isc__task_setname(isc_task_t *task0, const char *name, void *tag);
 213ISC_TASKFUNC_SCOPE const char *
 214isc__task_getname(isc_task_t *task0);
 215ISC_TASKFUNC_SCOPE void *
 216isc__task_gettag(isc_task_t *task0);
 217ISC_TASKFUNC_SCOPE void
 218isc__task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t);
 219ISC_TASKFUNC_SCOPE isc_result_t
 220isc__taskmgr_create(isc_mem_t *mctx, unsigned int workers,
 221		    unsigned int default_quantum, isc_taskmgr_t **managerp);
 222ISC_TASKFUNC_SCOPE void
 223isc__taskmgr_destroy(isc_taskmgr_t **managerp);
 224ISC_TASKFUNC_SCOPE isc_result_t
 225isc__task_beginexclusive(isc_task_t *task);
 226ISC_TASKFUNC_SCOPE void
 227isc__task_endexclusive(isc_task_t *task0);
 228
 229static struct isc__taskmethods {
 230	isc_taskmethods_t methods;
 231
 232	/*%
 233	 * The following are defined just to avoid 'unused static function' warnings.
 234	 */
 235#ifndef BIND9
 236	void *purgeevent, *unsendrange, *getname, *gettag, *getcurrenttime;
 237#endif
 238} taskmethods = {
 239	{
 240		isc__task_attach,
 241		isc__task_detach,
 242		isc__task_destroy,
 243		isc__task_send,
 244		isc__task_sendanddetach,
 245		isc__task_unsend,
 246		isc__task_onshutdown,
 247		isc__task_shutdown,
 248		isc__task_setname,
 249		isc__task_purge,
 250		isc__task_purgerange,
 251		isc__task_beginexclusive,
 252		isc__task_endexclusive
 253	}
 254#ifndef BIND9
 255	,
 256	(void *)isc__task_purgeevent, (void *)isc__task_unsendrange,
 257	(void *)isc__task_getname, (void *)isc__task_gettag,
 258	(void *)isc__task_getcurrenttime
 259#endif
 260};
 261
 262static isc_taskmgrmethods_t taskmgrmethods = {
 263	isc__taskmgr_destroy,
 264	isc__task_create
 265};
 266
 267/***
 268 *** Tasks.
 269 ***/
 270
 271static void
 272task_finished(isc__task_t *task) {
 273	isc__taskmgr_t *manager = task->manager;
 274
 275	REQUIRE(EMPTY(task->events));
 276	REQUIRE(EMPTY(task->on_shutdown));
 277	REQUIRE(task->references == 0);
 278	REQUIRE(task->state == task_state_done);
 279
 280	XTRACE("task_finished");
 281
 282	LOCK(&manager->lock);
 283	UNLINK(manager->tasks, task, link);
 284#ifdef USE_WORKER_THREADS
 285	if (FINISHED(manager)) {
 286		/*
 287		 * All tasks have completed and the
 288		 * task manager is exiting.  Wake up
 289		 * any idle worker threads so they
 290		 * can exit.
 291		 */
 292		BROADCAST(&manager->work_available);
 293	}
 294#endif /* USE_WORKER_THREADS */
 295	UNLOCK(&manager->lock);
 296
 297	DESTROYLOCK(&task->lock);
 298	task->common.impmagic = 0;
 299	task->common.magic = 0;
 300	isc_mem_put(manager->mctx, task, sizeof(*task));
 301}
 302
 303ISC_TASKFUNC_SCOPE isc_result_t
 304isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum,
 305		 isc_task_t **taskp)
 306{
 307	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
 308	isc__task_t *task;
 309	isc_boolean_t exiting;
 310	isc_result_t result;
 311
 312	REQUIRE(VALID_MANAGER(manager));
 313	REQUIRE(taskp != NULL && *taskp == NULL);
 314
 315	task = isc_mem_get(manager->mctx, sizeof(*task));
 316	if (task == NULL)
 317		return (ISC_R_NOMEMORY);
 318	XTRACE("isc_task_create");
 319	task->manager = manager;
 320	result = isc_mutex_init(&task->lock);
 321	if (result != ISC_R_SUCCESS) {
 322		isc_mem_put(manager->mctx, task, sizeof(*task));
 323		return (result);
 324	}
 325	task->state = task_state_idle;
 326	task->references = 1;
 327	INIT_LIST(task->events);
 328	INIT_LIST(task->on_shutdown);
 329	task->quantum = quantum;
 330	task->flags = 0;
 331	task->now = 0;
 332	memset(task->name, 0, sizeof(task->name));
 333	task->tag = NULL;
 334	INIT_LINK(task, link);
 335	INIT_LINK(task, ready_link);
 336
 337	exiting = ISC_FALSE;
 338	LOCK(&manager->lock);
 339	if (!manager->exiting) {
 340		if (task->quantum == 0)
 341			task->quantum = manager->default_quantum;
 342		APPEND(manager->tasks, task, link);
 343	} else
 344		exiting = ISC_TRUE;
 345	UNLOCK(&manager->lock);
 346
 347	if (exiting) {
 348		DESTROYLOCK(&task->lock);
 349		isc_mem_put(manager->mctx, task, sizeof(*task));
 350		return (ISC_R_SHUTTINGDOWN);
 351	}
 352
 353	task->common.methods = (isc_taskmethods_t *)&taskmethods;
 354	task->common.magic = ISCAPI_TASK_MAGIC;
 355	task->common.impmagic = TASK_MAGIC;
 356	*taskp = (isc_task_t *)task;
 357
 358	return (ISC_R_SUCCESS);
 359}
 360
 361ISC_TASKFUNC_SCOPE void
 362isc__task_attach(isc_task_t *source0, isc_task_t **targetp) {
 363	isc__task_t *source = (isc__task_t *)source0;
 364
 365	/*
 366	 * Attach *targetp to source.
 367	 */
 368
 369	REQUIRE(VALID_TASK(source));
 370	REQUIRE(targetp != NULL && *targetp == NULL);
 371
 372	XTTRACE(source, "isc_task_attach");
 373
 374	LOCK(&source->lock);
 375	source->references++;
 376	UNLOCK(&source->lock);
 377
 378	*targetp = (isc_task_t *)source;
 379}
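
/*
 * Usage sketch (illustrative): holding an extra reference to a task through
 * the public wrappers declared in <isc/task.h>:
 *
 *	isc_task_t *copy = NULL;
 *
 *	isc_task_attach(task, &copy);
 *	... use 'copy' ...
 *	isc_task_detach(&copy);		(resets 'copy' to NULL)
 */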
 380
 381static inline isc_boolean_t
 382task_shutdown(isc__task_t *task) {
 383	isc_boolean_t was_idle = ISC_FALSE;
 384	isc_event_t *event, *prev;
 385
 386	/*
 387	 * Caller must be holding the task's lock.
 388	 */
 389
 390	XTRACE("task_shutdown");
 391
 392	if (! TASK_SHUTTINGDOWN(task)) {
 393		XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
 394				      ISC_MSG_SHUTTINGDOWN, "shutting down"));
 395		task->flags |= TASK_F_SHUTTINGDOWN;
 396		if (task->state == task_state_idle) {
 397			INSIST(EMPTY(task->events));
 398			task->state = task_state_ready;
 399			was_idle = ISC_TRUE;
 400		}
 401		INSIST(task->state == task_state_ready ||
 402		       task->state == task_state_running);
 403		/*
 404		 * Note that we post shutdown events LIFO.
 405		 */
 406		for (event = TAIL(task->on_shutdown);
 407		     event != NULL;
 408		     event = prev) {
 409			prev = PREV(event, ev_link);
 410			DEQUEUE(task->on_shutdown, event, ev_link);
 411			ENQUEUE(task->events, event, ev_link);
 412		}
 413	}
 414
 415	return (was_idle);
 416}
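
/*
 * Because the loop above walks on_shutdown from the tail, shutdown events
 * are delivered in LIFO order: if handlers A and then B were registered
 * with isc_task_onshutdown(), B's event is moved onto the run queue ahead
 * of A's and therefore runs first.
 */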
 417
 418static inline void
 419task_ready(isc__task_t *task) {
 420	isc__taskmgr_t *manager = task->manager;
 421
 422	REQUIRE(VALID_MANAGER(manager));
 423	REQUIRE(task->state == task_state_ready);
 424
 425	XTRACE("task_ready");
 426
 427	LOCK(&manager->lock);
 428
 429	ENQUEUE(manager->ready_tasks, task, ready_link);
 430#ifdef USE_WORKER_THREADS
 431	SIGNAL(&manager->work_available);
 432#endif /* USE_WORKER_THREADS */
 433
 434	UNLOCK(&manager->lock);
 435}
 436
 437static inline isc_boolean_t
 438task_detach(isc__task_t *task) {
 439
 440	/*
 441	 * Caller must be holding the task lock.
 442	 */
 443
 444	REQUIRE(task->references > 0);
 445
 446	XTRACE("detach");
 447
 448	task->references--;
 449	if (task->references == 0 && task->state == task_state_idle) {
 450		INSIST(EMPTY(task->events));
 451		/*
 452		 * There are no references to this task, and no
 453		 * pending events.  We could try to optimize and
 454		 * either initiate shutdown or clean up the task,
 455		 * depending on its state, but it's easier to just
 456		 * make the task ready and allow run() or the event
 457		 * loop to deal with shutting down and termination.
 458		 */
 459		task->state = task_state_ready;
 460		return (ISC_TRUE);
 461	}
 462
 463	return (ISC_FALSE);
 464}
 465
 466ISC_TASKFUNC_SCOPE void
 467isc__task_detach(isc_task_t **taskp) {
 468	isc__task_t *task;
 469	isc_boolean_t was_idle;
 470
 471	/*
 472	 * Detach *taskp from its task.
 473	 */
 474
 475	REQUIRE(taskp != NULL);
 476	task = (isc__task_t *)*taskp;
 477	REQUIRE(VALID_TASK(task));
 478
 479	XTRACE("isc_task_detach");
 480
 481	LOCK(&task->lock);
 482	was_idle = task_detach(task);
 483	UNLOCK(&task->lock);
 484
 485	if (was_idle)
 486		task_ready(task);
 487
 488	*taskp = NULL;
 489}
 490
 491static inline isc_boolean_t
 492task_send(isc__task_t *task, isc_event_t **eventp) {
 493	isc_boolean_t was_idle = ISC_FALSE;
 494	isc_event_t *event;
 495
 496	/*
 497	 * Caller must be holding the task lock.
 498	 */
 499
 500	REQUIRE(eventp != NULL);
 501	event = *eventp;
 502	REQUIRE(event != NULL);
 503	REQUIRE(event->ev_type > 0);
 504	REQUIRE(task->state != task_state_done);
 505
 506	XTRACE("task_send");
 507
 508	if (task->state == task_state_idle) {
 509		was_idle = ISC_TRUE;
 510		INSIST(EMPTY(task->events));
 511		task->state = task_state_ready;
 512	}
 513	INSIST(task->state == task_state_ready ||
 514	       task->state == task_state_running);
 515	ENQUEUE(task->events, event, ev_link);
 516	*eventp = NULL;
 517
 518	return (was_idle);
 519}
 520
 521ISC_TASKFUNC_SCOPE void
 522isc__task_send(isc_task_t *task0, isc_event_t **eventp) {
 523	isc__task_t *task = (isc__task_t *)task0;
 524	isc_boolean_t was_idle;
 525
 526	/*
 527	 * Send '*event' to 'task'.
 528	 */
 529
 530	REQUIRE(VALID_TASK(task));
 531
 532	XTRACE("isc_task_send");
 533
 534	/*
 535	 * We're trying hard to hold locks for as short a time as possible.
 536	 * We're also trying to hold as few locks as possible.  This is why
 537	 * some processing is deferred until after the lock is released.
 538	 */
 539	LOCK(&task->lock);
 540	was_idle = task_send(task, eventp);
 541	UNLOCK(&task->lock);
 542
 543	if (was_idle) {
 544		/*
 545		 * We need to add this task to the ready queue.
 546		 *
 547		 * We've waited until now to do it because making a task
 548		 * ready requires locking the manager.  If we tried to do
 549		 * this while holding the task lock, we could deadlock.
 550		 *
 551		 * We've changed the state to ready, so no one else will
 552		 * be trying to add this task to the ready queue.  The
 553		 * only way to leave the ready state is by executing the
 554		 * task.  It thus doesn't matter if events are added,
 555		 * removed, or a shutdown is started in the interval
 556		 * between the time we released the task lock, and the time
 557		 * we add the task to the ready queue.
 558		 */
 559		task_ready(task);
 560	}
 561}
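
/*
 * Sketch of a typical sender (illustrative; MYAPP_EVENT_WORK and my_action
 * are hypothetical application names, not part of this library):
 *
 *	isc_event_t *event;
 *
 *	event = isc_event_allocate(mctx, sender, MYAPP_EVENT_WORK,
 *				   my_action, arg, sizeof(*event));
 *	if (event == NULL)
 *		return (ISC_R_NOMEMORY);
 *	isc_task_send(task, &event);
 *
 * On return 'event' is NULL; the task now owns the event and will deliver
 * it to my_action.
 */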
 562
 563ISC_TASKFUNC_SCOPE void
 564isc__task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
 565	isc_boolean_t idle1, idle2;
 566	isc__task_t *task;
 567
 568	/*
 569	 * Send '*event' to '*taskp' and then detach '*taskp' from its
 570	 * task.
 571	 */
 572
 573	REQUIRE(taskp != NULL);
 574	task = (isc__task_t *)*taskp;
 575	REQUIRE(VALID_TASK(task));
 576
 577	XTRACE("isc_task_sendanddetach");
 578
 579	LOCK(&task->lock);
 580	idle1 = task_send(task, eventp);
 581	idle2 = task_detach(task);
 582	UNLOCK(&task->lock);
 583
 584	/*
 585	 * If idle1, then idle2 shouldn't be true as well since we're holding
 586	 * the task lock, and thus the task cannot switch from ready back to
 587	 * idle.
 588	 */
 589	INSIST(!(idle1 && idle2));
 590
 591	if (idle1 || idle2)
 592		task_ready(task);
 593
 594	*taskp = NULL;
 595}
 596
 597#define PURGE_OK(event)	(((event)->ev_attributes & ISC_EVENTATTR_NOPURGE) == 0)
 598
 599static unsigned int
 600dequeue_events(isc__task_t *task, void *sender, isc_eventtype_t first,
 601	       isc_eventtype_t last, void *tag,
 602	       isc_eventlist_t *events, isc_boolean_t purging)
 603{
 604	isc_event_t *event, *next_event;
 605	unsigned int count = 0;
 606
 607	REQUIRE(VALID_TASK(task));
 608	REQUIRE(last >= first);
 609
 610	XTRACE("dequeue_events");
 611
 612	/*
 613	 * Events matching 'sender', whose type is >= first and <= last, and
 614	 * whose tag is 'tag' will be dequeued.  If 'purging', matching events
 615	 * which are marked as unpurgable will not be dequeued.
 616	 *
 617	 * sender == NULL means "any sender", and tag == NULL means "any tag".
 618	 */
 619
 620	LOCK(&task->lock);
 621
 622	for (event = HEAD(task->events); event != NULL; event = next_event) {
 623		next_event = NEXT(event, ev_link);
 624		if (event->ev_type >= first && event->ev_type <= last &&
 625		    (sender == NULL || event->ev_sender == sender) &&
 626		    (tag == NULL || event->ev_tag == tag) &&
 627		    (!purging || PURGE_OK(event))) {
 628			DEQUEUE(task->events, event, ev_link);
 629			ENQUEUE(*events, event, ev_link);
 630			count++;
 631		}
 632	}
 633
 634	UNLOCK(&task->lock);
 635
 636	return (count);
 637}
 638
 639ISC_TASKFUNC_SCOPE unsigned int
 640isc__task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
 641		     isc_eventtype_t last, void *tag)
 642{
 643	isc__task_t *task = (isc__task_t *)task0;
 644	unsigned int count;
 645	isc_eventlist_t events;
 646	isc_event_t *event, *next_event;
 647
 648	/*
 649	 * Purge events from a task's event queue.
 650	 */
 651
 652	XTRACE("isc_task_purgerange");
 653
 654	ISC_LIST_INIT(events);
 655
 656	count = dequeue_events(task, sender, first, last, tag, &events,
 657			       ISC_TRUE);
 658
 659	for (event = HEAD(events); event != NULL; event = next_event) {
 660		next_event = NEXT(event, ev_link);
 661		isc_event_free(&event);
 662	}
 663
 664	/*
 665	 * Note that purging never changes the state of the task.
 666	 */
 667
 668	return (count);
 669}
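
/*
 * Example: since NULL is a wildcard for both 'sender' and 'tag',
 *
 *	isc_task_purgerange(task, NULL, first, last, NULL);
 *
 * frees every purgeable event on the queue whose type lies in
 * [first, last], regardless of its sender or tag.
 */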
 670
 671ISC_TASKFUNC_SCOPE unsigned int
 672isc__task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
 673		void *tag)
 674{
 675	/*
 676	 * Purge events from a task's event queue.
 677	 */
 678
 679	XTRACE("isc_task_purge");
 680
 681	return (isc__task_purgerange(task, sender, type, type, tag));
 682}
 683
 684ISC_TASKFUNC_SCOPE isc_boolean_t
 685isc__task_purgeevent(isc_task_t *task0, isc_event_t *event) {
 686	isc__task_t *task = (isc__task_t *)task0;
 687	isc_event_t *curr_event, *next_event;
 688
 689	/*
 690	 * Purge 'event' from a task's event queue.
 691	 *
 692	 * XXXRTH:  WARNING:  This method may be removed before beta.
 693	 */
 694
 695	REQUIRE(VALID_TASK(task));
 696
 697	/*
 698	 * If 'event' is on the task's event queue, it will be purged,
 699	 * unless it is marked as unpurgeable.  'event' does not have to be
 700	 * on the task's event queue; in fact, it can even be an invalid
 701	 * pointer.  Purging only occurs if the event is actually on the task's
 702	 * event queue.
 703	 *
 704	 * Purging never changes the state of the task.
 705	 */
 706
 707	LOCK(&task->lock);
 708	for (curr_event = HEAD(task->events);
 709	     curr_event != NULL;
 710	     curr_event = next_event) {
 711		next_event = NEXT(curr_event, ev_link);
 712		if (curr_event == event && PURGE_OK(event)) {
 713			DEQUEUE(task->events, curr_event, ev_link);
 714			break;
 715		}
 716	}
 717	UNLOCK(&task->lock);
 718
 719	if (curr_event == NULL)
 720		return (ISC_FALSE);
 721
 722	isc_event_free(&curr_event);
 723
 724	return (ISC_TRUE);
 725}
 726
 727ISC_TASKFUNC_SCOPE unsigned int
 728isc__task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
 729		      isc_eventtype_t last, void *tag,
 730		      isc_eventlist_t *events)
 731{
 732	/*
 733	 * Remove events from a task's event queue.
 734	 */
 735
 736	XTRACE("isc_task_unsendrange");
 737
 738	return (dequeue_events((isc__task_t *)task, sender, first,
 739			       last, tag, events, ISC_FALSE));
 740}
 741
 742ISC_TASKFUNC_SCOPE unsigned int
 743isc__task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
 744		 void *tag, isc_eventlist_t *events)
 745{
 746	/*
 747	 * Remove events from a task's event queue.
 748	 */
 749
 750	XTRACE("isc_task_unsend");
 751
 752	return (dequeue_events((isc__task_t *)task, sender, type,
 753			       type, tag, events, ISC_FALSE));
 754}
 755
 756ISC_TASKFUNC_SCOPE isc_result_t
 757isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
 758		     const void *arg)
 759{
 760	isc__task_t *task = (isc__task_t *)task0;
 761	isc_boolean_t disallowed = ISC_FALSE;
 762	isc_result_t result = ISC_R_SUCCESS;
 763	isc_event_t *event;
 764
 765	/*
 766	 * Send a shutdown event with action 'action' and argument 'arg' when
 767	 * 'task' is shutdown.
 768	 */
 769
 770	REQUIRE(VALID_TASK(task));
 771	REQUIRE(action != NULL);
 772
 773	event = isc_event_allocate(task->manager->mctx,
 774				   NULL,
 775				   ISC_TASKEVENT_SHUTDOWN,
 776				   action,
 777				   arg,
 778				   sizeof(*event));
 779	if (event == NULL)
 780		return (ISC_R_NOMEMORY);
 781
 782	LOCK(&task->lock);
 783	if (TASK_SHUTTINGDOWN(task)) {
 784		disallowed = ISC_TRUE;
 785		result = ISC_R_SHUTTINGDOWN;
 786	} else
 787		ENQUEUE(task->on_shutdown, event, ev_link);
 788	UNLOCK(&task->lock);
 789
 790	if (disallowed)
 791		isc_mem_put(task->manager->mctx, event, sizeof(*event));
 792
 793	return (result);
 794}
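
/*
 * Sketch of registering a shutdown handler (my_shutdown is a hypothetical
 * application callback).  The action receives the shutdown event when the
 * task shuts down and is responsible for freeing it:
 *
 *	static void
 *	my_shutdown(isc_task_t *task, isc_event_t *event) {
 *		... release per-task state ...
 *		isc_event_free(&event);
 *	}
 *
 *	result = isc_task_onshutdown(task, my_shutdown, arg);
 */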
 795
 796ISC_TASKFUNC_SCOPE void
 797isc__task_shutdown(isc_task_t *task0) {
 798	isc__task_t *task = (isc__task_t *)task0;
 799	isc_boolean_t was_idle;
 800
 801	/*
 802	 * Shutdown 'task'.
 803	 */
 804
 805	REQUIRE(VALID_TASK(task));
 806
 807	LOCK(&task->lock);
 808	was_idle = task_shutdown(task);
 809	UNLOCK(&task->lock);
 810
 811	if (was_idle)
 812		task_ready(task);
 813}
 814
 815ISC_TASKFUNC_SCOPE void
 816isc__task_destroy(isc_task_t **taskp) {
 817
 818	/*
 819	 * Destroy '*taskp'.
 820	 */
 821
 822	REQUIRE(taskp != NULL);
 823
 824	isc_task_shutdown(*taskp);
 825	isc_task_detach(taskp);
 826}
 827
 828ISC_TASKFUNC_SCOPE void
 829isc__task_setname(isc_task_t *task0, const char *name, void *tag) {
 830	isc__task_t *task = (isc__task_t *)task0;
 831
 832	/*
 833	 * Name 'task'.
 834	 */
 835
 836	REQUIRE(VALID_TASK(task));
 837
 838	LOCK(&task->lock);
 839	memset(task->name, 0, sizeof(task->name));
 840	strncpy(task->name, name, sizeof(task->name) - 1);
 841	task->tag = tag;
 842	UNLOCK(&task->lock);
 843}
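
/*
 * Example: isc_task_setname(task, "resclient", NULL);
 * Names longer than 15 characters are silently truncated, since task->name
 * is a fixed 16-byte buffer and the strncpy() above preserves the final NUL.
 */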
 844
 845ISC_TASKFUNC_SCOPE const char *
 846isc__task_getname(isc_task_t *task0) {
 847	isc__task_t *task = (isc__task_t *)task0;
 848
 849	REQUIRE(VALID_TASK(task));
 850
 851	return (task->name);
 852}
 853
 854ISC_TASKFUNC_SCOPE void *
 855isc__task_gettag(isc_task_t *task0) {
 856	isc__task_t *task = (isc__task_t *)task0;
 857
 858	REQUIRE(VALID_TASK(task));
 859
 860	return (task->tag);
 861}
 862
 863ISC_TASKFUNC_SCOPE void
 864isc__task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t) {
 865	isc__task_t *task = (isc__task_t *)task0;
 866
 867	REQUIRE(VALID_TASK(task));
 868	REQUIRE(t != NULL);
 869
 870	LOCK(&task->lock);
 871
 872	*t = task->now;
 873
 874	UNLOCK(&task->lock);
 875}
 876
 877/***
 878 *** Task Manager.
 879 ***/
 880static void
 881dispatch(isc__taskmgr_t *manager) {
 882	isc__task_t *task;
 883#ifndef USE_WORKER_THREADS
 884	unsigned int total_dispatch_count = 0;
 885	isc__tasklist_t ready_tasks;
 886#endif /* USE_WORKER_THREADS */
 887
 888	REQUIRE(VALID_MANAGER(manager));
 889
 890	/*
 891	 * Again we're trying to hold the lock for as short a time as possible
 892	 * and to do as little locking and unlocking as possible.
 893	 *
 894	 * In both while loops, the appropriate lock must be held before the
 895	 * while body starts.  Code which acquired the lock at the top of
 896	 * the loop would be more readable, but would result in a lot of
 897	 * extra locking.  Compare:
 898	 *
 899	 * Straightforward:
 900	 *
 901	 *	LOCK();
 902	 *	...
 903	 *	UNLOCK();
 904	 *	while (expression) {
 905	 *		LOCK();
 906	 *		...
 907	 *		UNLOCK();
 908	 *
 909	 *	       	Unlocked part here...
 910	 *
 911	 *		LOCK();
 912	 *		...
 913	 *		UNLOCK();
 914	 *	}
 915	 *
 916	 * Note how if the loop continues we unlock and then immediately lock.
 917	 * For N iterations of the loop, this code does 2N+1 locks and 2N+1
 918	 * unlocks.  Also note that the lock is not held when the while
 919	 * condition is tested, which may or may not be important, depending
 920	 * on the expression.
 921	 *
 922	 * As written:
 923	 *
 924	 *	LOCK();
 925	 *	while (expression) {
 926	 *		...
 927	 *		UNLOCK();
 928	 *
 929	 *	       	Unlocked part here...
 930	 *
 931	 *		LOCK();
 932	 *		...
 933	 *	}
 934	 *	UNLOCK();
 935	 *
 936	 * For N iterations of the loop, this code does N+1 locks and N+1
 937	 * unlocks.  The while expression is always protected by the lock.
 938	 */
 939
 940#ifndef USE_WORKER_THREADS
 941	ISC_LIST_INIT(ready_tasks);
 942#endif
 943	LOCK(&manager->lock);
 944	while (!FINISHED(manager)) {
 945#ifdef USE_WORKER_THREADS
 946		/*
 947		 * For reasons similar to those given in the comment in
 948		 * isc_task_send() above, it is safe for us to dequeue
 949		 * the task while only holding the manager lock, and then
 950		 * change the task to running state while only holding the
 951		 * task lock.
 952		 */
 953		while ((EMPTY(manager->ready_tasks) ||
 954			manager->exclusive_requested) &&
 955			!FINISHED(manager))
 956		{
 957			XTHREADTRACE(isc_msgcat_get(isc_msgcat,
 958						    ISC_MSGSET_GENERAL,
 959						    ISC_MSG_WAIT, "wait"));
 960			WAIT(&manager->work_available, &manager->lock);
 961			XTHREADTRACE(isc_msgcat_get(isc_msgcat,
 962						    ISC_MSGSET_TASK,
 963						    ISC_MSG_AWAKE, "awake"));
 964		}
 965#else /* USE_WORKER_THREADS */
 966		if (total_dispatch_count >= DEFAULT_TASKMGR_QUANTUM ||
 967		    EMPTY(manager->ready_tasks))
 968			break;
 969#endif /* USE_WORKER_THREADS */
 970		XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK,
 971					    ISC_MSG_WORKING, "working"));
 972
 973		task = HEAD(manager->ready_tasks);
 974		if (task != NULL) {
 975			unsigned int dispatch_count = 0;
 976			isc_boolean_t done = ISC_FALSE;
 977			isc_boolean_t requeue = ISC_FALSE;
 978			isc_boolean_t finished = ISC_FALSE;
 979			isc_event_t *event;
 980
 981			INSIST(VALID_TASK(task));
 982
 983			/*
 984			 * Note we only unlock the manager lock if we actually
 985			 * have a task to do.  We must reacquire the manager
 986			 * lock before exiting the 'if (task != NULL)' block.
 987			 */
 988			DEQUEUE(manager->ready_tasks, task, ready_link);
 989			manager->tasks_running++;
 990			UNLOCK(&manager->lock);
 991
 992			LOCK(&task->lock);
 993			INSIST(task->state == task_state_ready);
 994			task->state = task_state_running;
 995			XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
 996					      ISC_MSG_RUNNING, "running"));
 997			isc_stdtime_get(&task->now);
 998			do {
 999				if (!EMPTY(task->events)) {
1000					event = HEAD(task->events);
1001					DEQUEUE(task->events, event, ev_link);
1002
1003					/*
1004					 * Execute the event action.
1005					 */
1006					XTRACE(isc_msgcat_get(isc_msgcat,
1007							    ISC_MSGSET_TASK,
1008							    ISC_MSG_EXECUTE,
1009							    "execute action"));
1010					if (event->ev_action != NULL) {
1011						UNLOCK(&task->lock);
1012						(event->ev_action)(
1013							(isc_task_t *)task,
1014							event);
1015						LOCK(&task->lock);
1016					}
1017					dispatch_count++;
1018#ifndef USE_WORKER_THREADS
1019					total_dispatch_count++;
1020#endif /* USE_WORKER_THREADS */
1021				}
1022
1023				if (task->references == 0 &&
1024				    EMPTY(task->events) &&
1025				    !TASK_SHUTTINGDOWN(task)) {
1026					isc_boolean_t was_idle;
1027
1028					/*
1029					 * There are no references and no
1030					 * pending events for this task,
1031					 * which means it will not become
1032					 * runnable again via an external
1033					 * action (such as sending an event
1034					 * or detaching).
1035					 *
1036					 * We initiate shutdown to prevent
1037					 * it from becoming a zombie.
1038					 *
1039					 * We do this here instead of in
1040					 * the "if EMPTY(task->events)" block
1041					 * below because:
1042					 *
1043					 *	If we post no shutdown events,
1044					 *	we want the task to finish.
1045					 *
1046					 *	If we did post shutdown events,
 1047					 *	we will still want the task's
1048					 *	quantum to be applied.
1049					 */
1050					was_idle = task_shutdown(task);
1051					INSIST(!was_idle);
1052				}
1053
1054				if (EMPTY(task->events)) {
1055					/*
1056					 * Nothing else to do for this task
1057					 * right now.
1058					 */
1059					XTRACE(isc_msgcat_get(isc_msgcat,
1060							      ISC_MSGSET_TASK,
1061							      ISC_MSG_EMPTY,
1062							      "empty"));
1063					if (task->references == 0 &&
1064					    TASK_SHUTTINGDOWN(task)) {
1065						/*
1066						 * The task is done.
1067						 */
1068						XTRACE(isc_msgcat_get(
1069							       isc_msgcat,
1070							       ISC_MSGSET_TASK,
1071							       ISC_MSG_DONE,
1072							       "done"));
1073						finished = ISC_TRUE;
1074						task->state = task_state_done;
1075					} else
1076						task->state = task_state_idle;
1077					done = ISC_TRUE;
1078				} else if (dispatch_count >= task->quantum) {
1079					/*
1080					 * Our quantum has expired, but
1081					 * there is more work to be done.
1082					 * We'll requeue it to the ready
1083					 * queue later.
1084					 *
1085					 * We don't check quantum until
1086					 * dispatching at least one event,
1087					 * so the minimum quantum is one.
1088					 */
1089					XTRACE(isc_msgcat_get(isc_msgcat,
1090							      ISC_MSGSET_TASK,
1091							      ISC_MSG_QUANTUM,
1092							      "quantum"));
1093					task->state = task_state_ready;
1094					requeue = ISC_TRUE;
1095					done = ISC_TRUE;
1096				}
1097			} while (!done);
1098			UNLOCK(&task->lock);
1099
1100			if (finished)
1101				task_finished(task);
1102
1103			LOCK(&manager->lock);
1104			manager->tasks_running--;
1105#ifdef USE_WORKER_THREADS
1106			if (manager->exclusive_requested &&
1107			    manager->tasks_running == 1) {
1108				SIGNAL(&manager->exclusive_granted);
1109			}
1110#endif /* USE_WORKER_THREADS */
1111			if (requeue) {
1112				/*
1113				 * We know we're awake, so we don't have
1114				 * to wakeup any sleeping threads if the
1115				 * ready queue is empty before we requeue.
1116				 *
1117				 * A possible optimization if the queue is
1118				 * empty is to 'goto' the 'if (task != NULL)'
1119				 * block, avoiding the ENQUEUE of the task
1120				 * and the subsequent immediate DEQUEUE
1121				 * (since it is the only executable task).
1122				 * We don't do this because then we'd be
 1123				 * skipping the manager-exiting (FINISHED) check.  The
1124				 * cost of ENQUEUE is low anyway, especially
1125				 * when you consider that we'd have to do
1126				 * an extra EMPTY check to see if we could
1127				 * do the optimization.  If the ready queue
1128				 * were usually nonempty, the 'optimization'
1129				 * might even hurt rather than help.
1130				 */
1131#ifdef USE_WORKER_THREADS
1132				ENQUEUE(manager->ready_tasks, task,
1133					ready_link);
1134#else
1135				ENQUEUE(ready_tasks, task, ready_link);
1136#endif
1137			}
1138		}
1139	}
1140#ifndef USE_WORKER_THREADS
1141	ISC_LIST_APPENDLIST(manager->ready_tasks, ready_tasks, ready_link);
1142#endif
1143	UNLOCK(&manager->lock);
1144}
1145
1146#ifdef USE_WORKER_THREADS
1147static isc_threadresult_t
1148#ifdef _WIN32
1149WINAPI
1150#endif
1151run(void *uap) {
1152	isc__taskmgr_t *manager = uap;
1153
1154	XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1155				    ISC_MSG_STARTING, "starting"));
1156
1157	dispatch(manager);
1158
1159	XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1160				    ISC_MSG_EXITING, "exiting"));
1161
1162#ifdef OPENSSL_LEAKS
1163	ERR_remove_state(0);
1164#endif
1165
1166	return ((isc_threadresult_t)0);
1167}
1168#endif /* USE_WORKER_THREADS */
1169
1170static void
1171manager_free(isc__taskmgr_t *manager) {
1172	isc_mem_t *mctx;
1173
1174#ifdef USE_WORKER_THREADS
1175	(void)isc_condition_destroy(&manager->exclusive_granted);
1176	(void)isc_condition_destroy(&manager->work_available);
1177	isc_mem_free(manager->mctx, manager->threads);
1178#endif /* USE_WORKER_THREADS */
1179	DESTROYLOCK(&manager->lock);
1180	manager->common.impmagic = 0;
1181	manager->common.magic = 0;
1182	mctx = manager->mctx;
1183	isc_mem_put(mctx, manager, sizeof(*manager));
1184	isc_mem_detach(&mctx);
1185
1186#ifdef USE_SHARED_MANAGER
1187	taskmgr = NULL;
1188#endif	/* USE_SHARED_MANAGER */
1189}
1190
1191ISC_TASKFUNC_SCOPE isc_result_t
1192isc__taskmgr_create(isc_mem_t *mctx, unsigned int workers,
1193		    unsigned int default_quantum, isc_taskmgr_t **managerp)
1194{
1195	isc_result_t result;
1196	unsigned int i, started = 0;
1197	isc__taskmgr_t *manager;
1198
1199	/*
1200	 * Create a new task manager.
1201	 */
1202
1203	REQUIRE(workers > 0);
1204	REQUIRE(managerp != NULL && *managerp == NULL);
1205
1206#ifndef USE_WORKER_THREADS
1207	UNUSED(i);
1208	UNUSED(started);
1209#endif
1210
1211#ifdef USE_SHARED_MANAGER
1212	if (taskmgr != NULL) {
1213		if (taskmgr->refs == 0)
1214			return (ISC_R_SHUTTINGDOWN);
1215		taskmgr->refs++;
1216		*managerp = (isc_taskmgr_t *)taskmgr;
1217		return (ISC_R_SUCCESS);
1218	}
1219#endif /* USE_SHARED_MANAGER */
1220
1221	manager = isc_mem_get(mctx, sizeof(*manager));
1222	if (manager == NULL)
1223		return (ISC_R_NOMEMORY);
1224	manager->common.methods = &taskmgrmethods;
1225	manager->common.impmagic = TASK_MANAGER_MAGIC;
1226	manager->common.magic = ISCAPI_TASKMGR_MAGIC;
1227	manager->mctx = NULL;
1228	result = isc_mutex_init(&manager->lock);
1229	if (result != ISC_R_SUCCESS)
1230		goto cleanup_mgr;
1231
1232#ifdef USE_WORKER_THREADS
1233	manager->workers = 0;
1234	manager->threads = isc_mem_allocate(mctx,
1235					    workers * sizeof(isc_thread_t));
1236	if (manager->threads == NULL) {
1237		result = ISC_R_NOMEMORY;
1238		goto cleanup_lock;
1239	}
1240	if (isc_condition_init(&manager->work_available) != ISC_R_SUCCESS) {
1241		UNEXPECTED_ERROR(__FILE__, __LINE__,
1242				 "isc_condition_init() %s",
1243				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1244						ISC_MSG_FAILED, "failed"));
1245		result = ISC_R_UNEXPECTED;
1246		goto cleanup_threads;
1247	}
1248	if (isc_condition_init(&manager->exclusive_granted) != ISC_R_SUCCESS) {
1249		UNEXPECTED_ERROR(__FILE__, __LINE__,
1250				 "isc_condition_init() %s",
1251				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1252						ISC_MSG_FAILED, "failed"));
1253		result = ISC_R_UNEXPECTED;
1254		goto cleanup_workavailable;
1255	}
1256#endif /* USE_WORKER_THREADS */
1257	if (default_quantum == 0)
1258		default_quantum = DEFAULT_DEFAULT_QUANTUM;
1259	manager->default_quantum = default_quantum;
1260	INIT_LIST(manager->tasks);
1261	INIT_LIST(manager->ready_tasks);
1262	manager->tasks_running = 0;
1263	manager->exclusive_requested = ISC_FALSE;
1264	manager->exiting = ISC_FALSE;
1265
1266	isc_mem_attach(mctx, &manager->mctx);
1267
1268#ifdef USE_WORKER_THREADS
1269	LOCK(&manager->lock);
1270	/*
1271	 * Start workers.
1272	 */
1273	for (i = 0; i < workers; i++) {
1274		if (isc_thread_create(run, manager,
1275				      &manager->threads[manager->workers]) ==
1276		    ISC_R_SUCCESS) {
1277			manager->workers++;
1278			started++;
1279		}
1280	}
1281	UNLOCK(&manager->lock);
1282
1283	if (started == 0) {
1284		manager_free(manager);
1285		return (ISC_R_NOTHREADS);
1286	}
1287	isc_thread_setconcurrency(workers);
1288#endif /* USE_WORKER_THREADS */
1289#ifdef USE_SHARED_MANAGER
1290	manager->refs = 1;
1291	taskmgr = manager;
1292#endif /* USE_SHARED_MANAGER */
1293
1294	*managerp = (isc_taskmgr_t *)manager;
1295
1296	return (ISC_R_SUCCESS);
1297
1298#ifdef USE_WORKER_THREADS
1299 cleanup_workavailable:
1300	(void)isc_condition_destroy(&manager->work_available);
1301 cleanup_threads:
1302	isc_mem_free(mctx, manager->threads);
1303 cleanup_lock:
1304	DESTROYLOCK(&manager->lock);
1305#endif
1306 cleanup_mgr:
1307	isc_mem_put(mctx, manager, sizeof(*manager));
1308	return (result);
1309}
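
/*
 * Usage sketch for a threaded caller (the worker count is illustrative):
 *
 *	isc_taskmgr_t *taskmgr = NULL;
 *
 *	result = isc_taskmgr_create(mctx, 4, 0, &taskmgr);
 *
 * creates a manager with four worker threads and the default per-task
 * quantum; the matching teardown is isc_taskmgr_destroy(&taskmgr).
 */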
1310
1311ISC_TASKFUNC_SCOPE void
1312isc__taskmgr_destroy(isc_taskmgr_t **managerp) {
1313	isc__taskmgr_t *manager;
1314	isc__task_t *task;
1315	unsigned int i;
1316
1317	/*
1318	 * Destroy '*managerp'.
1319	 */
1320
1321	REQUIRE(managerp != NULL);
1322	manager = (isc__taskmgr_t *)*managerp;
1323	REQUIRE(VALID_MANAGER(manager));
1324
1325#ifndef USE_WORKER_THREADS
1326	UNUSED(i);
1327#endif /* USE_WORKER_THREADS */
1328
1329#ifdef USE_SHARED_MANAGER
1330	manager->refs--;
1331	if (manager->refs > 0) {
1332		*managerp = NULL;
1333		return;
1334	}
1335#endif
1336
1337	XTHREADTRACE("isc_taskmgr_destroy");
1338	/*
1339	 * Only one non-worker thread may ever call this routine.
1340	 * If a worker thread wants to initiate shutdown of the
1341	 * task manager, it should ask some non-worker thread to call
1342	 * isc_taskmgr_destroy(), e.g. by signalling a condition variable
1343	 * that the startup thread is sleeping on.
1344	 */
1345
1346	/*
1347	 * Unlike elsewhere, we're going to hold this lock a long time.
1348	 * We need to do so, because otherwise the list of tasks could
1349	 * change while we were traversing it.
1350	 *
1351	 * This is also the only function where we will hold both the
1352	 * task manager lock and a task lock at the same time.
1353	 */
1354
1355	LOCK(&manager->lock);
1356
1357	/*
1358	 * Make sure we only get called once.
1359	 */
1360	INSIST(!manager->exiting);
1361	manager->exiting = ISC_TRUE;
1362
1363	/*
1364	 * Post shutdown event(s) to every task (if they haven't already been
1365	 * posted).
1366	 */
1367	for (task = HEAD(manager->tasks);
1368	     task != NULL;
1369	     task = NEXT(task, link)) {
1370		LOCK(&task->lock);
1371		if (task_shutdown(task))
1372			ENQUEUE(manager->ready_tasks, task, ready_link);
1373		UNLOCK(&task->lock);
1374	}
1375#ifdef USE_WORKER_THREADS
1376	/*
1377	 * Wake up any sleeping workers.  This ensures we get work done if
1378	 * there's work left to do, and if there are already no tasks left
1379	 * it will cause the workers to see manager->exiting.
1380	 */
1381	BROADCAST(&manager->work_available);
1382	UNLOCK(&manager->lock);
1383
1384	/*
1385	 * Wait for all the worker threads to exit.
1386	 */
1387	for (i = 0; i < manager->workers; i++)
1388		(void)isc_thread_join(manager->threads[i], NULL);
1389#else /* USE_WORKER_THREADS */
1390	/*
1391	 * Dispatch the shutdown events.
1392	 */
1393	UNLOCK(&manager->lock);
1394	while (isc__taskmgr_ready((isc_taskmgr_t *)manager))
1395		(void)isc__taskmgr_dispatch((isc_taskmgr_t *)manager);
1396#ifdef BIND9
1397	if (!ISC_LIST_EMPTY(manager->tasks))
1398		isc_mem_printallactive(stderr);
1399#endif
1400	INSIST(ISC_LIST_EMPTY(manager->tasks));
1401#ifdef USE_SHARED_MANAGER
1402	taskmgr = NULL;
1403#endif
1404#endif /* USE_WORKER_THREADS */
1405
1406	manager_free(manager);
1407
1408	*managerp = NULL;
1409}
1410
1411#ifndef USE_WORKER_THREADS
1412isc_boolean_t
1413isc__taskmgr_ready(isc_taskmgr_t *manager0) {
1414	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1415
1416#ifdef USE_SHARED_MANAGER
1417	if (manager == NULL)
1418		manager = taskmgr;
1419#endif
1420	if (manager == NULL)
1421		return (ISC_FALSE);
1422	return (ISC_TF(!ISC_LIST_EMPTY(manager->ready_tasks)));
1423}
1424
1425isc_result_t
1426isc__taskmgr_dispatch(isc_taskmgr_t *manager0) {
1427	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1428
1429#ifdef USE_SHARED_MANAGER
1430	if (manager == NULL)
1431		manager = taskmgr;
1432#endif
1433	if (manager == NULL)
1434		return (ISC_R_NOTFOUND);
1435
1436	dispatch(manager);
1437
1438	return (ISC_R_SUCCESS);
1439}
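
/*
 * When no worker threads are in use, these two functions are how the
 * caller's event loop drains pending task work, e.g. (mirroring the
 * shutdown loop in isc__taskmgr_destroy() above):
 *
 *	while (isc__taskmgr_ready(mgr))
 *		(void)isc__taskmgr_dispatch(mgr);
 */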
1440
1441#endif /* USE_WORKER_THREADS */
1442
1443ISC_TASKFUNC_SCOPE isc_result_t
1444isc__task_beginexclusive(isc_task_t *task0) {
1445#ifdef USE_WORKER_THREADS
1446	isc__task_t *task = (isc__task_t *)task0;
1447	isc__taskmgr_t *manager = task->manager;
1448	REQUIRE(task->state == task_state_running);
1449	LOCK(&manager->lock);
1450	if (manager->exclusive_requested) {
1451		UNLOCK(&manager->lock);
1452		return (ISC_R_LOCKBUSY);
1453	}
1454	manager->exclusive_requested = ISC_TRUE;
1455	while (manager->tasks_running > 1) {
1456		WAIT(&manager->exclusive_granted, &manager->lock);
1457	}
1458	UNLOCK(&manager->lock);
1459#else
1460	UNUSED(task0);
1461#endif
1462	return (ISC_R_SUCCESS);
1463}
1464
1465ISC_TASKFUNC_SCOPE void
1466isc__task_endexclusive(isc_task_t *task0) {
1467#ifdef USE_WORKER_THREADS
1468	isc__task_t *task = (isc__task_t *)task0;
1469	isc__taskmgr_t *manager = task->manager;
1470
1471	REQUIRE(task->state == task_state_running);
1472	LOCK(&manager->lock);
1473	REQUIRE(manager->exclusive_requested);
1474	manager->exclusive_requested = ISC_FALSE;
1475	BROADCAST(&manager->work_available);
1476	UNLOCK(&manager->lock);
1477#else
1478	UNUSED(task0);
1479#endif
1480}
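
/*
 * Typical pattern for exclusive mode, called from inside a running task's
 * event action (sketch):
 *
 *	result = isc_task_beginexclusive(task);
 *	if (result == ISC_R_SUCCESS) {
 *		... touch state shared with other tasks ...
 *		isc_task_endexclusive(task);
 *	}
 *
 * isc_task_beginexclusive() blocks until the calling task is the only one
 * running; ISC_R_LOCKBUSY means another task has already requested
 * exclusive mode.
 */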
1481
1482#ifdef USE_SOCKETIMPREGISTER
1483isc_result_t
1484isc__task_register() {
1485	return (isc_task_register(isc__taskmgr_create));
1486}
1487#endif
1488
1489isc_boolean_t
1490isc_task_exiting(isc_task_t *t) {
1491	isc__task_t *task = (isc__task_t *)t;
1492
1493	REQUIRE(VALID_TASK(task));
1494	return (TASK_SHUTTINGDOWN(task));
1495}
1496
1497
1498#if defined(HAVE_LIBXML2) && defined(BIND9)
1499void
1500isc_taskmgr_renderxml(isc_taskmgr_t *mgr0, xmlTextWriterPtr writer) {
1501	isc__taskmgr_t *mgr = (isc__taskmgr_t *)mgr0;
1502	isc__task_t *task;
1503
1504	LOCK(&mgr->lock);
1505
1506	/*
1507	 * Write out the thread-model, and some details about each depending
1508	 * on which type is enabled.
1509	 */
1510	xmlTextWriterStartElement(writer, ISC_XMLCHAR "thread-model");
1511#ifdef ISC_PLATFORM_USETHREADS
1512	xmlTextWriterStartElement(writer, ISC_XMLCHAR "type");
1513	xmlTextWriterWriteString(writer, ISC_XMLCHAR "threaded");
1514	xmlTextWriterEndElement(writer); /* type */
1515
1516	xmlTextWriterStartElement(writer, ISC_XMLCHAR "worker-threads");
1517	xmlTextWriterWriteFormatString(writer, "%d", mgr->workers);
1518	xmlTextWriterEndElement(writer); /* worker-threads */
1519#else /* ISC_PLATFORM_USETHREADS */
1520	xmlTextWriterStartElement(writer, ISC_XMLCHAR "type");
1521	xmlTextWriterWriteString(writer, ISC_XMLCHAR "non-threaded");
1522	xmlTextWriterEndElement(writer); /* type */
1523
1524	xmlTextWriterStartElement(writer, ISC_XMLCHAR "references");
1525	xmlTextWriterWriteFormatString(writer, "%d", mgr->refs);
1526	xmlTextWriterEndElement(writer); /* references */
1527#endif /* ISC_PLATFORM_USETHREADS */
1528
1529	xmlTextWriterStartElement(writer, ISC_XMLCHAR "default-quantum");
1530	xmlTextWriterWriteFormatString(writer, "%d", mgr->default_quantum);
1531	xmlTextWriterEndElement(writer); /* default-quantum */
1532
1533	xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-running");
1534	xmlTextWriterWriteFormatString(writer, "%d", mgr->tasks_running);
1535	xmlTextWriterEndElement(writer); /* tasks-running */
1536
1537	xmlTextWriterEndElement(writer); /* thread-model */
1538
1539	xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks");
1540	task = ISC_LIST_HEAD(mgr->tasks);
1541	while (task != NULL) {
1542		LOCK(&task->lock);
1543		xmlTextWriterStartElement(writer, ISC_XMLCHAR "task");
1544
1545		if (task->name[0] != 0) {
1546			xmlTextWriterStartElement(writer, ISC_XMLCHAR "name");
1547			xmlTextWriterWriteFormatString(writer, "%s",
1548						       task->name);
1549			xmlTextWriterEndElement(writer); /* name */
1550		}
1551
1552		xmlTextWriterStartElement(writer, ISC_XMLCHAR "references");
1553		xmlTextWriterWriteFormatString(writer, "%d", task->references);
1554		xmlTextWriterEndElement(writer); /* references */
1555
1556		xmlTextWriterStartElement(writer, ISC_XMLCHAR "id");
1557		xmlTextWriterWriteFormatString(writer, "%p", task);
1558		xmlTextWriterEndElement(writer); /* id */
1559
1560		xmlTextWriterStartElement(writer, ISC_XMLCHAR "state");
1561		xmlTextWriterWriteFormatString(writer, "%s",
1562					       statenames[task->state]);
1563		xmlTextWriterEndElement(writer); /* state */
1564
1565		xmlTextWriterStartElement(writer, ISC_XMLCHAR "quantum");
1566		xmlTextWriterWriteFormatString(writer, "%d", task->quantum);
1567		xmlTextWriterEndElement(writer); /* quantum */
1568
1569		xmlTextWriterEndElement(writer);
1570
1571		UNLOCK(&task->lock);
1572		task = ISC_LIST_NEXT(task, link);
1573	}
1574	xmlTextWriterEndElement(writer); /* tasks */
1575
1576	UNLOCK(&mgr->lock);
1577}
1578#endif /* HAVE_LIBXML2 && BIND9 */