
/drivers/base/power/main.c

https://bitbucket.org/zossso/android-kernel-2.6.34-motus
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static void dpm_drv_timeout(unsigned long data);
static DEFINE_TIMER(dpm_drv_wd, dpm_drv_timeout, 0, 0);

/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices.  Protected by dpm_list_mtx.
 */
static bool transition_started;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.status = DPM_ON;
	init_completion(&dev->power.completion);
	pm_runtime_init(dev);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent) {
		if (dev->parent->power.status >= DPM_SUSPENDING)
			dev_warn(dev, "parent %s should not be sleeping\n",
				 dev_name(dev->parent));
	} else if (transition_started) {
		/*
		 * We refuse to register parentless devices while a PM
		 * transition is in progress in order to avoid leaving them
		 * unhandled down the road
		 */
		dev_WARN(dev, "Parentless device registered during a PM transition\n");
	}

	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i\n",
				dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
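
/*
 * The completion in dev->power.completion is what orders the async variants
 * of suspend and resume: device_resume() waits on the parent via dpm_wait(),
 * and __device_suspend() waits on all children via dpm_wait_for_children(),
 * so the device tree is always walked in a safe order even when some devices
 * are handled by async threads.
 */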

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}
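
/*
 * Illustrative sketch (not part of the original file): pm_op() finds these
 * callbacks through a dev_pm_ops that a driver exposes via its bus, type or
 * class.  "foo" is a hypothetical driver used only for this example:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,
 *		.resume  = foo_resume,
 *		.freeze  = foo_freeze,
 *		.thaw    = foo_thaw,
 *	};
 *
 * PM_EVENT_SUSPEND then dispatches to foo_suspend(), PM_EVENT_RESUME to
 * foo_resume(), and so on, exactly as the switch above reads.
 */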

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;
	ktime_t calltime, delta, rettime;

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
				dev_name(dev), task_pid_nr(current),
				dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	s64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	}

End:
	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	list_for_each_entry(dev, &dpm_list, power.entry)
		if (dev->power.status > DPM_OFF) {
			int error;

			dev->power.status = DPM_OFF;
			error = device_resume_noirq(dev, state);
			if (error)
				pm_dev_err(dev, state, " early", error);
		}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->parent && dev->parent->power.status >= DPM_OFF)
		dpm_wait(dev->parent, async);
	device_lock(dev);

	dev->power.status = DPM_RESUMING;

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
		}
	}
 End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
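
/*
 * A device opts in to the async path by having power.async_suspend set; in
 * kernels of this vintage that is normally done with
 * device_enable_async_suspend() (assumed available in this tree), while
 * pm_async_enabled is the global switch exposed as /sys/power/pm_async.
 */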

/**
 *	dpm_drv_timeout - Driver suspend / resume watchdog handler
 *	@data: struct device which timed out
 *
 *	Called when a driver has timed out suspending or resuming.
 *	There's not much we can do here to recover so
 *	BUG() out for a crash-dump
 *
 */
static void dpm_drv_timeout(unsigned long data)
{
	struct device *dev = (struct device *) data;

	printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev),
	       (dev->driver ? dev->driver->name : "no driver"));
	BUG();
}

/**
 *	dpm_drv_wdset - Sets up driver suspend/resume watchdog timer.
 *	@dev: struct device which we're guarding.
 *
 */
static void dpm_drv_wdset(struct device *dev)
{
	dpm_drv_wd.data = (unsigned long) dev;
	mod_timer(&dpm_drv_wd, jiffies + (HZ * 3));
}

/**
 *	dpm_drv_wdclr - clears driver suspend/resume watchdog timer.
 *	@dev: struct device which we're no longer guarding.
 *
 */
static void dpm_drv_wdclr(struct device *dev)
{
	del_timer_sync(&dpm_drv_wd);
}
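
/*
 * Usage sketch (mirrors dpm_suspend() below): the watchdog brackets each
 * device's suspend call, so a driver that blocks for longer than the three
 * second window trips dpm_drv_timeout() and the BUG() yields a crash dump
 * naming the offending device:
 *
 *	dpm_drv_wdset(dev);
 *	error = device_suspend(dev);
 *	dpm_drv_wdclr(dev);
 */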

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct list_head list;
	struct device *dev;
	ktime_t starttime = ktime_get();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	list_for_each_entry(dev, &dpm_list, power.entry) {
		if (dev->power.status < DPM_OFF)
			continue;

		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_list)) {
		dev = to_device(dpm_list.next);
		get_device(dev);
		if (dev->power.status >= DPM_OFF && !is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, "", error);
		} else if (dev->power.status == DPM_SUSPENDING) {
			/* Allow new children of the device to be registered */
			dev->power.status = DPM_RESUMING;
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
		pm_dev_dbg(dev, state, "completing ");
		dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		if (dev->power.status > DPM_ON) {
			dev->power.status = DPM_ON;
			mutex_unlock(&dpm_list_mtx);

			device_complete(dev, state);
			pm_runtime_put_sync(dev);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

End:
	return error;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
		error = device_suspend_noirq(dev, state);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			break;
		}
		dev->power.status = DPM_OFF_IRQ;
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

static int async_error;

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);
	device_lock(dev);

	if (async_error)
		goto End;

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

	if (!error)
		dev->power.status = DPM_OFF;

 End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		pm_dev_err(dev, pm_transition, " async", error);
		async_error = error;
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
	struct list_head list;
	ktime_t starttime = ktime_get();
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		dpm_drv_wdset(dev);
		error = device_suspend(dev);
		dpm_drv_wdclr(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
		if (async_error)
			break;
	}
	list_splice(&list, dpm_list.prev);
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing ");
		error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing type ");
		error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing class ");
		error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	}
 End:
	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = true;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		dev->power.status = DPM_PREPARING;
		mutex_unlock(&dpm_list_mtx);

		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
			/* Wake-up requested during system sleep transition. */
			pm_runtime_put_sync(dev);
			error = -EBUSY;
		} else {
			error = device_prepare(dev, state);
		}

		mutex_lock(&dpm_list_mtx);
		if (error) {
			dev->power.status = DPM_ON;
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_ERR "PM: Failed to prepare device %s "
				"for power transition: error %d\n",
				kobject_name(&dev->kobj), error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_SUSPENDING;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	return error;
}
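
/*
 * Note that the runtime PM reference taken in dpm_prepare() with
 * pm_runtime_get_noresume() is held across the whole sleep transition and
 * dropped again by the pm_runtime_put_sync() call in dpm_complete(), which
 * keeps runtime suspend from racing with system suspend.
 */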

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	might_sleep();
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
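
/*
 * Call-ordering sketch (the callers live elsewhere, e.g. in the platform
 * suspend core; shown here only for orientation):
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (!error)
 *		error = dpm_suspend_noirq(PMSG_SUSPEND);
 *	...			enter the sleep state
 *	dpm_resume_noirq(PMSG_RESUME);
 *	dpm_resume_end(PMSG_RESUME);
 */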

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
void device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
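
/*
 * Usage note (illustrative; my_dev and other_dev are hypothetical): a driver
 * whose suspend must not begin before another device has finished can call
 *
 *	device_pm_wait_for_dev(my_dev, other_dev);
 *
 * from its own callback.  As dpm_wait() above shows, the wait only takes
 * effect when async suspend is in use.
 */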