
/drivers/base/power/main.c

https://bitbucket.org/emiliolopez/linux
   1/*
   2 * drivers/base/power/main.c - Where the driver meets power management.
   3 *
   4 * Copyright (c) 2003 Patrick Mochel
   5 * Copyright (c) 2003 Open Source Development Lab
   6 *
   7 * This file is released under the GPLv2
   8 *
   9 *
  10 * The driver model core calls device_pm_add() when a device is registered.
  11 * This will initialize the embedded device_pm_info object in the device
  12 * and add it to the list of power-controlled devices. sysfs entries for
  13 * controlling device power management will also be added.
  14 *
  15 * A separate list is used for keeping track of power info, because the power
  16 * domain dependencies may differ from the ancestral dependencies that the
  17 * subsystem list maintains.
  18 */
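/*
 * A typical driver hooks into the phases implemented below through a
 * struct dev_pm_ops, for example (illustrative only, "foo" names are
 * hypothetical):
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = { .name = "foo", .pm = &foo_pm_ops },
 *	};
 */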
  19
  20#include <linux/device.h>
  21#include <linux/kallsyms.h>
  22#include <linux/export.h>
  23#include <linux/mutex.h>
  24#include <linux/pm.h>
  25#include <linux/pm_runtime.h>
  26#include <linux/pm-trace.h>
  27#include <linux/pm_wakeirq.h>
  28#include <linux/interrupt.h>
  29#include <linux/sched.h>
  30#include <linux/sched/debug.h>
  31#include <linux/async.h>
  32#include <linux/suspend.h>
  33#include <trace/events/power.h>
  34#include <linux/cpufreq.h>
  35#include <linux/cpuidle.h>
  36#include <linux/timer.h>
  37
  38#include "../base.h"
  39#include "power.h"
  40
  41typedef int (*pm_callback_t)(struct device *);
  42
  43/*
   44 * The entries in dpm_list are in depth-first order, simply because
   45 * children are guaranteed to be discovered after their parents, and
   46 * are inserted at the back of the list on discovery.
  47 *
  48 * Since device_pm_add() may be called with a device lock held,
  49 * we must never try to acquire a device lock while holding
  50 * dpm_list_mutex.
  51 */
  52
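/*
 * Devices move between these lists as a system transition progresses:
 * dpm_prepare() moves them from dpm_list to dpm_prepared_list, the "suspend",
 * "late suspend" and "noirq suspend" phases move them on to
 * dpm_suspended_list, dpm_late_early_list and dpm_noirq_list in turn, and the
 * resume phases walk them back the other way until dpm_complete() splices
 * them into dpm_list again.
 */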
  53LIST_HEAD(dpm_list);
  54static LIST_HEAD(dpm_prepared_list);
  55static LIST_HEAD(dpm_suspended_list);
  56static LIST_HEAD(dpm_late_early_list);
  57static LIST_HEAD(dpm_noirq_list);
  58
  59struct suspend_stats suspend_stats;
  60static DEFINE_MUTEX(dpm_list_mtx);
  61static pm_message_t pm_transition;
  62
  63static int async_error;
  64
  65static const char *pm_verb(int event)
  66{
  67	switch (event) {
  68	case PM_EVENT_SUSPEND:
  69		return "suspend";
  70	case PM_EVENT_RESUME:
  71		return "resume";
  72	case PM_EVENT_FREEZE:
  73		return "freeze";
  74	case PM_EVENT_QUIESCE:
  75		return "quiesce";
  76	case PM_EVENT_HIBERNATE:
  77		return "hibernate";
  78	case PM_EVENT_THAW:
  79		return "thaw";
  80	case PM_EVENT_RESTORE:
  81		return "restore";
  82	case PM_EVENT_RECOVER:
  83		return "recover";
  84	default:
  85		return "(unknown PM event)";
  86	}
  87}
  88
  89/**
  90 * device_pm_sleep_init - Initialize system suspend-related device fields.
  91 * @dev: Device object being initialized.
  92 */
  93void device_pm_sleep_init(struct device *dev)
  94{
  95	dev->power.is_prepared = false;
  96	dev->power.is_suspended = false;
  97	dev->power.is_noirq_suspended = false;
  98	dev->power.is_late_suspended = false;
  99	init_completion(&dev->power.completion);
 100	complete_all(&dev->power.completion);
 101	dev->power.wakeup = NULL;
 102	INIT_LIST_HEAD(&dev->power.entry);
 103}
 104
 105/**
 106 * device_pm_lock - Lock the list of active devices used by the PM core.
 107 */
 108void device_pm_lock(void)
 109{
 110	mutex_lock(&dpm_list_mtx);
 111}
 112
 113/**
 114 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 115 */
 116void device_pm_unlock(void)
 117{
 118	mutex_unlock(&dpm_list_mtx);
 119}
 120
 121/**
 122 * device_pm_add - Add a device to the PM core's list of active devices.
 123 * @dev: Device to add to the list.
 124 */
 125void device_pm_add(struct device *dev)
 126{
 127	pr_debug("PM: Adding info for %s:%s\n",
 128		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 129	device_pm_check_callbacks(dev);
 130	mutex_lock(&dpm_list_mtx);
 131	if (dev->parent && dev->parent->power.is_prepared)
 132		dev_warn(dev, "parent %s should not be sleeping\n",
 133			dev_name(dev->parent));
 134	list_add_tail(&dev->power.entry, &dpm_list);
 135	dev->power.in_dpm_list = true;
 136	mutex_unlock(&dpm_list_mtx);
 137}
 138
 139/**
 140 * device_pm_remove - Remove a device from the PM core's list of active devices.
 141 * @dev: Device to be removed from the list.
 142 */
 143void device_pm_remove(struct device *dev)
 144{
 145	pr_debug("PM: Removing info for %s:%s\n",
 146		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 147	complete_all(&dev->power.completion);
 148	mutex_lock(&dpm_list_mtx);
 149	list_del_init(&dev->power.entry);
 150	dev->power.in_dpm_list = false;
 151	mutex_unlock(&dpm_list_mtx);
 152	device_wakeup_disable(dev);
 153	pm_runtime_remove(dev);
 154	device_pm_check_callbacks(dev);
 155}
 156
 157/**
 158 * device_pm_move_before - Move device in the PM core's list of active devices.
 159 * @deva: Device to move in dpm_list.
 160 * @devb: Device @deva should come before.
 161 */
 162void device_pm_move_before(struct device *deva, struct device *devb)
 163{
 164	pr_debug("PM: Moving %s:%s before %s:%s\n",
 165		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 166		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 167	/* Delete deva from dpm_list and reinsert before devb. */
 168	list_move_tail(&deva->power.entry, &devb->power.entry);
 169}
 170
 171/**
 172 * device_pm_move_after - Move device in the PM core's list of active devices.
 173 * @deva: Device to move in dpm_list.
 174 * @devb: Device @deva should come after.
 175 */
 176void device_pm_move_after(struct device *deva, struct device *devb)
 177{
 178	pr_debug("PM: Moving %s:%s after %s:%s\n",
 179		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 180		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 181	/* Delete deva from dpm_list and reinsert after devb. */
 182	list_move(&deva->power.entry, &devb->power.entry);
 183}
 184
 185/**
 186 * device_pm_move_last - Move device to end of the PM core's list of devices.
 187 * @dev: Device to move in dpm_list.
 188 */
 189void device_pm_move_last(struct device *dev)
 190{
 191	pr_debug("PM: Moving %s:%s to end of list\n",
 192		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 193	list_move_tail(&dev->power.entry, &dpm_list);
 194}
 195
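/*
 * initcall_debug_start()/initcall_debug_report() bracket a PM callback and,
 * when pm_print_times is enabled, log which callback ran, its return value
 * and how long it took (nanoseconds shifted right by 10, i.e. roughly
 * microseconds).
 */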
 196static ktime_t initcall_debug_start(struct device *dev)
 197{
 198	ktime_t calltime = 0;
 199
 200	if (pm_print_times_enabled) {
 201		pr_info("calling  %s+ @ %i, parent: %s\n",
 202			dev_name(dev), task_pid_nr(current),
 203			dev->parent ? dev_name(dev->parent) : "none");
 204		calltime = ktime_get();
 205	}
 206
 207	return calltime;
 208}
 209
 210static void initcall_debug_report(struct device *dev, ktime_t calltime,
 211				  int error, pm_message_t state,
 212				  const char *info)
 213{
 214	ktime_t rettime;
 215	s64 nsecs;
 216
 217	rettime = ktime_get();
 218	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
 219
 220	if (pm_print_times_enabled) {
 221		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
 222			error, (unsigned long long)nsecs >> 10);
 223	}
 224}
 225
 226/**
 227 * dpm_wait - Wait for a PM operation to complete.
 228 * @dev: Device to wait for.
 229 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 230 */
 231static void dpm_wait(struct device *dev, bool async)
 232{
 233	if (!dev)
 234		return;
 235
 236	if (async || (pm_async_enabled && dev->power.async_suspend))
 237		wait_for_completion(&dev->power.completion);
 238}
 239
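/* Helper for dpm_wait_for_children(): wait for one child of the device. */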
 240static int dpm_wait_fn(struct device *dev, void *async_ptr)
 241{
 242	dpm_wait(dev, *((bool *)async_ptr));
 243	return 0;
 244}
 245
 246static void dpm_wait_for_children(struct device *dev, bool async)
 247{
  248	device_for_each_child(dev, &async, dpm_wait_fn);
 249}
 250
 251static void dpm_wait_for_suppliers(struct device *dev, bool async)
 252{
 253	struct device_link *link;
 254	int idx;
 255
 256	idx = device_links_read_lock();
 257
 258	/*
 259	 * If the supplier goes away right after we've checked the link to it,
 260	 * we'll wait for its completion to change the state, but that's fine,
 261	 * because the only things that will block as a result are the SRCU
 262	 * callbacks freeing the link objects for the links in the list we're
 263	 * walking.
 264	 */
 265	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
 266		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 267			dpm_wait(link->supplier, async);
 268
 269	device_links_read_unlock(idx);
 270}
 271
 272static void dpm_wait_for_superior(struct device *dev, bool async)
 273{
 274	dpm_wait(dev->parent, async);
 275	dpm_wait_for_suppliers(dev, async);
 276}
 277
 278static void dpm_wait_for_consumers(struct device *dev, bool async)
 279{
 280	struct device_link *link;
 281	int idx;
 282
 283	idx = device_links_read_lock();
 284
 285	/*
 286	 * The status of a device link can only be changed from "dormant" by a
 287	 * probe, but that cannot happen during system suspend/resume.  In
 288	 * theory it can change to "dormant" at that time, but then it is
  289	 * reasonable to wait for the target device anyway (e.g. if it goes
 290	 * away, it's better to wait for it to go away completely and then
 291	 * continue instead of trying to continue in parallel with its
 292	 * unregistration).
 293	 */
 294	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
 295		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 296			dpm_wait(link->consumer, async);
 297
 298	device_links_read_unlock(idx);
 299}
 300
 301static void dpm_wait_for_subordinate(struct device *dev, bool async)
 302{
 303	dpm_wait_for_children(dev, async);
 304	dpm_wait_for_consumers(dev, async);
 305}
 306
 307/**
 308 * pm_op - Return the PM operation appropriate for given PM event.
 309 * @ops: PM operations to choose from.
 310 * @state: PM transition of the system being carried out.
 311 */
 312static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
 313{
 314	switch (state.event) {
 315#ifdef CONFIG_SUSPEND
 316	case PM_EVENT_SUSPEND:
 317		return ops->suspend;
 318	case PM_EVENT_RESUME:
 319		return ops->resume;
 320#endif /* CONFIG_SUSPEND */
 321#ifdef CONFIG_HIBERNATE_CALLBACKS
 322	case PM_EVENT_FREEZE:
 323	case PM_EVENT_QUIESCE:
 324		return ops->freeze;
 325	case PM_EVENT_HIBERNATE:
 326		return ops->poweroff;
 327	case PM_EVENT_THAW:
 328	case PM_EVENT_RECOVER:
 329		return ops->thaw;
 330		break;
 331	case PM_EVENT_RESTORE:
 332		return ops->restore;
 333#endif /* CONFIG_HIBERNATE_CALLBACKS */
 334	}
 335
 336	return NULL;
 337}
 338
 339/**
 340 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 341 * @ops: PM operations to choose from.
 342 * @state: PM transition of the system being carried out.
 343 *
  344 * Runtime PM is disabled for the device while the returned callback runs.
 345 */
 346static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
 347				      pm_message_t state)
 348{
 349	switch (state.event) {
 350#ifdef CONFIG_SUSPEND
 351	case PM_EVENT_SUSPEND:
 352		return ops->suspend_late;
 353	case PM_EVENT_RESUME:
 354		return ops->resume_early;
 355#endif /* CONFIG_SUSPEND */
 356#ifdef CONFIG_HIBERNATE_CALLBACKS
 357	case PM_EVENT_FREEZE:
 358	case PM_EVENT_QUIESCE:
 359		return ops->freeze_late;
 360	case PM_EVENT_HIBERNATE:
 361		return ops->poweroff_late;
 362	case PM_EVENT_THAW:
 363	case PM_EVENT_RECOVER:
 364		return ops->thaw_early;
 365	case PM_EVENT_RESTORE:
 366		return ops->restore_early;
 367#endif /* CONFIG_HIBERNATE_CALLBACKS */
 368	}
 369
 370	return NULL;
 371}
 372
 373/**
 374 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 375 * @ops: PM operations to choose from.
 376 * @state: PM transition of the system being carried out.
 377 *
  378 * Device drivers will not receive interrupts while the callback returned by
  379 * this function is being executed.
 380 */
 381static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
 382{
 383	switch (state.event) {
 384#ifdef CONFIG_SUSPEND
 385	case PM_EVENT_SUSPEND:
 386		return ops->suspend_noirq;
 387	case PM_EVENT_RESUME:
 388		return ops->resume_noirq;
 389#endif /* CONFIG_SUSPEND */
 390#ifdef CONFIG_HIBERNATE_CALLBACKS
 391	case PM_EVENT_FREEZE:
 392	case PM_EVENT_QUIESCE:
 393		return ops->freeze_noirq;
 394	case PM_EVENT_HIBERNATE:
 395		return ops->poweroff_noirq;
 396	case PM_EVENT_THAW:
 397	case PM_EVENT_RECOVER:
 398		return ops->thaw_noirq;
 399	case PM_EVENT_RESTORE:
 400		return ops->restore_noirq;
 401#endif /* CONFIG_HIBERNATE_CALLBACKS */
 402	}
 403
 404	return NULL;
 405}
 406
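/*
 * pm_dev_dbg()/pm_dev_err() print per-device messages naming the PM event
 * (as rendered by pm_verb()) being handled.
 */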
 407static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
 408{
 409	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
 410		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
 411		", may wakeup" : "");
 412}
 413
 414static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
 415			int error)
 416{
 417	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
 418		dev_name(dev), pm_verb(state.event), info, error);
 419}
 420
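/* Log how long a whole suspend/resume phase took (CONFIG_PM_DEBUG only). */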
 421#ifdef CONFIG_PM_DEBUG
 422static void dpm_show_time(ktime_t starttime, pm_message_t state,
 423			  const char *info)
 424{
 425	ktime_t calltime;
 426	u64 usecs64;
 427	int usecs;
 428
 429	calltime = ktime_get();
 430	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
 431	do_div(usecs64, NSEC_PER_USEC);
 432	usecs = usecs64;
 433	if (usecs == 0)
 434		usecs = 1;
 435	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
 436		info ?: "", info ? " " : "", pm_verb(state.event),
 437		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
 438}
 439#else
 440static inline void dpm_show_time(ktime_t starttime, pm_message_t state,
 441				 const char *info) {}
 442#endif /* CONFIG_PM_DEBUG */
 443
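/*
 * Invoke a single PM callback for @dev, if any, with debug output, tracepoints
 * and timing around the call, and report its result.
 */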
 444static int dpm_run_callback(pm_callback_t cb, struct device *dev,
 445			    pm_message_t state, const char *info)
 446{
 447	ktime_t calltime;
 448	int error;
 449
 450	if (!cb)
 451		return 0;
 452
 453	calltime = initcall_debug_start(dev);
 454
 455	pm_dev_dbg(dev, state, info);
 456	trace_device_pm_callback_start(dev, info, state.event);
 457	error = cb(dev);
 458	trace_device_pm_callback_end(dev, error);
 459	suspend_report_result(cb, error);
 460
 461	initcall_debug_report(dev, calltime, error, state, info);
 462
 463	return error;
 464}
 465
 466#ifdef CONFIG_DPM_WATCHDOG
 467struct dpm_watchdog {
 468	struct device		*dev;
 469	struct task_struct	*tsk;
 470	struct timer_list	timer;
 471};
 472
 473#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
 474	struct dpm_watchdog wd
 475
 476/**
 477 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 478 * @data: Watchdog object address.
 479 *
 480 * Called when a driver has timed out suspending or resuming.
 481 * There's not much we can do here to recover so panic() to
 482 * capture a crash-dump in pstore.
 483 */
 484static void dpm_watchdog_handler(unsigned long data)
 485{
 486	struct dpm_watchdog *wd = (void *)data;
 487
 488	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
 489	show_stack(wd->tsk, NULL);
 490	panic("%s %s: unrecoverable failure\n",
 491		dev_driver_string(wd->dev), dev_name(wd->dev));
 492}
 493
 494/**
 495 * dpm_watchdog_set - Enable pm watchdog for given device.
 496 * @wd: Watchdog. Must be allocated on the stack.
 497 * @dev: Device to handle.
 498 */
 499static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
 500{
 501	struct timer_list *timer = &wd->timer;
 502
 503	wd->dev = dev;
 504	wd->tsk = current;
 505
 506	init_timer_on_stack(timer);
 507	/* use same timeout value for both suspend and resume */
 508	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
 509	timer->function = dpm_watchdog_handler;
 510	timer->data = (unsigned long)wd;
 511	add_timer(timer);
 512}
 513
 514/**
 515 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 516 * @wd: Watchdog to disable.
 517 */
 518static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 519{
 520	struct timer_list *timer = &wd->timer;
 521
 522	del_timer_sync(timer);
 523	destroy_timer_on_stack(timer);
 524}
 525#else
 526#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
 527#define dpm_watchdog_set(x, y)
 528#define dpm_watchdog_clear(x)
 529#endif
 530
 531/*------------------------- Resume routines -------------------------*/
 532
 533/**
  534 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 535 * @dev: Device to handle.
 536 * @state: PM transition of the system being carried out.
 537 * @async: If true, the device is being resumed asynchronously.
 538 *
 539 * The driver of @dev will not receive interrupts while this function is being
 540 * executed.
 541 */
 542static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 543{
 544	pm_callback_t callback = NULL;
 545	const char *info = NULL;
 546	int error = 0;
 547
 548	TRACE_DEVICE(dev);
 549	TRACE_RESUME(0);
 550
 551	if (dev->power.syscore || dev->power.direct_complete)
 552		goto Out;
 553
 554	if (!dev->power.is_noirq_suspended)
 555		goto Out;
 556
 557	dpm_wait_for_superior(dev, async);
 558
 559	if (dev->pm_domain) {
 560		info = "noirq power domain ";
 561		callback = pm_noirq_op(&dev->pm_domain->ops, state);
 562	} else if (dev->type && dev->type->pm) {
 563		info = "noirq type ";
 564		callback = pm_noirq_op(dev->type->pm, state);
 565	} else if (dev->class && dev->class->pm) {
 566		info = "noirq class ";
 567		callback = pm_noirq_op(dev->class->pm, state);
 568	} else if (dev->bus && dev->bus->pm) {
 569		info = "noirq bus ";
 570		callback = pm_noirq_op(dev->bus->pm, state);
 571	}
 572
 573	if (!callback && dev->driver && dev->driver->pm) {
 574		info = "noirq driver ";
 575		callback = pm_noirq_op(dev->driver->pm, state);
 576	}
 577
 578	error = dpm_run_callback(callback, dev, state, info);
 579	dev->power.is_noirq_suspended = false;
 580
 581 Out:
 582	complete_all(&dev->power.completion);
 583	TRACE_RESUME(error);
 584	return error;
 585}
 586
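/*
 * A device is suspended/resumed asynchronously only if its async_suspend flag
 * is set, the global pm_async switch is enabled and PM tracing is not in use
 * (tracing relies on a deterministic device order).
 */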
 587static bool is_async(struct device *dev)
 588{
 589	return dev->power.async_suspend && pm_async_enabled
 590		&& !pm_trace_is_enabled();
 591}
 592
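/*
 * The async_* wrappers below run from the async_schedule() machinery; they
 * report errors and drop the device reference taken when the work was
 * scheduled.
 */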
 593static void async_resume_noirq(void *data, async_cookie_t cookie)
 594{
 595	struct device *dev = (struct device *)data;
 596	int error;
 597
 598	error = device_resume_noirq(dev, pm_transition, true);
 599	if (error)
 600		pm_dev_err(dev, pm_transition, " async", error);
 601
 602	put_device(dev);
 603}
 604
 605/**
 606 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 607 * @state: PM transition of the system being carried out.
 608 *
 609 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 610 * enable device drivers to receive interrupts.
 611 */
 612void dpm_resume_noirq(pm_message_t state)
 613{
 614	struct device *dev;
 615	ktime_t starttime = ktime_get();
 616
 617	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
 618	mutex_lock(&dpm_list_mtx);
 619	pm_transition = state;
 620
 621	/*
  622	 * Advance the async threads upfront,
 623	 * in case the starting of async threads is
 624	 * delayed by non-async resuming devices.
 625	 */
 626	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
 627		reinit_completion(&dev->power.completion);
 628		if (is_async(dev)) {
 629			get_device(dev);
 630			async_schedule(async_resume_noirq, dev);
 631		}
 632	}
 633
 634	while (!list_empty(&dpm_noirq_list)) {
 635		dev = to_device(dpm_noirq_list.next);
 636		get_device(dev);
 637		list_move_tail(&dev->power.entry, &dpm_late_early_list);
 638		mutex_unlock(&dpm_list_mtx);
 639
 640		if (!is_async(dev)) {
 641			int error;
 642
 643			error = device_resume_noirq(dev, state, false);
 644			if (error) {
 645				suspend_stats.failed_resume_noirq++;
 646				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
 647				dpm_save_failed_dev(dev_name(dev));
 648				pm_dev_err(dev, state, " noirq", error);
 649			}
 650		}
 651
 652		mutex_lock(&dpm_list_mtx);
 653		put_device(dev);
 654	}
 655	mutex_unlock(&dpm_list_mtx);
 656	async_synchronize_full();
 657	dpm_show_time(starttime, state, "noirq");
 658	resume_device_irqs();
 659	device_wakeup_disarm_wake_irqs();
 660	cpuidle_resume();
 661	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
 662}
 663
 664/**
 665 * device_resume_early - Execute an "early resume" callback for given device.
 666 * @dev: Device to handle.
 667 * @state: PM transition of the system being carried out.
 668 * @async: If true, the device is being resumed asynchronously.
 669 *
 670 * Runtime PM is disabled for @dev while this function is being executed.
 671 */
 672static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 673{
 674	pm_callback_t callback = NULL;
 675	const char *info = NULL;
 676	int error = 0;
 677
 678	TRACE_DEVICE(dev);
 679	TRACE_RESUME(0);
 680
 681	if (dev->power.syscore || dev->power.direct_complete)
 682		goto Out;
 683
 684	if (!dev->power.is_late_suspended)
 685		goto Out;
 686
 687	dpm_wait_for_superior(dev, async);
 688
 689	if (dev->pm_domain) {
 690		info = "early power domain ";
 691		callback = pm_late_early_op(&dev->pm_domain->ops, state);
 692	} else if (dev->type && dev->type->pm) {
 693		info = "early type ";
 694		callback = pm_late_early_op(dev->type->pm, state);
 695	} else if (dev->class && dev->class->pm) {
 696		info = "early class ";
 697		callback = pm_late_early_op(dev->class->pm, state);
 698	} else if (dev->bus && dev->bus->pm) {
 699		info = "early bus ";
 700		callback = pm_late_early_op(dev->bus->pm, state);
 701	}
 702
 703	if (!callback && dev->driver && dev->driver->pm) {
 704		info = "early driver ";
 705		callback = pm_late_early_op(dev->driver->pm, state);
 706	}
 707
 708	error = dpm_run_callback(callback, dev, state, info);
 709	dev->power.is_late_suspended = false;
 710
 711 Out:
 712	TRACE_RESUME(error);
 713
 714	pm_runtime_enable(dev);
 715	complete_all(&dev->power.completion);
 716	return error;
 717}
 718
 719static void async_resume_early(void *data, async_cookie_t cookie)
 720{
 721	struct device *dev = (struct device *)data;
 722	int error;
 723
 724	error = device_resume_early(dev, pm_transition, true);
 725	if (error)
 726		pm_dev_err(dev, pm_transition, " async", error);
 727
 728	put_device(dev);
 729}
 730
 731/**
 732 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 733 * @state: PM transition of the system being carried out.
 734 */
 735void dpm_resume_early(pm_message_t state)
 736{
 737	struct device *dev;
 738	ktime_t starttime = ktime_get();
 739
 740	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
 741	mutex_lock(&dpm_list_mtx);
 742	pm_transition = state;
 743
 744	/*
 745	 * Advanced the async threads upfront,
 746	 * in case the starting of async threads is
 747	 * delayed by non-async resuming devices.
 748	 */
 749	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
 750		reinit_completion(&dev->power.completion);
 751		if (is_async(dev)) {
 752			get_device(dev);
 753			async_schedule(async_resume_early, dev);
 754		}
 755	}
 756
 757	while (!list_empty(&dpm_late_early_list)) {
 758		dev = to_device(dpm_late_early_list.next);
 759		get_device(dev);
 760		list_move_tail(&dev->power.entry, &dpm_suspended_list);
 761		mutex_unlock(&dpm_list_mtx);
 762
 763		if (!is_async(dev)) {
 764			int error;
 765
 766			error = device_resume_early(dev, state, false);
 767			if (error) {
 768				suspend_stats.failed_resume_early++;
 769				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
 770				dpm_save_failed_dev(dev_name(dev));
 771				pm_dev_err(dev, state, " early", error);
 772			}
 773		}
 774		mutex_lock(&dpm_list_mtx);
 775		put_device(dev);
 776	}
 777	mutex_unlock(&dpm_list_mtx);
 778	async_synchronize_full();
 779	dpm_show_time(starttime, state, "early");
 780	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
 781}
 782
 783/**
 784 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 785 * @state: PM transition of the system being carried out.
 786 */
 787void dpm_resume_start(pm_message_t state)
 788{
 789	dpm_resume_noirq(state);
 790	dpm_resume_early(state);
 791}
 792EXPORT_SYMBOL_GPL(dpm_resume_start);
 793
 794/**
 795 * device_resume - Execute "resume" callbacks for given device.
 796 * @dev: Device to handle.
 797 * @state: PM transition of the system being carried out.
 798 * @async: If true, the device is being resumed asynchronously.
 799 */
 800static int device_resume(struct device *dev, pm_message_t state, bool async)
 801{
 802	pm_callback_t callback = NULL;
 803	const char *info = NULL;
 804	int error = 0;
 805	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 806
 807	TRACE_DEVICE(dev);
 808	TRACE_RESUME(0);
 809
 810	if (dev->power.syscore)
 811		goto Complete;
 812
 813	if (dev->power.direct_complete) {
 814		/* Match the pm_runtime_disable() in __device_suspend(). */
 815		pm_runtime_enable(dev);
 816		goto Complete;
 817	}
 818
 819	dpm_wait_for_superior(dev, async);
 820	dpm_watchdog_set(&wd, dev);
 821	device_lock(dev);
 822
 823	/*
 824	 * This is a fib.  But we'll allow new children to be added below
 825	 * a resumed device, even if the device hasn't been completed yet.
 826	 */
 827	dev->power.is_prepared = false;
 828
 829	if (!dev->power.is_suspended)
 830		goto Unlock;
 831
 832	if (dev->pm_domain) {
 833		info = "power domain ";
 834		callback = pm_op(&dev->pm_domain->ops, state);
 835		goto Driver;
 836	}
 837
 838	if (dev->type && dev->type->pm) {
 839		info = "type ";
 840		callback = pm_op(dev->type->pm, state);
 841		goto Driver;
 842	}
 843
 844	if (dev->class) {
 845		if (dev->class->pm) {
 846			info = "class ";
 847			callback = pm_op(dev->class->pm, state);
 848			goto Driver;
 849		} else if (dev->class->resume) {
 850			info = "legacy class ";
 851			callback = dev->class->resume;
 852			goto End;
 853		}
 854	}
 855
 856	if (dev->bus) {
 857		if (dev->bus->pm) {
 858			info = "bus ";
 859			callback = pm_op(dev->bus->pm, state);
 860		} else if (dev->bus->resume) {
 861			info = "legacy bus ";
 862			callback = dev->bus->resume;
 863			goto End;
 864		}
 865	}
 866
 867 Driver:
 868	if (!callback && dev->driver && dev->driver->pm) {
 869		info = "driver ";
 870		callback = pm_op(dev->driver->pm, state);
 871	}
 872
 873 End:
 874	error = dpm_run_callback(callback, dev, state, info);
 875	dev->power.is_suspended = false;
 876
 877 Unlock:
 878	device_unlock(dev);
 879	dpm_watchdog_clear(&wd);
 880
 881 Complete:
 882	complete_all(&dev->power.completion);
 883
 884	TRACE_RESUME(error);
 885
 886	return error;
 887}
 888
 889static void async_resume(void *data, async_cookie_t cookie)
 890{
 891	struct device *dev = (struct device *)data;
 892	int error;
 893
 894	error = device_resume(dev, pm_transition, true);
 895	if (error)
 896		pm_dev_err(dev, pm_transition, " async", error);
 897	put_device(dev);
 898}
 899
 900/**
 901 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 902 * @state: PM transition of the system being carried out.
 903 *
 904 * Execute the appropriate "resume" callback for all devices whose status
 905 * indicates that they are suspended.
 906 */
 907void dpm_resume(pm_message_t state)
 908{
 909	struct device *dev;
 910	ktime_t starttime = ktime_get();
 911
 912	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
 913	might_sleep();
 914
 915	mutex_lock(&dpm_list_mtx);
 916	pm_transition = state;
 917	async_error = 0;
 918
 919	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
 920		reinit_completion(&dev->power.completion);
 921		if (is_async(dev)) {
 922			get_device(dev);
 923			async_schedule(async_resume, dev);
 924		}
 925	}
 926
 927	while (!list_empty(&dpm_suspended_list)) {
 928		dev = to_device(dpm_suspended_list.next);
 929		get_device(dev);
 930		if (!is_async(dev)) {
 931			int error;
 932
 933			mutex_unlock(&dpm_list_mtx);
 934
 935			error = device_resume(dev, state, false);
 936			if (error) {
 937				suspend_stats.failed_resume++;
 938				dpm_save_failed_step(SUSPEND_RESUME);
 939				dpm_save_failed_dev(dev_name(dev));
 940				pm_dev_err(dev, state, "", error);
 941			}
 942
 943			mutex_lock(&dpm_list_mtx);
 944		}
 945		if (!list_empty(&dev->power.entry))
 946			list_move_tail(&dev->power.entry, &dpm_prepared_list);
 947		put_device(dev);
 948	}
 949	mutex_unlock(&dpm_list_mtx);
 950	async_synchronize_full();
 951	dpm_show_time(starttime, state, NULL);
 952
 953	cpufreq_resume();
 954	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
 955}
 956
 957/**
 958 * device_complete - Complete a PM transition for given device.
 959 * @dev: Device to handle.
 960 * @state: PM transition of the system being carried out.
 961 */
 962static void device_complete(struct device *dev, pm_message_t state)
 963{
 964	void (*callback)(struct device *) = NULL;
 965	const char *info = NULL;
 966
 967	if (dev->power.syscore)
 968		return;
 969
 970	device_lock(dev);
 971
 972	if (dev->pm_domain) {
 973		info = "completing power domain ";
 974		callback = dev->pm_domain->ops.complete;
 975	} else if (dev->type && dev->type->pm) {
 976		info = "completing type ";
 977		callback = dev->type->pm->complete;
 978	} else if (dev->class && dev->class->pm) {
 979		info = "completing class ";
 980		callback = dev->class->pm->complete;
 981	} else if (dev->bus && dev->bus->pm) {
 982		info = "completing bus ";
 983		callback = dev->bus->pm->complete;
 984	}
 985
 986	if (!callback && dev->driver && dev->driver->pm) {
 987		info = "completing driver ";
 988		callback = dev->driver->pm->complete;
 989	}
 990
 991	if (callback) {
 992		pm_dev_dbg(dev, state, info);
 993		callback(dev);
 994	}
 995
 996	device_unlock(dev);
 997
 998	pm_runtime_put(dev);
 999}
1000
1001/**
1002 * dpm_complete - Complete a PM transition for all non-sysdev devices.
1003 * @state: PM transition of the system being carried out.
1004 *
1005 * Execute the ->complete() callbacks for all devices whose PM status is not
1006 * DPM_ON (this allows new devices to be registered).
1007 */
1008void dpm_complete(pm_message_t state)
1009{
1010	struct list_head list;
1011
1012	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1013	might_sleep();
1014
1015	INIT_LIST_HEAD(&list);
1016	mutex_lock(&dpm_list_mtx);
1017	while (!list_empty(&dpm_prepared_list)) {
1018		struct device *dev = to_device(dpm_prepared_list.prev);
1019
1020		get_device(dev);
1021		dev->power.is_prepared = false;
1022		list_move(&dev->power.entry, &list);
1023		mutex_unlock(&dpm_list_mtx);
1024
1025		trace_device_pm_callback_start(dev, "", state.event);
1026		device_complete(dev, state);
1027		trace_device_pm_callback_end(dev, 0);
1028
1029		mutex_lock(&dpm_list_mtx);
1030		put_device(dev);
1031	}
1032	list_splice(&list, &dpm_list);
1033	mutex_unlock(&dpm_list_mtx);
1034
1035	/* Allow device probing and trigger re-probing of deferred devices */
1036	device_unblock_probing();
1037	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1038}
1039
1040/**
1041 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1042 * @state: PM transition of the system being carried out.
1043 *
1044 * Execute "resume" callbacks for all devices and complete the PM transition of
1045 * the system.
1046 */
1047void dpm_resume_end(pm_message_t state)
1048{
1049	dpm_resume(state);
1050	dpm_complete(state);
1051}
1052EXPORT_SYMBOL_GPL(dpm_resume_end);
1053
1054
1055/*------------------------- Suspend routines -------------------------*/
1056
1057/**
1058 * resume_event - Return a "resume" message for given "suspend" sleep state.
1059 * @sleep_state: PM message representing a sleep state.
1060 *
1061 * Return a PM message representing the resume event corresponding to given
1062 * sleep state.
1063 */
1064static pm_message_t resume_event(pm_message_t sleep_state)
1065{
1066	switch (sleep_state.event) {
1067	case PM_EVENT_SUSPEND:
1068		return PMSG_RESUME;
1069	case PM_EVENT_FREEZE:
1070	case PM_EVENT_QUIESCE:
1071		return PMSG_RECOVER;
1072	case PM_EVENT_HIBERNATE:
1073		return PMSG_RESTORE;
1074	}
1075	return PMSG_ON;
1076}
1077
1078/**
 1079 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1080 * @dev: Device to handle.
1081 * @state: PM transition of the system being carried out.
1082 * @async: If true, the device is being suspended asynchronously.
1083 *
1084 * The driver of @dev will not receive interrupts while this function is being
1085 * executed.
1086 */
1087static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1088{
1089	pm_callback_t callback = NULL;
1090	const char *info = NULL;
1091	int error = 0;
1092
1093	TRACE_DEVICE(dev);
1094	TRACE_SUSPEND(0);
1095
1096	dpm_wait_for_subordinate(dev, async);
1097
1098	if (async_error)
1099		goto Complete;
1100
1101	if (dev->power.syscore || dev->power.direct_complete)
1102		goto Complete;
1103
1104	if (dev->pm_domain) {
1105		info = "noirq power domain ";
1106		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1107	} else if (dev->type && dev->type->pm) {
1108		info = "noirq type ";
1109		callback = pm_noirq_op(dev->type->pm, state);
1110	} else if (dev->class && dev->class->pm) {
1111		info = "noirq class ";
1112		callback = pm_noirq_op(dev->class->pm, state);
1113	} else if (dev->bus && dev->bus->pm) {
1114		info = "noirq bus ";
1115		callback = pm_noirq_op(dev->bus->pm, state);
1116	}
1117
1118	if (!callback && dev->driver && dev->driver->pm) {
1119		info = "noirq driver ";
1120		callback = pm_noirq_op(dev->driver->pm, state);
1121	}
1122
1123	error = dpm_run_callback(callback, dev, state, info);
1124	if (!error)
1125		dev->power.is_noirq_suspended = true;
1126	else
1127		async_error = error;
1128
1129Complete:
1130	complete_all(&dev->power.completion);
1131	TRACE_SUSPEND(error);
1132	return error;
1133}
1134
1135static void async_suspend_noirq(void *data, async_cookie_t cookie)
1136{
1137	struct device *dev = (struct device *)data;
1138	int error;
1139
1140	error = __device_suspend_noirq(dev, pm_transition, true);
1141	if (error) {
1142		dpm_save_failed_dev(dev_name(dev));
1143		pm_dev_err(dev, pm_transition, " async", error);
1144	}
1145
1146	put_device(dev);
1147}
1148
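/* Run the "noirq suspend" of one device, asynchronously if allowed. */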
1149static int device_suspend_noirq(struct device *dev)
1150{
1151	reinit_completion(&dev->power.completion);
1152
1153	if (is_async(dev)) {
1154		get_device(dev);
1155		async_schedule(async_suspend_noirq, dev);
1156		return 0;
1157	}
1158	return __device_suspend_noirq(dev, pm_transition, false);
1159}
1160
1161/**
1162 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1163 * @state: PM transition of the system being carried out.
1164 *
1165 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
1166 * handlers for all non-sysdev devices.
1167 */
1168int dpm_suspend_noirq(pm_message_t state)
1169{
1170	ktime_t starttime = ktime_get();
1171	int error = 0;
1172
1173	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1174	cpuidle_pause();
1175	device_wakeup_arm_wake_irqs();
1176	suspend_device_irqs();
1177	mutex_lock(&dpm_list_mtx);
1178	pm_transition = state;
1179	async_error = 0;
1180
1181	while (!list_empty(&dpm_late_early_list)) {
1182		struct device *dev = to_device(dpm_late_early_list.prev);
1183
1184		get_device(dev);
1185		mutex_unlock(&dpm_list_mtx);
1186
1187		error = device_suspend_noirq(dev);
1188
1189		mutex_lock(&dpm_list_mtx);
1190		if (error) {
1191			pm_dev_err(dev, state, " noirq", error);
1192			dpm_save_failed_dev(dev_name(dev));
1193			put_device(dev);
1194			break;
1195		}
1196		if (!list_empty(&dev->power.entry))
1197			list_move(&dev->power.entry, &dpm_noirq_list);
1198		put_device(dev);
1199
1200		if (async_error)
1201			break;
1202	}
1203	mutex_unlock(&dpm_list_mtx);
1204	async_synchronize_full();
1205	if (!error)
1206		error = async_error;
1207
1208	if (error) {
1209		suspend_stats.failed_suspend_noirq++;
1210		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1211		dpm_resume_noirq(resume_event(state));
1212	} else {
1213		dpm_show_time(starttime, state, "noirq");
1214	}
1215	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1216	return error;
1217}
1218
1219/**
1220 * device_suspend_late - Execute a "late suspend" callback for given device.
1221 * @dev: Device to handle.
1222 * @state: PM transition of the system being carried out.
1223 * @async: If true, the device is being suspended asynchronously.
1224 *
1225 * Runtime PM is disabled for @dev while this function is being executed.
1226 */
1227static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1228{
1229	pm_callback_t callback = NULL;
1230	const char *info = NULL;
1231	int error = 0;
1232
1233	TRACE_DEVICE(dev);
1234	TRACE_SUSPEND(0);
1235
1236	__pm_runtime_disable(dev, false);
1237
1238	dpm_wait_for_subordinate(dev, async);
1239
1240	if (async_error)
1241		goto Complete;
1242
1243	if (pm_wakeup_pending()) {
1244		async_error = -EBUSY;
1245		goto Complete;
1246	}
1247
1248	if (dev->power.syscore || dev->power.direct_complete)
1249		goto Complete;
1250
1251	if (dev->pm_domain) {
1252		info = "late power domain ";
1253		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1254	} else if (dev->type && dev->type->pm) {
1255		info = "late type ";
1256		callback = pm_late_early_op(dev->type->pm, state);
1257	} else if (dev->class && dev->class->pm) {
1258		info = "late class ";
1259		callback = pm_late_early_op(dev->class->pm, state);
1260	} else if (dev->bus && dev->bus->pm) {
1261		info = "late bus ";
1262		callback = pm_late_early_op(dev->bus->pm, state);
1263	}
1264
1265	if (!callback && dev->driver && dev->driver->pm) {
1266		info = "late driver ";
1267		callback = pm_late_early_op(dev->driver->pm, state);
1268	}
1269
1270	error = dpm_run_callback(callback, dev, state, info);
1271	if (!error)
1272		dev->power.is_late_suspended = true;
1273	else
1274		async_error = error;
1275
1276Complete:
1277	TRACE_SUSPEND(error);
1278	complete_all(&dev->power.completion);
1279	return error;
1280}
1281
1282static void async_suspend_late(void *data, async_cookie_t cookie)
1283{
1284	struct device *dev = (struct device *)data;
1285	int error;
1286
1287	error = __device_suspend_late(dev, pm_transition, true);
1288	if (error) {
1289		dpm_save_failed_dev(dev_name(dev));
1290		pm_dev_err(dev, pm_transition, " async", error);
1291	}
1292	put_device(dev);
1293}
1294
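/* Run the "late suspend" of one device, asynchronously if allowed. */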
1295static int device_suspend_late(struct device *dev)
1296{
1297	reinit_completion(&dev->power.completion);
1298
1299	if (is_async(dev)) {
1300		get_device(dev);
1301		async_schedule(async_suspend_late, dev);
1302		return 0;
1303	}
1304
1305	return __device_suspend_late(dev, pm_transition, false);
1306}
1307
1308/**
1309 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1310 * @state: PM transition of the system being carried out.
1311 */
1312int dpm_suspend_late(pm_message_t state)
1313{
1314	ktime_t starttime = ktime_get();
1315	int error = 0;
1316
1317	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1318	mutex_lock(&dpm_list_mtx);
1319	pm_transition = state;
1320	async_error = 0;
1321
1322	while (!list_empty(&dpm_suspended_list)) {
1323		struct device *dev = to_device(dpm_suspended_list.prev);
1324
1325		get_device(dev);
1326		mutex_unlock(&dpm_list_mtx);
1327
1328		error = device_suspend_late(dev);
1329
1330		mutex_lock(&dpm_list_mtx);
1331		if (!list_empty(&dev->power.entry))
1332			list_move(&dev->power.entry, &dpm_late_early_list);
1333
1334		if (error) {
1335			pm_dev_err(dev, state, " late", error);
1336			dpm_save_failed_dev(dev_name(dev));
1337			put_device(dev);
1338			break;
1339		}
1340		put_device(dev);
1341
1342		if (async_error)
1343			break;
1344	}
1345	mutex_unlock(&dpm_list_mtx);
1346	async_synchronize_full();
1347	if (!error)
1348		error = async_error;
1349	if (error) {
1350		suspend_stats.failed_suspend_late++;
1351		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1352		dpm_resume_early(resume_event(state));
1353	} else {
1354		dpm_show_time(starttime, state, "late");
1355	}
1356	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1357	return error;
1358}
1359
1360/**
1361 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1362 * @state: PM transition of the system being carried out.
1363 */
1364int dpm_suspend_end(pm_message_t state)
1365{
1366	int error = dpm_suspend_late(state);
1367	if (error)
1368		return error;
1369
1370	error = dpm_suspend_noirq(state);
1371	if (error) {
1372		dpm_resume_early(resume_event(state));
1373		return error;
1374	}
1375
1376	return 0;
1377}
1378EXPORT_SYMBOL_GPL(dpm_suspend_end);
1379
1380/**
1381 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1382 * @dev: Device to suspend.
1383 * @state: PM transition of the system being carried out.
1384 * @cb: Suspend callback to execute.
1385 * @info: string description of caller.
1386 */
1387static int legacy_suspend(struct device *dev, pm_message_t state,
1388			  int (*cb)(struct device *dev, pm_message_t state),
1389			  const char *info)
1390{
1391	int error;
1392	ktime_t calltime;
1393
1394	calltime = initcall_debug_start(dev);
1395
1396	trace_device_pm_callback_start(dev, info, state.event);
1397	error = cb(dev, state);
1398	trace_device_pm_callback_end(dev, error);
1399	suspend_report_result(cb, error);
1400
1401	initcall_debug_report(dev, calltime, error, state, info);
1402
1403	return error;
1404}
1405
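/*
 * Clear direct_complete for all suppliers of @dev so that they get their full
 * suspend and resume callbacks instead of being left in runtime suspend via
 * the direct_complete path.
 */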
1406static void dpm_clear_suppliers_direct_complete(struct device *dev)
1407{
1408	struct device_link *link;
1409	int idx;
1410
1411	idx = device_links_read_lock();
1412
1413	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
1414		spin_lock_irq(&link->supplier->power.lock);
1415		link->supplier->power.direct_complete = false;
1416		spin_unlock_irq(&link->supplier->power.lock);
1417	}
1418
1419	device_links_read_unlock(idx);
1420}
1421
1422/**
1423 * device_suspend - Execute "suspend" callbacks for given device.
1424 * @dev: Device to handle.
1425 * @state: PM transition of the system being carried out.
1426 * @async: If true, the device is being suspended asynchronously.
1427 */
1428static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1429{
1430	pm_callback_t callback = NULL;
1431	const char *info = NULL;
1432	int error = 0;
1433	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1434
1435	TRACE_DEVICE(dev);
1436	TRACE_SUSPEND(0);
1437
1438	dpm_wait_for_subordinate(dev, async);
1439
1440	if (async_error)
1441		goto Complete;
1442
1443	/*
1444	 * If a device configured to wake up the system from sleep states
1445	 * has been suspended at run time and there's a resume request pending
1446	 * for it, this is equivalent to the device signaling wakeup, so the
1447	 * system suspend operation should be aborted.
1448	 */
1449	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1450		pm_wakeup_event(dev, 0);
1451
1452	if (pm_wakeup_pending()) {
1453		async_error = -EBUSY;
1454		goto Complete;
1455	}
1456
1457	if (dev->power.syscore)
1458		goto Complete;
1459
1460	if (dev->power.direct_complete) {
1461		if (pm_runtime_status_suspended(dev)) {
1462			pm_runtime_disable(dev);
1463			if (pm_runtime_status_suspended(dev))
1464				goto Complete;
1465
1466			pm_runtime_enable(dev);
1467		}
1468		dev->power.direct_complete = false;
1469	}
1470
1471	dpm_watchdog_set(&wd, dev);
1472	device_lock(dev);
1473
1474	if (dev->pm_domain) {
1475		info = "power domain ";
1476		callback = pm_op(&dev->pm_domain->ops, state);
1477		goto Run;
1478	}
1479
1480	if (dev->type && dev->type->pm) {
1481		info = "type ";
1482		callback = pm_op(dev->type->pm, state);
1483		goto Run;
1484	}
1485
1486	if (dev->class) {
1487		if (dev->class->pm) {
1488			info = "class ";
1489			callback = pm_op(dev->class->pm, state);
1490			goto Run;
1491		} else if (dev->class->suspend) {
1492			pm_dev_dbg(dev, state, "legacy class ");
1493			error = legacy_suspend(dev, state, dev->class->suspend,
1494						"legacy class ");
1495			goto End;
1496		}
1497	}
1498
1499	if (dev->bus) {
1500		if (dev->bus->pm) {
1501			info = "bus ";
1502			callback = pm_op(dev->bus->pm, state);
1503		} else if (dev->bus->suspend) {
1504			pm_dev_dbg(dev, state, "legacy bus ");
1505			error = legacy_suspend(dev, state, dev->bus->suspend,
1506						"legacy bus ");
1507			goto End;
1508		}
1509	}
1510
1511 Run:
1512	if (!callback && dev->driver && dev->driver->pm) {
1513		info = "driver ";
1514		callback = pm_op(dev->driver->pm, state);
1515	}
1516
1517	error = dpm_run_callback(callback, dev, state, info);
1518
1519 End:
1520	if (!error) {
1521		struct device *parent = dev->parent;
1522
1523		dev->power.is_suspended = true;
1524		if (parent) {
1525			spin_lock_irq(&parent->power.lock);
1526
1527			dev->parent->power.direct_complete = false;
1528			if (dev->power.wakeup_path
1529			    && !dev->parent->power.ignore_children)
1530				dev->parent->power.wakeup_path = true;
1531
1532			spin_unlock_irq(&parent->power.lock);
1533		}
1534		dpm_clear_suppliers_direct_complete(dev);
1535	}
1536
1537	device_unlock(dev);
1538	dpm_watchdog_clear(&wd);
1539
1540 Complete:
1541	if (error)
1542		async_error = error;
1543
1544	complete_all(&dev->power.completion);
1545	TRACE_SUSPEND(error);
1546	return error;
1547}
1548
1549static void async_suspend(void *data, async_cookie_t cookie)
1550{
1551	struct device *dev = (struct device *)data;
1552	int error;
1553
1554	error = __device_suspend(dev, pm_transition, true);
1555	if (error) {
1556		dpm_save_failed_dev(dev_name(dev));
1557		pm_dev_err(dev, pm_transition, " async", error);
1558	}
1559
1560	put_device(dev);
1561}
1562
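/* Run the "suspend" of one device, asynchronously if allowed. */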
1563static int device_suspend(struct device *dev)
1564{
1565	reinit_completion(&dev->power.completion);
1566
1567	if (is_async(dev)) {
1568		get_device(dev);
1569		async_schedule(async_suspend, dev);
1570		return 0;
1571	}
1572
1573	return __device_suspend(dev, pm_transition, false);
1574}
1575
1576/**
1577 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1578 * @state: PM transition of the system being carried out.
1579 */
1580int dpm_suspend(pm_message_t state)
1581{
1582	ktime_t starttime = ktime_get();
1583	int error = 0;
1584
1585	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1586	might_sleep();
1587
1588	cpufreq_suspend();
1589
1590	mutex_lock(&dpm_list_mtx);
1591	pm_transition = state;
1592	async_error = 0;
1593	while (!list_empty(&dpm_prepared_list)) {
1594		struct device *dev = to_device(dpm_prepared_list.prev);
1595
1596		get_device(dev);
1597		mutex_unlock(&dpm_list_mtx);
1598
1599		error = device_suspend(dev);
1600
1601		mutex_lock(&dpm_list_mtx);
1602		if (error) {
1603			pm_dev_err(dev, state, "", error);
1604			dpm_save_failed_dev(dev_name(dev));
1605			put_device(dev);
1606			break;
1607		}
1608		if (!list_empty(&dev->power.entry))
1609			list_move(&dev->power.entry, &dpm_suspended_list);
1610		put_device(dev);
1611		if (async_error)
1612			break;
1613	}
1614	mutex_unlock(&dpm_list_mtx);
1615	async_synchronize_full();
1616	if (!error)
1617		error = async_error;
1618	if (error) {
1619		suspend_stats.failed_suspend++;
1620		dpm_save_failed_step(SUSPEND_SUSPEND);
1621	} else
1622		dpm_show_time(starttime, state, NULL);
1623	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1624	return error;
1625}
1626
1627/**
1628 * device_prepare - Prepare a device for system power transition.
1629 * @dev: Device to handle.
1630 * @state: PM transition of the system being carried out.
1631 *
1632 * Execute the ->prepare() callback(s) for given device.  No new children of the
1633 * device may be registered after this function has returned.
1634 */
1635static int device_prepare(struct device *dev, pm_message_t state)
1636{
1637	int (*callback)(struct device *) = NULL;
1638	int ret = 0;
1639
1640	if (dev->power.syscore)
1641		return 0;
1642
1643	/*
1644	 * If a device's parent goes into runtime suspend at the wrong time,
1645	 * it won't be possible to resume the device.  To prevent this we
1646	 * block runtime suspend here, during the prepare phase, and allow
1647	 * it again during the complete phase.
1648	 */
1649	pm_runtime_get_noresume(dev);
1650
1651	device_lock(dev);
1652
1653	dev->power.wakeup_path = device_may_wakeup(dev);
1654
1655	if (dev->power.no_pm_callbacks) {
1656		ret = 1;	/* Let device go direct_complete */
1657		goto unlock;
1658	}
1659
1660	if (dev->pm_domain)
1661		callback = dev->pm_domain->ops.prepare;
1662	else if (dev->type && dev->type->pm)
1663		callback = dev->type->pm->prepare;
1664	else if (dev->class && dev->class->pm)
1665		callback = dev->class->pm->prepare;
1666	else if (dev->bus && dev->bus->pm)
1667		callback = dev->bus->pm->prepare;
1668
1669	if (!callback && dev->driver && dev->driver->pm)
1670		callback = dev->driver->pm->prepare;
1671
1672	if (callback)
1673		ret = callback(dev);
1674
1675unlock:
1676	device_unlock(dev);
1677
1678	if (ret < 0) {
1679		suspend_report_result(callback, ret);
1680		pm_runtime_put(dev);
1681		return ret;
1682	}
1683	/*
1684	 * A positive return value from ->prepare() means "this device appears
1685	 * to be runtime-suspended and its state is fine, so if it really is
1686	 * runtime-suspended, you can leave it in that state provided that you
1687	 * will do the same thing with all of its descendants".  This only
1688	 * applies to suspend transitions, however.
1689	 */
1690	spin_lock_irq(&dev->power.lock);
1691	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
1692	spin_unlock_irq(&dev->power.lock);
1693	return 0;
1694}
1695
1696/**
1697 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1698 * @state: PM transition of the system being carried out.
1699 *
1700 * Execute the ->prepare() callback(s) for all devices.
1701 */
1702int dpm_prepare(pm_message_t state)
1703{
1704	int error = 0;
1705
1706	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1707	might_sleep();
1708
1709	/*
 1710	 * Give a chance for the known devices to complete their probes, before
 1711	 * disabling probing of devices. This sync point is important at least
 1712	 * at boot time and during hibernation restore.
1713	 */
1714	wait_for_device_probe();
1715	/*
 1716	 * Probing devices during suspend or hibernation is unsafe and would make
 1717	 * system behavior unpredictable, so prohibit device probing here and
 1718	 * defer probes instead. The normal behavior will be restored in
 1719	 * dpm_complete().
1720	 */
1721	device_block_probing();
1722
1723	mutex_lock(&dpm_list_mtx);
1724	while (!list_empty(&dpm_list)) {
1725		struct device *dev = to_device(dpm_list.next);
1726
1727		get_device(dev);
1728		mutex_unlock(&dpm_list_mtx);
1729
1730		trace_device_pm_callback_start(dev, "", state.event);
1731		error = device_prepare(dev, state);
1732		trace_device_pm_callback_end(dev, error);
1733
1734		mutex_lock(&dpm_list_mtx);
1735		if (error) {
1736			if (error == -EAGAIN) {
1737				put_device(dev);
1738				error = 0;
1739				continue;
1740			}
1741			printk(KERN_INFO "PM: Device %s not prepared "
1742				"for power transition: code %d\n",
1743				dev_name(dev), error);
1744			put_device(dev);
1745			break;
1746		}
1747		dev->power.is_prepared = true;
1748		if (!list_empty(&dev->power.entry))
1749			list_move_tail(&dev->power.entry, &dpm_prepared_list);
1750		put_device(dev);
1751	}
1752	mutex_unlock(&dpm_list_mtx);
1753	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1754	return error;
1755}
1756
1757/**
1758 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1759 * @state: PM transition of the system being carried out.
1760 *
1761 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1762 * callbacks for them.
1763 */
1764int dpm_suspend_start(pm_message_t state)
1765{
1766	int error;
1767
1768	error = dpm_prepare(state);
1769	if (error) {
1770		suspend_stats.failed_prepare++;
1771		dpm_save_failed_step(SUSPEND_PREPARE);
1772	} else
1773		error = dpm_suspend(state);
1774	return error;
1775}
1776EXPORT_SYMBOL_GPL(dpm_suspend_start);
1777
1778void __suspend_report_result(const char *function, void *fn, int ret)
1779{
1780	if (ret)
1781		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1782}
1783EXPORT_SYMBOL_GPL(__suspend_report_result);
1784
1785/**
1786 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1787 * @dev: Device to wait for.
1788 * @subordinate: Device that needs to wait for @dev.
1789 */
1790int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1791{
1792	dpm_wait(dev, subordinate->power.async_suspend);
1793	return async_error;
1794}
1795EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1796
1797/**
1798 * dpm_for_each_dev - device iterator.
1799 * @data: data for the callback.
1800 * @fn: function to be called for each device.
1801 *
1802 * Iterate over devices in dpm_list, and call @fn for each device,
1803 * passing it @data.
1804 */
1805void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1806{
1807	struct device *dev;
1808
1809	if (!fn)
1810		return;
1811
1812	device_pm_lock();
1813	list_for_each_entry(dev, &dpm_list, power.entry)
1814		fn(dev, data);
1815	device_pm_unlock();
1816}
1817EXPORT_SYMBOL_GPL(dpm_for_each_dev);
1818
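/*
 * Return true if @ops is NULL or defines none of the system sleep callbacks
 * checked below (runtime PM and hibernation-only callbacks are not considered).
 */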
1819static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1820{
1821	if (!ops)
1822		return true;
1823
1824	return !ops->prepare &&
1825	       !ops->suspend &&
1826	       !ops->suspend_late &&
1827	       !ops->suspend_noirq &&
1828	       !ops->resume_noirq &&
1829	       !ops->resume_early &&
1830	       !ops->resume &&
1831	       !ops->complete;
1832}
1833
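/**
 * device_pm_check_callbacks - Update the no_pm_callbacks flag for a device.
 * @dev: Device to check.
 *
 * Set power.no_pm_callbacks if neither the device's subsystems nor its driver
 * provide PM callbacks; device_prepare() then lets such devices take the
 * direct_complete path.
 */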
1834void device_pm_check_callbacks(struct device *dev)
1835{
1836	spin_lock_irq(&dev->power.lock);
1837	dev->power.no_pm_callbacks =
1838		(!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
1839		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
1840		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
1841		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
1842		(!dev->driver || pm_ops_is_empty(dev->driver->pm));
1843	spin_unlock_irq(&dev->power.lock);
1844}