PageRenderTime 60ms CodeModel.GetById 2ms app.highlight 51ms RepoModel.GetById 1ms app.codeStats 0ms

/drivers/base/power/main.c

https://bitbucket.org/mifl/android_kernel_qrd_msm
C | 1202 lines | 834 code | 155 blank | 213 comment | 152 complexity | 29e6b3a2b6c465da61f7d862234e5c16 MD5 | raw file
   1/*
   2 * drivers/base/power/main.c - Where the driver meets power management.
   3 *
   4 * Copyright (c) 2003 Patrick Mochel
   5 * Copyright (c) 2003 Open Source Development Lab
   6 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
   7 *
   8 * This file is released under the GPLv2
   9 *
  10 *
  11 * The driver model core calls device_pm_add() when a device is registered.
  12 * This will initialize the embedded device_pm_info object in the device
  13 * and add it to the list of power-controlled devices. sysfs entries for
  14 * controlling device power management will also be added.
  15 *
  16 * A separate list is used for keeping track of power info, because the power
  17 * domain dependencies may differ from the ancestral dependencies that the
  18 * subsystem list maintains.
  19 */
  20
  21#include <linux/device.h>
  22#include <linux/kallsyms.h>
  23#include <linux/mutex.h>
  24#include <linux/pm.h>
  25#include <linux/pm_runtime.h>
  26#include <linux/resume-trace.h>
  27#include <linux/interrupt.h>
  28#include <linux/sched.h>
  29#include <linux/async.h>
  30#include <linux/suspend.h>
  31#include <linux/timer.h>
  32
  33#include "../base.h"
  34#include "power.h"
  35
  36#ifdef CONFIG_MSM_SM_EVENT
  37#include <linux/sm_event_log.h>
  38#include <linux/sm_event.h>
  39#endif
  40/*
  41 * The entries in the dpm_list list are in a depth first order, simply
  42 * because children are guaranteed to be discovered after parents, and
  43 * are inserted at the back of the list on discovery.
  44 *
  45 * Since device_pm_add() may be called with a device lock held,
  46 * we must never try to acquire a device lock while holding
  47 * dpm_list_mutex.
  48 */
  49
/* All registered devices, in depth-first order (children after parents). */
LIST_HEAD(dpm_list);
/* Devices that have completed ->prepare(). */
LIST_HEAD(dpm_prepared_list);
/* Devices that have completed ->suspend(). */
LIST_HEAD(dpm_suspended_list);
/* Devices that have completed ->suspend_noirq(). */
LIST_HEAD(dpm_noirq_list);

/* Protects the dpm_* stage lists above. */
static DEFINE_MUTEX(dpm_list_mtx);
/* PM transition currently being carried out (set under dpm_list_mtx). */
static pm_message_t pm_transition;

static void dpm_drv_timeout(unsigned long data);
/* Context passed (via timer.data) to the suspend/resume watchdog. */
struct dpm_drv_wd_data {
	struct device *dev;		/* device whose callback is running */
	struct task_struct *tsk;	/* task driving the transition */
};

/* First error reported by an async suspend thread; 0 if none so far. */
static int async_error;
  65
  66/**
  67 * device_pm_init - Initialize the PM-related part of a device object.
  68 * @dev: Device object being initialized.
  69 */
  70void device_pm_init(struct device *dev)
  71{
  72	dev->power.is_prepared = false;
  73	dev->power.is_suspended = false;
  74	init_completion(&dev->power.completion);
  75	complete_all(&dev->power.completion);
  76	dev->power.wakeup = NULL;
  77	spin_lock_init(&dev->power.lock);
  78	pm_runtime_init(dev);
  79	INIT_LIST_HEAD(&dev->power.entry);
  80}
  81
  82/**
  83 * device_pm_lock - Lock the list of active devices used by the PM core.
  84 */
void device_pm_lock(void)
{
	/* Paired with device_pm_unlock(); guards the dpm_* lists. */
	mutex_lock(&dpm_list_mtx);
}
  89
  90/**
  91 * device_pm_unlock - Unlock the list of active devices used by the PM core.
  92 */
void device_pm_unlock(void)
{
	/* Paired with device_pm_lock(). */
	mutex_unlock(&dpm_list_mtx);
}
  97
  98/**
  99 * device_pm_add - Add a device to the PM core's list of active devices.
 100 * @dev: Device to add to the list.
 101 */
 102void device_pm_add(struct device *dev)
 103{
 104	pr_debug("PM: Adding info for %s:%s\n",
 105		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 106	mutex_lock(&dpm_list_mtx);
 107	if (dev->parent && dev->parent->power.is_prepared)
 108		dev_warn(dev, "parent %s should not be sleeping\n",
 109			dev_name(dev->parent));
 110	list_add_tail(&dev->power.entry, &dpm_list);
 111	mutex_unlock(&dpm_list_mtx);
 112}
 113
 114/**
 115 * device_pm_remove - Remove a device from the PM core's list of active devices.
 116 * @dev: Device to be removed from the list.
 117 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	/* Release anyone blocked in dpm_wait() on this device. */
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}
 129
 130/**
 131 * device_pm_move_before - Move device in the PM core's list of active devices.
 132 * @deva: Device to move in dpm_list.
 133 * @devb: Device @deva should come before.
 134 */
 135void device_pm_move_before(struct device *deva, struct device *devb)
 136{
 137	pr_debug("PM: Moving %s:%s before %s:%s\n",
 138		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 139		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 140	/* Delete deva from dpm_list and reinsert before devb. */
 141	list_move_tail(&deva->power.entry, &devb->power.entry);
 142}
 143
 144/**
 145 * device_pm_move_after - Move device in the PM core's list of active devices.
 146 * @deva: Device to move in dpm_list.
 147 * @devb: Device @deva should come after.
 148 */
 149void device_pm_move_after(struct device *deva, struct device *devb)
 150{
 151	pr_debug("PM: Moving %s:%s after %s:%s\n",
 152		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 153		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 154	/* Delete deva from dpm_list and reinsert after devb. */
 155	list_move(&deva->power.entry, &devb->power.entry);
 156}
 157
 158/**
 159 * device_pm_move_last - Move device to end of the PM core's list of devices.
 160 * @dev: Device to move in dpm_list.
 161 */
 162void device_pm_move_last(struct device *dev)
 163{
 164	pr_debug("PM: Moving %s:%s to end of list\n",
 165		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 166	list_move_tail(&dev->power.entry, &dpm_list);
 167}
 168
 169static ktime_t initcall_debug_start(struct device *dev)
 170{
 171	ktime_t calltime = ktime_set(0, 0);
 172
 173	if (initcall_debug) {
 174		pr_info("calling  %s+ @ %i\n",
 175				dev_name(dev), task_pid_nr(current));
 176		calltime = ktime_get();
 177	}
 178
 179	return calltime;
 180}
 181
 182static void initcall_debug_report(struct device *dev, ktime_t calltime,
 183				  int error)
 184{
 185	ktime_t delta, rettime;
 186
 187	if (initcall_debug) {
 188		rettime = ktime_get();
 189		delta = ktime_sub(rettime, calltime);
 190		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
 191			error, (unsigned long long)ktime_to_ns(delta) >> 10);
 192	}
 193}
 194
 195/**
 196 * dpm_wait - Wait for a PM operation to complete.
 197 * @dev: Device to wait for.
 198 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 199 */
 200static void dpm_wait(struct device *dev, bool async)
 201{
 202	if (!dev)
 203		return;
 204
 205	if (async || (pm_async_enabled && dev->power.async_suspend))
 206		wait_for_completion(&dev->power.completion);
 207}
 208
 209static int dpm_wait_fn(struct device *dev, void *async_ptr)
 210{
 211	dpm_wait(dev, *((bool *)async_ptr));
 212	return 0;
 213}
 214
 215static void dpm_wait_for_children(struct device *dev, bool async)
 216{
 217       device_for_each_child(dev, &async, dpm_wait_fn);
 218}
 219
 220/**
 221 * pm_op - Execute the PM operation appropriate for given PM event.
 222 * @dev: Device to handle.
 223 * @ops: PM operations to choose from.
 224 * @state: PM transition of the system being carried out.
 225 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;
#ifdef CONFIG_MSM_SM_EVENT
	/* For logging slow suspend/resume callbacks to the SM event log. */
	uint32_t suspend_type = 0, delta_time = 0;
	ktime_t rettime;
#endif
	calltime = initcall_debug_start(dev);
#ifdef CONFIG_MSM_SM_EVENT
	/*
	 * Stamp the start time unconditionally; initcall_debug_start()
	 * only does so when initcall_debug is set.
	 */
	calltime = ktime_get();
#endif


	/* Dispatch to the callback matching the transition being run. */
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
#ifdef CONFIG_MSM_SM_EVENT
			suspend_type = SM_DEVICE_EVENT | SM_DEVICE_EVENT_SUSPEND;
#endif
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
#ifdef CONFIG_MSM_SM_EVENT
			suspend_type = SM_DEVICE_EVENT | SM_DEVICE_EVENT_RESUME;
#endif
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

#ifdef CONFIG_MSM_SM_EVENT
	/* Log suspend/resume callbacks that took longer than ~10 us. */
	rettime = ktime_get();
	delta_time = (unsigned long)(ktime_to_ns(ktime_sub(rettime, calltime)) >> 10);
	if(delta_time > 10 && suspend_type != 0)
		sm_add_event(suspend_type, (uint32_t)(delta_time), 0, (void *)dev_name(dev), strlen(dev_name(dev)) + 1);
#endif
	return error;
}
 305
 306/**
 307 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 308 * @dev: Device to handle.
 309 * @ops: PM operations to choose from.
 310 * @state: PM transition of the system being carried out.
 311 *
 312 * The driver of @dev will not receive interrupts while this function is being
 313 * executed.
 314 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;
#ifdef CONFIG_MSM_SM_EVENT
	/* For logging slow noirq callbacks to the SM event log. */
	uint32_t suspend_type = 0, delta_time = 0;
#endif
	ktime_t calltime = ktime_set(0, 0), delta, rettime;

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
				dev_name(dev), task_pid_nr(current),
				dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

#ifdef CONFIG_MSM_SM_EVENT
	/* Stamp start time even when initcall_debug is off. */
	calltime = ktime_get();
#endif

	/* Dispatch to the *_noirq callback matching the transition. */
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
#ifdef CONFIG_MSM_SM_EVENT
			suspend_type = SM_DEVICE_EVENT | SM_DEVICE_EVENT_SUSPEND;
#endif
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
#ifdef CONFIG_MSM_SM_EVENT
			suspend_type = SM_DEVICE_EVENT | SM_DEVICE_EVENT_RESUME;
#endif
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

#ifdef CONFIG_MSM_SM_EVENT
	/* Log noirq suspend/resume callbacks that took longer than ~10 us. */
	rettime = ktime_get();
	delta_time = (unsigned long)(ktime_to_ns(ktime_sub(rettime, calltime)) >> 10);
	if(delta_time > 10 && suspend_type != 0)
		sm_add_event(suspend_type, (uint32_t)(delta_time), 0, (void *)dev_name(dev), strlen(dev_name(dev))+1);
#endif
	return error;
}
 405
 406static char *pm_verb(int event)
 407{
 408	switch (event) {
 409	case PM_EVENT_SUSPEND:
 410		return "suspend";
 411	case PM_EVENT_RESUME:
 412		return "resume";
 413	case PM_EVENT_FREEZE:
 414		return "freeze";
 415	case PM_EVENT_QUIESCE:
 416		return "quiesce";
 417	case PM_EVENT_HIBERNATE:
 418		return "hibernate";
 419	case PM_EVENT_THAW:
 420		return "thaw";
 421	case PM_EVENT_RESTORE:
 422		return "restore";
 423	case PM_EVENT_RECOVER:
 424		return "recover";
 425	default:
 426		return "(unknown PM event)";
 427	}
 428}
 429
 430static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
 431{
 432	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
 433		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
 434		", may wakeup" : "");
 435}
 436
/* Log a failed PM callback: device, verb (suspend/resume/...), phase, code. */
static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}
 443
/* Print how long the PM phase started at @starttime took, in msecs. */
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	/* do_div() mutates usecs64 in place (64-bit division helper). */
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	/* Report at least 1 us so the log never shows 0.000 msecs. */
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
 460
 461/*------------------------- Resume routines -------------------------*/
 462
 463/**
 464 * device_resume_noirq - Execute an "early resume" callback for given device.
 465 * @dev: Device to handle.
 466 * @state: PM transition of the system being carried out.
 467 *
 468 * The driver of @dev will not receive interrupts while this function is being
 469 * executed.
 470 */
 471static int device_resume_noirq(struct device *dev, pm_message_t state)
 472{
 473	int error = 0;
 474
 475	TRACE_DEVICE(dev);
 476	TRACE_RESUME(0);
 477
 478	if (dev->pwr_domain) {
 479		pm_dev_dbg(dev, state, "EARLY power domain ");
 480		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
 481	} else if (dev->type && dev->type->pm) {
 482		pm_dev_dbg(dev, state, "EARLY type ");
 483		error = pm_noirq_op(dev, dev->type->pm, state);
 484	} else if (dev->class && dev->class->pm) {
 485		pm_dev_dbg(dev, state, "EARLY class ");
 486		error = pm_noirq_op(dev, dev->class->pm, state);
 487	} else if (dev->bus && dev->bus->pm) {
 488		pm_dev_dbg(dev, state, "EARLY ");
 489		error = pm_noirq_op(dev, dev->bus->pm, state);
 490	}
 491
 492	TRACE_RESUME(error);
 493	return error;
 494}
 495
 496/**
 497 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 498 * @state: PM transition of the system being carried out.
 499 *
 500 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 501 * enable device drivers to receive interrupts.
 502 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		/* Advance to the suspended list; dpm_resume() handles it next. */
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		/* Drop the list lock: the callback may sleep. */
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error)
			pm_dev_err(dev, state, " early", error);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	/* The "noirq" phase is over: let drivers receive interrupts again. */
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
 528
 529/**
 530 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 531 * @dev: Device to resume.
 532 * @cb: Resume callback to execute.
 533 */
 534static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
 535{
 536	int error;
 537	ktime_t calltime;
 538
 539	calltime = initcall_debug_start(dev);
 540
 541	error = cb(dev);
 542	suspend_report_result(cb, error);
 543
 544	initcall_debug_report(dev, calltime, error);
 545
 546	return error;
 547}
 548
 549/**
 550 * device_resume - Execute "resume" callbacks for given device.
 551 * @dev: Device to handle.
 552 * @state: PM transition of the system being carried out.
 553 * @async: If true, the device is being resumed asynchronously.
 554 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;
	bool put = false;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* A child must not resume before its parent has finished resuming. */
	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	/* Nothing to do if the suspend callback never ran (or failed). */
	if (!dev->power.is_suspended)
		goto Unlock;

	/*
	 * Re-enable runtime PM (disabled in __device_suspend()); "put"
	 * marks that the matching pm_runtime_put_sync() must run below.
	 */
	pm_runtime_enable(dev);
	put = true;

	/* Callback selection order: power domain, type, class, bus. */
	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pwr_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
	}

 End:
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	/* Wake waiters (children and device_pm_wait_for_dev() callers). */
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	/* Balances the pm_runtime_get_noresume() from __device_suspend(). */
	if (put)
		pm_runtime_put_sync(dev);

	return error;
}
 626
 627static void async_resume(void *data, async_cookie_t cookie)
 628{
 629	struct device *dev = (struct device *)data;
 630	int error;
 631
 632	error = device_resume(dev, pm_transition, true);
 633	if (error)
 634		pm_dev_err(dev, pm_transition, " async", error);
 635	put_device(dev);
 636}
 637
 638static bool is_async(struct device *dev)
 639{
 640	return dev->power.async_suspend && pm_async_enabled
 641		&& !pm_trace_is_enabled();
 642}
 643
 644/**
 645 *	dpm_drv_timeout - Driver suspend / resume watchdog handler
 646 *	@data: struct device which timed out
 647 *
 648 * 	Called when a driver has timed out suspending or resuming.
 649 * 	There's not much we can do here to recover so
 650 * 	BUG() out for a crash-dump
 651 *
 652 */
static void dpm_drv_timeout(unsigned long data)
{
	struct dpm_drv_wd_data *wd_data = (void *)data;
	struct device *dev = wd_data->dev;
	struct task_struct *tsk = wd_data->tsk;

	printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev),
	       (dev->driver ? dev->driver->name : "no driver"));

	/* Show where the suspending/resuming task is stuck. */
	printk(KERN_EMERG "dpm suspend stack:\n");
	show_stack(tsk, NULL);

	/* Deliberate crash: a hung transition is unrecoverable; get a dump. */
	BUG();
}
 667
 668/**
 669 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 670 * @state: PM transition of the system being carried out.
 671 *
 672 * Execute the appropriate "resume" callback for all devices whose status
 673 * indicates that they are suspended.
 674 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/* Kick off async-capable devices first so they resume in parallel. */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	/* Resume the rest synchronously, in list (suspend-reverse) order. */
	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			/* Drop the list lock: resume callbacks may sleep. */
			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error)
				pm_dev_err(dev, state, "", error);

			mutex_lock(&dpm_list_mtx);
		}
		/* Entry may be gone if the device was removed meanwhile. */
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all async resumes scheduled above to finish. */
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}
 716
 717/**
 718 * device_complete - Complete a PM transition for given device.
 719 * @dev: Device to handle.
 720 * @state: PM transition of the system being carried out.
 721 */
 722static void device_complete(struct device *dev, pm_message_t state)
 723{
 724	device_lock(dev);
 725
 726	if (dev->pwr_domain) {
 727		pm_dev_dbg(dev, state, "completing power domain ");
 728		if (dev->pwr_domain->ops.complete)
 729			dev->pwr_domain->ops.complete(dev);
 730	} else if (dev->type && dev->type->pm) {
 731		pm_dev_dbg(dev, state, "completing type ");
 732		if (dev->type->pm->complete)
 733			dev->type->pm->complete(dev);
 734	} else if (dev->class && dev->class->pm) {
 735		pm_dev_dbg(dev, state, "completing class ");
 736		if (dev->class->pm->complete)
 737			dev->class->pm->complete(dev);
 738	} else if (dev->bus && dev->bus->pm) {
 739		pm_dev_dbg(dev, state, "completing ");
 740		if (dev->bus->pm->complete)
 741			dev->bus->pm->complete(dev);
 742	}
 743
 744	device_unlock(dev);
 745}
 746
 747/**
 748 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 749 * @state: PM transition of the system being carried out.
 750 *
 751 * Execute the ->complete() callbacks for all devices whose PM status is not
 752 * DPM_ON (this allows new devices to be registered).
 753 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	/* Walk dpm_prepared_list from the tail (reverse prepare order). */
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		/* Park on a private list while the lock is dropped. */
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	/* Return all completed devices to the main dpm_list. */
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
 778
 779/**
 780 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 781 * @state: PM transition of the system being carried out.
 782 *
 783 * Execute "resume" callbacks for all devices and complete the PM transition of
 784 * the system.
 785 */
void dpm_resume_end(pm_message_t state)
{
	/* Resume all devices, then run their ->complete() callbacks. */
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
 792
 793
 794/*------------------------- Suspend routines -------------------------*/
 795
 796/**
 797 * resume_event - Return a "resume" message for given "suspend" sleep state.
 798 * @sleep_state: PM message representing a sleep state.
 799 *
 800 * Return a PM message representing the resume event corresponding to given
 801 * sleep state.
 802 */
 803static pm_message_t resume_event(pm_message_t sleep_state)
 804{
 805	switch (sleep_state.event) {
 806	case PM_EVENT_SUSPEND:
 807		return PMSG_RESUME;
 808	case PM_EVENT_FREEZE:
 809	case PM_EVENT_QUIESCE:
 810		return PMSG_RECOVER;
 811	case PM_EVENT_HIBERNATE:
 812		return PMSG_RESTORE;
 813	}
 814	return PMSG_ON;
 815}
 816
 817/**
 818 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 819 * @dev: Device to handle.
 820 * @state: PM transition of the system being carried out.
 821 *
 822 * The driver of @dev will not receive interrupts while this function is being
 823 * executed.
 824 */
 825static int device_suspend_noirq(struct device *dev, pm_message_t state)
 826{
 827	int error;
 828
 829	if (dev->pwr_domain) {
 830		pm_dev_dbg(dev, state, "LATE power domain ");
 831		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
 832		if (error)
 833			return error;
 834	} else if (dev->type && dev->type->pm) {
 835		pm_dev_dbg(dev, state, "LATE type ");
 836		error = pm_noirq_op(dev, dev->type->pm, state);
 837		if (error)
 838			return error;
 839	} else if (dev->class && dev->class->pm) {
 840		pm_dev_dbg(dev, state, "LATE class ");
 841		error = pm_noirq_op(dev, dev->class->pm, state);
 842		if (error)
 843			return error;
 844	} else if (dev->bus && dev->bus->pm) {
 845		pm_dev_dbg(dev, state, "LATE ");
 846		error = pm_noirq_op(dev, dev->bus->pm, state);
 847		if (error)
 848			return error;
 849	}
 850
 851	return 0;
 852}
 853
 854/**
 855 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 856 * @state: PM transition of the system being carried out.
 857 *
 858 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 859 * handlers for all non-sysdev devices.
 860 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	/* From here on, drivers no longer receive interrupts. */
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	/* Walk from the tail: reverse of the order devices were suspended. */
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		/* Drop the list lock: the callback may sleep. */
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			put_device(dev);
			break;
		}
		/* Entry may be gone if the device was removed meanwhile. */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		/* Roll back: resume everything suspended so far. */
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
 894
 895/**
 896 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 897 * @dev: Device to suspend.
 898 * @state: PM transition of the system being carried out.
 899 * @cb: Suspend callback to execute.
 900 */
 901static int legacy_suspend(struct device *dev, pm_message_t state,
 902			  int (*cb)(struct device *dev, pm_message_t state))
 903{
 904	int error;
 905	ktime_t calltime;
 906
 907	calltime = initcall_debug_start(dev);
 908
 909	error = cb(dev, state);
 910	suspend_report_result(cb, error);
 911
 912	initcall_debug_report(dev, calltime, error);
 913
 914	return error;
 915}
 916
 917/**
 918 * device_suspend - Execute "suspend" callbacks for given device.
 919 * @dev: Device to handle.
 920 * @state: PM transition of the system being carried out.
 921 * @async: If true, the device is being suspended asynchronously.
 922 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;
	struct timer_list timer;
	struct dpm_drv_wd_data data;

	/* Children must be fully suspended before their parent. */
	dpm_wait_for_children(dev, async);

	/* Another (async) device already failed: skip quietly. */
	if (async_error)
		return 0;

	/* Pin the device active; released below or in device_resume(). */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	/* Abort the whole suspend if a wakeup event is pending. */
	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		async_error = -EBUSY;
		return 0;
	}

	/* Watchdog: dpm_drv_timeout() BUG()s if the callback hangs > 12 s. */
	data.dev = dev;
	data.tsk = get_current();
	init_timer_on_stack(&timer);
	timer.expires = jiffies + HZ * 12;
	timer.function = dpm_drv_timeout;
	timer.data = (unsigned long)&data;
	add_timer(&timer);

	device_lock(dev);

	/* Callback selection order: power domain, type, class, bus. */
	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pwr_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 End:
	dev->power.is_suspended = !error;

	device_unlock(dev);

	del_timer_sync(&timer);
	destroy_timer_on_stack(&timer);

	/* Wake waiters (parent's dpm_wait_for_children() among them). */
	complete_all(&dev->power.completion);

	if (error) {
		/* Balances pm_runtime_get_noresume() above. */
		pm_runtime_put_sync(dev);
		async_error = error;
	} else if (dev->power.is_suspended) {
		/* Keep runtime PM disabled until device_resume() re-enables. */
		__pm_runtime_disable(dev, false);
	}

	return error;
}
1007
1008static void async_suspend(void *data, async_cookie_t cookie)
1009{
1010	struct device *dev = (struct device *)data;
1011	int error;
1012
1013	error = __device_suspend(dev, pm_transition, true);
1014	if (error)
1015		pm_dev_err(dev, pm_transition, " async", error);
1016
1017	put_device(dev);
1018}
1019
1020static int device_suspend(struct device *dev)
1021{
1022	INIT_COMPLETION(dev->power.completion);
1023
1024	if (pm_async_enabled && dev->power.async_suspend) {
1025		get_device(dev);
1026		async_schedule(async_suspend, dev);
1027		return 0;
1028	}
1029
1030	return __device_suspend(dev, pm_transition, false);
1031}
1032
1033/**
1034 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1035 * @state: PM transition of the system being carried out.
1036 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	/* Walk from the tail: children (registered later) suspend first. */
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		/* Drop the list lock: suspend callbacks may sleep. */
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		/* Entry may be gone if the device was removed meanwhile. */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		/* Stop early if an async suspend reported failure. */
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all async suspends scheduled above to finish. */
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}
1075
1076/**
1077 * device_prepare - Prepare a device for system power transition.
1078 * @dev: Device to handle.
1079 * @state: PM transition of the system being carried out.
1080 *
1081 * Execute the ->prepare() callback(s) for given device.  No new children of the
1082 * device may be registered after this function has returned.
1083 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	/* Callback selection order: power domain, type, class, bus. */
	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "preparing power domain ");
		if (dev->pwr_domain->ops.prepare)
			error = dev->pwr_domain->ops.prepare(dev);
		suspend_report_result(dev->pwr_domain->ops.prepare, error);
		if (error)
			goto End;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "preparing type ");
		if (dev->type->pm->prepare)
			error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "preparing class ");
		if (dev->class->pm->prepare)
			error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "preparing ");
		if (dev->bus->pm->prepare)
			error = dev->bus->pm->prepare(dev);
		/* Harmless when ->prepare is NULL: error is still 0 here. */
		suspend_report_result(dev->bus->pm->prepare, error);
	}

 End:
	device_unlock(dev);

	return error;
}
1123
1124/**
1125 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1126 * @state: PM transition of the system being carried out.
1127 *
1128 * Execute the ->prepare() callback(s) for all devices.
1129 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	/* Walk from the head: parents prepare before their children. */
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		/* Drop the list lock: ->prepare() may sleep. */
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			/* -EAGAIN means "skip this device", not "abort". */
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		/* Entry may be gone if the device was removed meanwhile. */
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}
1166
1167/**
1168 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1169 * @state: PM transition of the system being carried out.
1170 *
1171 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1172 * callbacks for them.
1173 */
1174int dpm_suspend_start(pm_message_t state)
1175{
1176	int error;
1177
1178	error = dpm_prepare(state);
1179	if (!error)
1180		error = dpm_suspend(state);
1181	return error;
1182}
1183EXPORT_SYMBOL_GPL(dpm_suspend_start);
1184
1185void __suspend_report_result(const char *function, void *fn, int ret)
1186{
1187	if (ret)
1188		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1189}
1190EXPORT_SYMBOL_GPL(__suspend_report_result);
1191
1192/**
1193 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1194 * @dev: Device to wait for.
1195 * @subordinate: Device that needs to wait for @dev.
1196 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	/* Propagate any failure already recorded by an async thread. */
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);