/drivers/base/power/main.c
C | 1155 lines | 792 code | 151 blank | 212 comment | 143 complexity | b65741297cb1ee58ca483d4e83bc3ec5 MD5 | raw file
1/* 2 * drivers/base/power/main.c - Where the driver meets power management. 3 * 4 * Copyright (c) 2003 Patrick Mochel 5 * Copyright (c) 2003 Open Source Development Lab 6 * 7 * This file is released under the GPLv2 8 * 9 * 10 * The driver model core calls device_pm_add() when a device is registered. 11 * This will initialize the embedded device_pm_info object in the device 12 * and add it to the list of power-controlled devices. sysfs entries for 13 * controlling device power management will also be added. 14 * 15 * A separate list is used for keeping track of power info, because the power 16 * domain dependencies may differ from the ancestral dependencies that the 17 * subsystem list maintains. 18 */ 19 20#include <linux/device.h> 21#include <linux/kallsyms.h> 22#include <linux/mutex.h> 23#include <linux/pm.h> 24#include <linux/pm_runtime.h> 25#include <linux/resume-trace.h> 26#include <linux/interrupt.h> 27#include <linux/sched.h> 28#include <linux/async.h> 29#include <linux/suspend.h> 30#include <linux/timer.h> 31#ifdef CONFIG_PLAT_RK 32#include <linux/console.h> 33#endif 34 35#include "../base.h" 36#include "power.h" 37 38/* 39 * The entries in the dpm_list list are in a depth first order, simply 40 * because children are guaranteed to be discovered after parents, and 41 * are inserted at the back of the list on discovery. 42 * 43 * Since device_pm_add() may be called with a device lock held, 44 * we must never try to acquire a device lock while holding 45 * dpm_list_mutex. 46 */ 47 48LIST_HEAD(dpm_list); 49LIST_HEAD(dpm_prepared_list); 50LIST_HEAD(dpm_suspended_list); 51LIST_HEAD(dpm_noirq_list); 52 53static DEFINE_MUTEX(dpm_list_mtx); 54static pm_message_t pm_transition; 55 56static void dpm_drv_timeout(unsigned long data); 57struct dpm_drv_wd_data { 58 struct device *dev; 59 struct task_struct *tsk; 60}; 61 62static int async_error; 63 64/** 65 * device_pm_init - Initialize the PM-related part of a device object. 66 * @dev: Device object being initialized. 67 */ 68void device_pm_init(struct device *dev) 69{ 70 dev->power.is_prepared = false; 71 dev->power.is_suspended = false; 72 init_completion(&dev->power.completion); 73 complete_all(&dev->power.completion); 74 dev->power.wakeup = NULL; 75 spin_lock_init(&dev->power.lock); 76 pm_runtime_init(dev); 77 INIT_LIST_HEAD(&dev->power.entry); 78} 79 80/** 81 * device_pm_lock - Lock the list of active devices used by the PM core. 82 */ 83void device_pm_lock(void) 84{ 85 mutex_lock(&dpm_list_mtx); 86} 87 88/** 89 * device_pm_unlock - Unlock the list of active devices used by the PM core. 90 */ 91void device_pm_unlock(void) 92{ 93 mutex_unlock(&dpm_list_mtx); 94} 95 96/** 97 * device_pm_add - Add a device to the PM core's list of active devices. 98 * @dev: Device to add to the list. 99 */ 100void device_pm_add(struct device *dev) 101{ 102 pr_debug("PM: Adding info for %s:%s\n", 103 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); 104 mutex_lock(&dpm_list_mtx); 105 if (dev->parent && dev->parent->power.is_prepared) 106 dev_warn(dev, "parent %s should not be sleeping\n", 107 dev_name(dev->parent)); 108 list_add_tail(&dev->power.entry, &dpm_list); 109 mutex_unlock(&dpm_list_mtx); 110} 111 112/** 113 * device_pm_remove - Remove a device from the PM core's list of active devices. 114 * @dev: Device to be removed from the list. 115 */ 116void device_pm_remove(struct device *dev) 117{ 118 pr_debug("PM: Removing info for %s:%s\n", 119 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); 120 complete_all(&dev->power.completion); 121 mutex_lock(&dpm_list_mtx); 122 list_del_init(&dev->power.entry); 123 mutex_unlock(&dpm_list_mtx); 124 device_wakeup_disable(dev); 125 pm_runtime_remove(dev); 126} 127 128/** 129 * device_pm_move_before - Move device in the PM core's list of active devices. 130 * @deva: Device to move in dpm_list. 131 * @devb: Device @deva should come before. 132 */ 133void device_pm_move_before(struct device *deva, struct device *devb) 134{ 135 pr_debug("PM: Moving %s:%s before %s:%s\n", 136 deva->bus ? deva->bus->name : "No Bus", dev_name(deva), 137 devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); 138 /* Delete deva from dpm_list and reinsert before devb. */ 139 list_move_tail(&deva->power.entry, &devb->power.entry); 140} 141 142/** 143 * device_pm_move_after - Move device in the PM core's list of active devices. 144 * @deva: Device to move in dpm_list. 145 * @devb: Device @deva should come after. 146 */ 147void device_pm_move_after(struct device *deva, struct device *devb) 148{ 149 pr_debug("PM: Moving %s:%s after %s:%s\n", 150 deva->bus ? deva->bus->name : "No Bus", dev_name(deva), 151 devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); 152 /* Delete deva from dpm_list and reinsert after devb. */ 153 list_move(&deva->power.entry, &devb->power.entry); 154} 155 156/** 157 * device_pm_move_last - Move device to end of the PM core's list of devices. 158 * @dev: Device to move in dpm_list. 159 */ 160void device_pm_move_last(struct device *dev) 161{ 162 pr_debug("PM: Moving %s:%s to end of list\n", 163 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); 164 list_move_tail(&dev->power.entry, &dpm_list); 165} 166 167static ktime_t initcall_debug_start(struct device *dev) 168{ 169 ktime_t calltime = ktime_set(0, 0); 170 171 if (initcall_debug) { 172 pr_info("calling %s+ @ %i\n", 173 dev_name(dev), task_pid_nr(current)); 174 calltime = ktime_get(); 175 } 176 177 return calltime; 178} 179 180static void initcall_debug_report(struct device *dev, ktime_t calltime, 181 int error) 182{ 183 ktime_t delta, rettime; 184 185 if (initcall_debug) { 186 rettime = ktime_get(); 187 delta = ktime_sub(rettime, calltime); 188 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), 189 error, (unsigned long long)ktime_to_ns(delta) >> 10); 190 } 191} 192 193/** 194 * dpm_wait - Wait for a PM operation to complete. 195 * @dev: Device to wait for. 196 * @async: If unset, wait only if the device's power.async_suspend flag is set. 197 */ 198static void dpm_wait(struct device *dev, bool async) 199{ 200 if (!dev) 201 return; 202 203 if (async || (pm_async_enabled && dev->power.async_suspend)) 204 wait_for_completion(&dev->power.completion); 205} 206 207static int dpm_wait_fn(struct device *dev, void *async_ptr) 208{ 209 dpm_wait(dev, *((bool *)async_ptr)); 210 return 0; 211} 212 213static void dpm_wait_for_children(struct device *dev, bool async) 214{ 215 device_for_each_child(dev, &async, dpm_wait_fn); 216} 217 218/** 219 * pm_op - Execute the PM operation appropriate for given PM event. 220 * @dev: Device to handle. 221 * @ops: PM operations to choose from. 222 * @state: PM transition of the system being carried out. 223 */ 224static int pm_op(struct device *dev, 225 const struct dev_pm_ops *ops, 226 pm_message_t state) 227{ 228 int error = 0; 229 ktime_t calltime; 230 231 calltime = initcall_debug_start(dev); 232 233 switch (state.event) { 234#ifdef CONFIG_SUSPEND 235 case PM_EVENT_SUSPEND: 236 if (ops->suspend) { 237 error = ops->suspend(dev); 238 suspend_report_result(ops->suspend, error); 239 } 240 break; 241 case PM_EVENT_RESUME: 242 if (ops->resume) { 243 error = ops->resume(dev); 244 suspend_report_result(ops->resume, error); 245 } 246 break; 247#endif /* CONFIG_SUSPEND */ 248#ifdef CONFIG_HIBERNATE_CALLBACKS 249 case PM_EVENT_FREEZE: 250 case PM_EVENT_QUIESCE: 251 if (ops->freeze) { 252 error = ops->freeze(dev); 253 suspend_report_result(ops->freeze, error); 254 } 255 break; 256 case PM_EVENT_HIBERNATE: 257 if (ops->poweroff) { 258 error = ops->poweroff(dev); 259 suspend_report_result(ops->poweroff, error); 260 } 261 break; 262 case PM_EVENT_THAW: 263 case PM_EVENT_RECOVER: 264 if (ops->thaw) { 265 error = ops->thaw(dev); 266 suspend_report_result(ops->thaw, error); 267 } 268 break; 269 case PM_EVENT_RESTORE: 270 if (ops->restore) { 271 error = ops->restore(dev); 272 suspend_report_result(ops->restore, error); 273 } 274 break; 275#endif /* CONFIG_HIBERNATE_CALLBACKS */ 276 default: 277 error = -EINVAL; 278 } 279 280 initcall_debug_report(dev, calltime, error); 281 282 return error; 283} 284 285/** 286 * pm_noirq_op - Execute the PM operation appropriate for given PM event. 287 * @dev: Device to handle. 288 * @ops: PM operations to choose from. 289 * @state: PM transition of the system being carried out. 290 * 291 * The driver of @dev will not receive interrupts while this function is being 292 * executed. 293 */ 294static int pm_noirq_op(struct device *dev, 295 const struct dev_pm_ops *ops, 296 pm_message_t state) 297{ 298 int error = 0; 299 ktime_t calltime = ktime_set(0, 0), delta, rettime; 300 301 if (initcall_debug) { 302 pr_info("calling %s+ @ %i, parent: %s\n", 303 dev_name(dev), task_pid_nr(current), 304 dev->parent ? dev_name(dev->parent) : "none"); 305 calltime = ktime_get(); 306 } 307 308 switch (state.event) { 309#ifdef CONFIG_SUSPEND 310 case PM_EVENT_SUSPEND: 311 if (ops->suspend_noirq) { 312 error = ops->suspend_noirq(dev); 313 suspend_report_result(ops->suspend_noirq, error); 314 } 315 break; 316 case PM_EVENT_RESUME: 317 if (ops->resume_noirq) { 318 error = ops->resume_noirq(dev); 319 suspend_report_result(ops->resume_noirq, error); 320 } 321 break; 322#endif /* CONFIG_SUSPEND */ 323#ifdef CONFIG_HIBERNATE_CALLBACKS 324 case PM_EVENT_FREEZE: 325 case PM_EVENT_QUIESCE: 326 if (ops->freeze_noirq) { 327 error = ops->freeze_noirq(dev); 328 suspend_report_result(ops->freeze_noirq, error); 329 } 330 break; 331 case PM_EVENT_HIBERNATE: 332 if (ops->poweroff_noirq) { 333 error = ops->poweroff_noirq(dev); 334 suspend_report_result(ops->poweroff_noirq, error); 335 } 336 break; 337 case PM_EVENT_THAW: 338 case PM_EVENT_RECOVER: 339 if (ops->thaw_noirq) { 340 error = ops->thaw_noirq(dev); 341 suspend_report_result(ops->thaw_noirq, error); 342 } 343 break; 344 case PM_EVENT_RESTORE: 345 if (ops->restore_noirq) { 346 error = ops->restore_noirq(dev); 347 suspend_report_result(ops->restore_noirq, error); 348 } 349 break; 350#endif /* CONFIG_HIBERNATE_CALLBACKS */ 351 default: 352 error = -EINVAL; 353 } 354 355 if (initcall_debug) { 356 rettime = ktime_get(); 357 delta = ktime_sub(rettime, calltime); 358 printk("initcall %s_i+ returned %d after %Ld usecs\n", 359 dev_name(dev), error, 360 (unsigned long long)ktime_to_ns(delta) >> 10); 361 } 362 363 return error; 364} 365 366static char *pm_verb(int event) 367{ 368 switch (event) { 369 case PM_EVENT_SUSPEND: 370 return "suspend"; 371 case PM_EVENT_RESUME: 372 return "resume"; 373 case PM_EVENT_FREEZE: 374 return "freeze"; 375 case PM_EVENT_QUIESCE: 376 return "quiesce"; 377 case PM_EVENT_HIBERNATE: 378 return "hibernate"; 379 case PM_EVENT_THAW: 380 return "thaw"; 381 case PM_EVENT_RESTORE: 382 return "restore"; 383 case PM_EVENT_RECOVER: 384 return "recover"; 385 default: 386 return "(unknown PM event)"; 387 } 388} 389 390static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info) 391{ 392 dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event), 393 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ? 394 ", may wakeup" : ""); 395} 396 397static void pm_dev_err(struct device *dev, pm_message_t state, char *info, 398 int error) 399{ 400 printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n", 401 dev_name(dev), pm_verb(state.event), info, error); 402} 403 404static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) 405{ 406 ktime_t calltime; 407 u64 usecs64; 408 int usecs; 409 410 calltime = ktime_get(); 411 usecs64 = ktime_to_ns(ktime_sub(calltime, starttime)); 412 do_div(usecs64, NSEC_PER_USEC); 413 usecs = usecs64; 414 if (usecs == 0) 415 usecs = 1; 416 pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n", 417 info ?: "", info ? " " : "", pm_verb(state.event), 418 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); 419} 420 421/*------------------------- Resume routines -------------------------*/ 422 423/** 424 * device_resume_noirq - Execute an "early resume" callback for given device. 425 * @dev: Device to handle. 426 * @state: PM transition of the system being carried out. 427 * 428 * The driver of @dev will not receive interrupts while this function is being 429 * executed. 430 */ 431static int device_resume_noirq(struct device *dev, pm_message_t state) 432{ 433 int error = 0; 434 435 TRACE_DEVICE(dev); 436 TRACE_RESUME(0); 437 438 if (dev->pwr_domain) { 439 pm_dev_dbg(dev, state, "EARLY power domain "); 440 error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); 441 } else if (dev->type && dev->type->pm) { 442 pm_dev_dbg(dev, state, "EARLY type "); 443 error = pm_noirq_op(dev, dev->type->pm, state); 444 } else if (dev->class && dev->class->pm) { 445 pm_dev_dbg(dev, state, "EARLY class "); 446 error = pm_noirq_op(dev, dev->class->pm, state); 447 } else if (dev->bus && dev->bus->pm) { 448 pm_dev_dbg(dev, state, "EARLY "); 449 error = pm_noirq_op(dev, dev->bus->pm, state); 450 } 451 452 TRACE_RESUME(error); 453 return error; 454} 455 456/** 457 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices. 458 * @state: PM transition of the system being carried out. 459 * 460 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and 461 * enable device drivers to receive interrupts. 462 */ 463void dpm_resume_noirq(pm_message_t state) 464{ 465 ktime_t starttime = ktime_get(); 466 467 mutex_lock(&dpm_list_mtx); 468 while (!list_empty(&dpm_noirq_list)) { 469 struct device *dev = to_device(dpm_noirq_list.next); 470 int error; 471 472 get_device(dev); 473 list_move_tail(&dev->power.entry, &dpm_suspended_list); 474 mutex_unlock(&dpm_list_mtx); 475 476 error = device_resume_noirq(dev, state); 477 if (error) 478 pm_dev_err(dev, state, " early", error); 479 480 mutex_lock(&dpm_list_mtx); 481 put_device(dev); 482 } 483 mutex_unlock(&dpm_list_mtx); 484 dpm_show_time(starttime, state, "early"); 485 resume_device_irqs(); 486} 487EXPORT_SYMBOL_GPL(dpm_resume_noirq); 488 489/** 490 * legacy_resume - Execute a legacy (bus or class) resume callback for device. 491 * @dev: Device to resume. 492 * @cb: Resume callback to execute. 493 */ 494static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) 495{ 496 int error; 497 ktime_t calltime; 498 499 calltime = initcall_debug_start(dev); 500 501 error = cb(dev); 502 suspend_report_result(cb, error); 503 504 initcall_debug_report(dev, calltime, error); 505 506 return error; 507} 508 509/** 510 * device_resume - Execute "resume" callbacks for given device. 511 * @dev: Device to handle. 512 * @state: PM transition of the system being carried out. 513 * @async: If true, the device is being resumed asynchronously. 514 */ 515static int device_resume(struct device *dev, pm_message_t state, bool async) 516{ 517 int error = 0; 518 519 TRACE_DEVICE(dev); 520 TRACE_RESUME(0); 521 522 dpm_wait(dev->parent, async); 523 device_lock(dev); 524 525 /* 526 * This is a fib. But we'll allow new children to be added below 527 * a resumed device, even if the device hasn't been completed yet. 528 */ 529 dev->power.is_prepared = false; 530 531 if (!dev->power.is_suspended) 532 goto Unlock; 533 534 if (dev->pwr_domain) { 535 pm_dev_dbg(dev, state, "power domain "); 536 error = pm_op(dev, &dev->pwr_domain->ops, state); 537 goto End; 538 } 539 540 if (dev->type && dev->type->pm) { 541 pm_dev_dbg(dev, state, "type "); 542 error = pm_op(dev, dev->type->pm, state); 543 goto End; 544 } 545 546 if (dev->class) { 547 if (dev->class->pm) { 548 pm_dev_dbg(dev, state, "class "); 549 error = pm_op(dev, dev->class->pm, state); 550 goto End; 551 } else if (dev->class->resume) { 552 pm_dev_dbg(dev, state, "legacy class "); 553 error = legacy_resume(dev, dev->class->resume); 554 goto End; 555 } 556 } 557 558 if (dev->bus) { 559 if (dev->bus->pm) { 560 pm_dev_dbg(dev, state, ""); 561 error = pm_op(dev, dev->bus->pm, state); 562 } else if (dev->bus->resume) { 563 pm_dev_dbg(dev, state, "legacy "); 564 error = legacy_resume(dev, dev->bus->resume); 565 } 566 } 567 568 End: 569 dev->power.is_suspended = false; 570 571 Unlock: 572 device_unlock(dev); 573 complete_all(&dev->power.completion); 574 575 TRACE_RESUME(error); 576 return error; 577} 578 579static void async_resume(void *data, async_cookie_t cookie) 580{ 581 struct device *dev = (struct device *)data; 582 int error; 583 584 error = device_resume(dev, pm_transition, true); 585 if (error) 586 pm_dev_err(dev, pm_transition, " async", error); 587 put_device(dev); 588} 589 590static bool is_async(struct device *dev) 591{ 592 return dev->power.async_suspend && pm_async_enabled 593 && !pm_trace_is_enabled(); 594} 595 596/** 597 * dpm_drv_timeout - Driver suspend / resume watchdog handler 598 * @data: struct device which timed out 599 * 600 * Called when a driver has timed out suspending or resuming. 601 * There's not much we can do here to recover so 602 * BUG() out for a crash-dump 603 * 604 */ 605static void dpm_drv_timeout(unsigned long data) 606{ 607 struct dpm_drv_wd_data *wd_data = (void *)data; 608 struct device *dev = wd_data->dev; 609 struct task_struct *tsk = wd_data->tsk; 610 611 printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev), 612 (dev->driver ? dev->driver->name : "no driver")); 613 614#ifdef CONFIG_PLAT_RK 615 resume_console(); 616#endif 617 printk(KERN_EMERG "dpm suspend stack:\n"); 618 show_stack(tsk, NULL); 619 620 BUG(); 621} 622 623/** 624 * dpm_resume - Execute "resume" callbacks for non-sysdev devices. 625 * @state: PM transition of the system being carried out. 626 * 627 * Execute the appropriate "resume" callback for all devices whose status 628 * indicates that they are suspended. 629 */ 630void dpm_resume(pm_message_t state) 631{ 632 struct device *dev; 633 ktime_t starttime = ktime_get(); 634 635 might_sleep(); 636 637 mutex_lock(&dpm_list_mtx); 638 pm_transition = state; 639 async_error = 0; 640 641 list_for_each_entry(dev, &dpm_suspended_list, power.entry) { 642 INIT_COMPLETION(dev->power.completion); 643 if (is_async(dev)) { 644 get_device(dev); 645 async_schedule(async_resume, dev); 646 } 647 } 648 649 while (!list_empty(&dpm_suspended_list)) { 650 dev = to_device(dpm_suspended_list.next); 651 get_device(dev); 652 if (!is_async(dev)) { 653 int error; 654 655 mutex_unlock(&dpm_list_mtx); 656 657 error = device_resume(dev, state, false); 658 if (error) 659 pm_dev_err(dev, state, "", error); 660 661 mutex_lock(&dpm_list_mtx); 662 } 663 if (!list_empty(&dev->power.entry)) 664 list_move_tail(&dev->power.entry, &dpm_prepared_list); 665 put_device(dev); 666 } 667 mutex_unlock(&dpm_list_mtx); 668 async_synchronize_full(); 669 dpm_show_time(starttime, state, NULL); 670} 671 672/** 673 * device_complete - Complete a PM transition for given device. 674 * @dev: Device to handle. 675 * @state: PM transition of the system being carried out. 676 */ 677static void device_complete(struct device *dev, pm_message_t state) 678{ 679 device_lock(dev); 680 681 if (dev->pwr_domain) { 682 pm_dev_dbg(dev, state, "completing power domain "); 683 if (dev->pwr_domain->ops.complete) 684 dev->pwr_domain->ops.complete(dev); 685 } else if (dev->type && dev->type->pm) { 686 pm_dev_dbg(dev, state, "completing type "); 687 if (dev->type->pm->complete) 688 dev->type->pm->complete(dev); 689 } else if (dev->class && dev->class->pm) { 690 pm_dev_dbg(dev, state, "completing class "); 691 if (dev->class->pm->complete) 692 dev->class->pm->complete(dev); 693 } else if (dev->bus && dev->bus->pm) { 694 pm_dev_dbg(dev, state, "completing "); 695 if (dev->bus->pm->complete) 696 dev->bus->pm->complete(dev); 697 } 698 699 device_unlock(dev); 700} 701 702/** 703 * dpm_complete - Complete a PM transition for all non-sysdev devices. 704 * @state: PM transition of the system being carried out. 705 * 706 * Execute the ->complete() callbacks for all devices whose PM status is not 707 * DPM_ON (this allows new devices to be registered). 708 */ 709void dpm_complete(pm_message_t state) 710{ 711 struct list_head list; 712 713 might_sleep(); 714 715 INIT_LIST_HEAD(&list); 716 mutex_lock(&dpm_list_mtx); 717 while (!list_empty(&dpm_prepared_list)) { 718 struct device *dev = to_device(dpm_prepared_list.prev); 719 720 get_device(dev); 721 dev->power.is_prepared = false; 722 list_move(&dev->power.entry, &list); 723 mutex_unlock(&dpm_list_mtx); 724 725 device_complete(dev, state); 726 727 mutex_lock(&dpm_list_mtx); 728 put_device(dev); 729 } 730 list_splice(&list, &dpm_list); 731 mutex_unlock(&dpm_list_mtx); 732} 733 734/** 735 * dpm_resume_end - Execute "resume" callbacks and complete system transition. 736 * @state: PM transition of the system being carried out. 737 * 738 * Execute "resume" callbacks for all devices and complete the PM transition of 739 * the system. 740 */ 741void dpm_resume_end(pm_message_t state) 742{ 743 dpm_resume(state); 744 dpm_complete(state); 745} 746EXPORT_SYMBOL_GPL(dpm_resume_end); 747 748 749/*------------------------- Suspend routines -------------------------*/ 750 751/** 752 * resume_event - Return a "resume" message for given "suspend" sleep state. 753 * @sleep_state: PM message representing a sleep state. 754 * 755 * Return a PM message representing the resume event corresponding to given 756 * sleep state. 757 */ 758static pm_message_t resume_event(pm_message_t sleep_state) 759{ 760 switch (sleep_state.event) { 761 case PM_EVENT_SUSPEND: 762 return PMSG_RESUME; 763 case PM_EVENT_FREEZE: 764 case PM_EVENT_QUIESCE: 765 return PMSG_RECOVER; 766 case PM_EVENT_HIBERNATE: 767 return PMSG_RESTORE; 768 } 769 return PMSG_ON; 770} 771 772/** 773 * device_suspend_noirq - Execute a "late suspend" callback for given device. 774 * @dev: Device to handle. 775 * @state: PM transition of the system being carried out. 776 * 777 * The driver of @dev will not receive interrupts while this function is being 778 * executed. 779 */ 780static int device_suspend_noirq(struct device *dev, pm_message_t state) 781{ 782 int error; 783 784 if (dev->pwr_domain) { 785 pm_dev_dbg(dev, state, "LATE power domain "); 786 error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); 787 if (error) 788 return error; 789 } else if (dev->type && dev->type->pm) { 790 pm_dev_dbg(dev, state, "LATE type "); 791 error = pm_noirq_op(dev, dev->type->pm, state); 792 if (error) 793 return error; 794 } else if (dev->class && dev->class->pm) { 795 pm_dev_dbg(dev, state, "LATE class "); 796 error = pm_noirq_op(dev, dev->class->pm, state); 797 if (error) 798 return error; 799 } else if (dev->bus && dev->bus->pm) { 800 pm_dev_dbg(dev, state, "LATE "); 801 error = pm_noirq_op(dev, dev->bus->pm, state); 802 if (error) 803 return error; 804 } 805 806 return 0; 807} 808 809/** 810 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices. 811 * @state: PM transition of the system being carried out. 812 * 813 * Prevent device drivers from receiving interrupts and call the "noirq" suspend 814 * handlers for all non-sysdev devices. 815 */ 816int dpm_suspend_noirq(pm_message_t state) 817{ 818 ktime_t starttime = ktime_get(); 819 int error = 0; 820 821 suspend_device_irqs(); 822 mutex_lock(&dpm_list_mtx); 823 while (!list_empty(&dpm_suspended_list)) { 824 struct device *dev = to_device(dpm_suspended_list.prev); 825 826 get_device(dev); 827 mutex_unlock(&dpm_list_mtx); 828 829 error = device_suspend_noirq(dev, state); 830 831 mutex_lock(&dpm_list_mtx); 832 if (error) { 833 pm_dev_err(dev, state, " late", error); 834 put_device(dev); 835 break; 836 } 837 if (!list_empty(&dev->power.entry)) 838 list_move(&dev->power.entry, &dpm_noirq_list); 839 put_device(dev); 840 } 841 mutex_unlock(&dpm_list_mtx); 842 if (error) 843 dpm_resume_noirq(resume_event(state)); 844 else 845 dpm_show_time(starttime, state, "late"); 846 return error; 847} 848EXPORT_SYMBOL_GPL(dpm_suspend_noirq); 849 850/** 851 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. 852 * @dev: Device to suspend. 853 * @state: PM transition of the system being carried out. 854 * @cb: Suspend callback to execute. 855 */ 856static int legacy_suspend(struct device *dev, pm_message_t state, 857 int (*cb)(struct device *dev, pm_message_t state)) 858{ 859 int error; 860 ktime_t calltime; 861 862 calltime = initcall_debug_start(dev); 863 864 error = cb(dev, state); 865 suspend_report_result(cb, error); 866 867 initcall_debug_report(dev, calltime, error); 868 869 return error; 870} 871 872/** 873 * device_suspend - Execute "suspend" callbacks for given device. 874 * @dev: Device to handle. 875 * @state: PM transition of the system being carried out. 876 * @async: If true, the device is being suspended asynchronously. 877 */ 878static int __device_suspend(struct device *dev, pm_message_t state, bool async) 879{ 880 int error = 0; 881 struct timer_list timer; 882 struct dpm_drv_wd_data data; 883 884 dpm_wait_for_children(dev, async); 885 886 data.dev = dev; 887 data.tsk = get_current(); 888 init_timer_on_stack(&timer); 889 timer.expires = jiffies + HZ * 12; 890 timer.function = dpm_drv_timeout; 891 timer.data = (unsigned long)&data; 892 add_timer(&timer); 893 894 device_lock(dev); 895 896 if (async_error) 897 goto Unlock; 898 899 if (pm_wakeup_pending()) { 900 async_error = -EBUSY; 901 goto Unlock; 902 } 903 904 if (dev->pwr_domain) { 905 pm_dev_dbg(dev, state, "power domain "); 906 error = pm_op(dev, &dev->pwr_domain->ops, state); 907 goto End; 908 } 909 910 if (dev->type && dev->type->pm) { 911 pm_dev_dbg(dev, state, "type "); 912 error = pm_op(dev, dev->type->pm, state); 913 goto End; 914 } 915 916 if (dev->class) { 917 if (dev->class->pm) { 918 pm_dev_dbg(dev, state, "class "); 919 error = pm_op(dev, dev->class->pm, state); 920 goto End; 921 } else if (dev->class->suspend) { 922 pm_dev_dbg(dev, state, "legacy class "); 923 error = legacy_suspend(dev, state, dev->class->suspend); 924 goto End; 925 } 926 } 927 928 if (dev->bus) { 929 if (dev->bus->pm) { 930 pm_dev_dbg(dev, state, ""); 931 error = pm_op(dev, dev->bus->pm, state); 932 } else if (dev->bus->suspend) { 933 pm_dev_dbg(dev, state, "legacy "); 934 error = legacy_suspend(dev, state, dev->bus->suspend); 935 } 936 } 937 938 End: 939 dev->power.is_suspended = !error; 940 941 Unlock: 942 device_unlock(dev); 943 944 del_timer_sync(&timer); 945 destroy_timer_on_stack(&timer); 946 947 complete_all(&dev->power.completion); 948 949 if (error) 950 async_error = error; 951 952 return error; 953} 954 955static void async_suspend(void *data, async_cookie_t cookie) 956{ 957 struct device *dev = (struct device *)data; 958 int error; 959 960 error = __device_suspend(dev, pm_transition, true); 961 if (error) 962 pm_dev_err(dev, pm_transition, " async", error); 963 964 put_device(dev); 965} 966 967static int device_suspend(struct device *dev) 968{ 969 INIT_COMPLETION(dev->power.completion); 970 971 if (pm_async_enabled && dev->power.async_suspend) { 972 get_device(dev); 973 async_schedule(async_suspend, dev); 974 return 0; 975 } 976 977 return __device_suspend(dev, pm_transition, false); 978} 979 980/** 981 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. 982 * @state: PM transition of the system being carried out. 983 */ 984int dpm_suspend(pm_message_t state) 985{ 986 ktime_t starttime = ktime_get(); 987 int error = 0; 988 989 might_sleep(); 990 991 mutex_lock(&dpm_list_mtx); 992 pm_transition = state; 993 async_error = 0; 994 while (!list_empty(&dpm_prepared_list)) { 995 struct device *dev = to_device(dpm_prepared_list.prev); 996 997 get_device(dev); 998 mutex_unlock(&dpm_list_mtx); 999 1000 error = device_suspend(dev); 1001 1002 mutex_lock(&dpm_list_mtx); 1003 if (error) { 1004 pm_dev_err(dev, state, "", error); 1005 put_device(dev); 1006 break; 1007 } 1008 if (!list_empty(&dev->power.entry)) 1009 list_move(&dev->power.entry, &dpm_suspended_list); 1010 put_device(dev); 1011 if (async_error) 1012 break; 1013 } 1014 mutex_unlock(&dpm_list_mtx); 1015 async_synchronize_full(); 1016 if (!error) 1017 error = async_error; 1018 if (!error) 1019 dpm_show_time(starttime, state, NULL); 1020 return error; 1021} 1022 1023/** 1024 * device_prepare - Prepare a device for system power transition. 1025 * @dev: Device to handle. 1026 * @state: PM transition of the system being carried out. 1027 * 1028 * Execute the ->prepare() callback(s) for given device. No new children of the 1029 * device may be registered after this function has returned. 1030 */ 1031static int device_prepare(struct device *dev, pm_message_t state) 1032{ 1033 int error = 0; 1034 1035 device_lock(dev); 1036 1037 if (dev->pwr_domain) { 1038 pm_dev_dbg(dev, state, "preparing power domain "); 1039 if (dev->pwr_domain->ops.prepare) 1040 error = dev->pwr_domain->ops.prepare(dev); 1041 suspend_report_result(dev->pwr_domain->ops.prepare, error); 1042 if (error) 1043 goto End; 1044 } else if (dev->type && dev->type->pm) { 1045 pm_dev_dbg(dev, state, "preparing type "); 1046 if (dev->type->pm->prepare) 1047 error = dev->type->pm->prepare(dev); 1048 suspend_report_result(dev->type->pm->prepare, error); 1049 if (error) 1050 goto End; 1051 } else if (dev->class && dev->class->pm) { 1052 pm_dev_dbg(dev, state, "preparing class "); 1053 if (dev->class->pm->prepare) 1054 error = dev->class->pm->prepare(dev); 1055 suspend_report_result(dev->class->pm->prepare, error); 1056 if (error) 1057 goto End; 1058 } else if (dev->bus && dev->bus->pm) { 1059 pm_dev_dbg(dev, state, "preparing "); 1060 if (dev->bus->pm->prepare) 1061 error = dev->bus->pm->prepare(dev); 1062 suspend_report_result(dev->bus->pm->prepare, error); 1063 } 1064 1065 End: 1066 device_unlock(dev); 1067 1068 return error; 1069} 1070 1071/** 1072 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition. 1073 * @state: PM transition of the system being carried out. 1074 * 1075 * Execute the ->prepare() callback(s) for all devices. 1076 */ 1077int dpm_prepare(pm_message_t state) 1078{ 1079 int error = 0; 1080 1081 might_sleep(); 1082 1083 mutex_lock(&dpm_list_mtx); 1084 while (!list_empty(&dpm_list)) { 1085 struct device *dev = to_device(dpm_list.next); 1086 1087 get_device(dev); 1088 mutex_unlock(&dpm_list_mtx); 1089 1090 pm_runtime_get_noresume(dev); 1091 if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) 1092 pm_wakeup_event(dev, 0); 1093 1094 pm_runtime_put_sync(dev); 1095 error = pm_wakeup_pending() ? 1096 -EBUSY : device_prepare(dev, state); 1097 1098 mutex_lock(&dpm_list_mtx); 1099 if (error) { 1100 if (error == -EAGAIN) { 1101 put_device(dev); 1102 error = 0; 1103 continue; 1104 } 1105 printk(KERN_INFO "PM: Device %s not prepared " 1106 "for power transition: code %d\n", 1107 dev_name(dev), error); 1108 put_device(dev); 1109 break; 1110 } 1111 dev->power.is_prepared = true; 1112 if (!list_empty(&dev->power.entry)) 1113 list_move_tail(&dev->power.entry, &dpm_prepared_list); 1114 put_device(dev); 1115 } 1116 mutex_unlock(&dpm_list_mtx); 1117 return error; 1118} 1119 1120/** 1121 * dpm_suspend_start - Prepare devices for PM transition and suspend them. 1122 * @state: PM transition of the system being carried out. 1123 * 1124 * Prepare all non-sysdev devices for system PM transition and execute "suspend" 1125 * callbacks for them. 1126 */ 1127int dpm_suspend_start(pm_message_t state) 1128{ 1129 int error; 1130 1131 error = dpm_prepare(state); 1132 if (!error) 1133 error = dpm_suspend(state); 1134 return error; 1135} 1136EXPORT_SYMBOL_GPL(dpm_suspend_start); 1137 1138void __suspend_report_result(const char *function, void *fn, int ret) 1139{ 1140 if (ret) 1141 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret); 1142} 1143EXPORT_SYMBOL_GPL(__suspend_report_result); 1144 1145/** 1146 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete. 1147 * @dev: Device to wait for. 1148 * @subordinate: Device that needs to wait for @dev. 1149 */ 1150int device_pm_wait_for_dev(struct device *subordinate, struct device *dev) 1151{ 1152 dpm_wait(dev, subordinate->power.async_suspend); 1153 return async_error; 1154} 1155EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);