/drivers/base/power/main.c
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static void dpm_drv_timeout(unsigned long data);
struct dpm_drv_wd_data {
	struct device *dev;
	struct task_struct *tsk;
};

#ifdef CONFIG_HUAWEI_RPC_CRASH_DEBUG
void print_dpm_list(void)
{
	int i = 0;
	struct device *dev;

	mutex_lock(&dpm_list_mtx);
	printk(KERN_INFO "HW debug ");

	list_for_each_entry(dev, &dpm_list, power.entry) {
		printk(KERN_CONT "%s,", dev_name(dev));
		if (++i % 20 == 0)
			printk(KERN_CONT "\n");
	}
	/* Terminate the last (possibly partial) line of the dump. */
	printk(KERN_CONT "\n");
	mutex_unlock(&dpm_list_mtx);
}
EXPORT_SYMBOL_GPL(print_dpm_list);
#endif

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
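/*
 * Illustrative sketch, not part of the original file: a caller that wants
 * a consistent snapshot of the suspend ordering can walk dpm_list under
 * device_pm_lock(), much like print_dpm_list() above.  Per the locking
 * comment near the top of this file, no device lock may be acquired while
 * dpm_list_mtx is held.  dump_pm_order() is a hypothetical name:
 *
 *	static void dump_pm_order(void)
 *	{
 *		struct device *dev;
 *
 *		device_pm_lock();
 *		list_for_each_entry(dev, &dpm_list, power.entry)
 *			pr_info("PM order: %s\n", dev_name(dev));
 *		device_pm_unlock();
 *	}
 */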
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i\n",
			dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}
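/*
 * With the initcall_debug boot parameter set, the two helpers above
 * bracket every device callback in the kernel log.  Going by the format
 * strings they use, the output looks roughly like this (device name and
 * timing illustrative only):
 *
 *	calling mmc0+ @ 1234
 *	call mmc0+ returned 0 after 1523 usecs
 */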
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}
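/*
 * pm_op() dispatches on the PM event, so a driver only fills in the
 * dev_pm_ops slots it cares about.  A minimal sketch with hypothetical
 * driver callbacks (foo_suspend/foo_resume are not from this file):
 *
 *	static int foo_suspend(struct device *dev) { ... }
 *	static int foo_resume(struct device *dev) { ... }
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,
 *		.resume  = foo_resume,
 *	};
 *
 * With only these two slots set, hibernation events hit the NULL checks
 * above and become no-ops for the device.
 */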
/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
		       const struct dev_pm_ops *ops,
		       pm_message_t state)
{
	int error = 0;
	ktime_t calltime = ktime_set(0, 0), delta, rettime;

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
#ifdef CONFIG_HUAWEI_RPC_CRASH_DEBUG
			printk(KERN_ERR "HUAWEI RPC DEBUG: action: suspend_noirq dev name=%s, func=%pF\n",
			       dev_name(dev), ops->suspend_noirq);
			if (dev->driver && dev->driver->pm &&
			    dev->driver->pm->suspend_noirq)
				print_symbol("driver->pm->suspend_noirq: %s\n",
					     (unsigned long)dev->driver->pm->suspend_noirq);
#endif
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
#ifdef CONFIG_HUAWEI_RPC_CRASH_DEBUG
			printk(KERN_ERR "HUAWEI RPC DEBUG: action: resume_noirq dev name=%s, func=%pF\n",
			       dev_name(dev), ops->resume_noirq);
			if (dev->driver && dev->driver->pm &&
			    dev->driver->pm->resume_noirq)
				print_symbol("driver->pm->resume_noirq: %s\n",
					     (unsigned long)dev->driver->pm->resume_noirq);
#endif
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
#ifdef CONFIG_HUAWEI_RPC_CRASH_DEBUG
			printk(KERN_ERR "HUAWEI RPC DEBUG: action: freeze_noirq dev name=%s, func=%pF\n",
			       dev_name(dev), ops->freeze_noirq);
#endif
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
#ifdef CONFIG_HUAWEI_RPC_CRASH_DEBUG
			printk(KERN_ERR "HUAWEI RPC DEBUG: action: poweroff_noirq dev name=%s, func=%pF\n",
			       dev_name(dev), ops->poweroff_noirq);
#endif
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
#ifdef CONFIG_HUAWEI_RPC_CRASH_DEBUG
			printk(KERN_ERR "HUAWEI RPC DEBUG: action: thaw_noirq dev name=%s, func=%pF\n",
			       dev_name(dev), ops->thaw_noirq);
#endif
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
#ifdef CONFIG_HUAWEI_RPC_CRASH_DEBUG
			printk(KERN_ERR "HUAWEI RPC DEBUG: action: restore_noirq dev name=%s, func=%pF\n",
			       dev_name(dev), ops->restore_noirq);
#endif
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
		       dev_name(dev), error,
		       (unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}
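/*
 * The helpers below stitch the pm_verb() string into one-line log
 * messages.  For example, going by the pm_dev_err() format, a failed
 * late suspend would be reported as (device name and error code
 * illustrative only):
 *
 *	PM: Device 1-1 failed to suspend late: error -16
 */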
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
		       int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
	       dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "EARLY power domain ");
		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

#ifdef CONFIG_HUAWEI_RPC_CRASH_DEBUG
		printk(KERN_ERR "HUAWEI RPC DEBUG: resuming_noirq %s\n",
		       dev_name(dev));
#endif
		error = device_resume_noirq(dev, state);
#ifdef CONFIG_HUAWEI_RPC_CRASH_DEBUG
		printk(KERN_ERR "HUAWEI RPC DEBUG: resumed_noirq %s error = %d\n",
		       dev_name(dev), error);
#endif
		if (error)
			pm_dev_err(dev, state, " early", error);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
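/*
 * Note the selection order used by device_resume_noirq() above (and by
 * the other per-device dispatchers in this file): power domain, then
 * device type, then class, then bus; the first match wins.  So a device
 * in a power domain gets the domain's noirq callbacks even if its bus
 * also defines dev_pm_ops.  A hypothetical sketch of wiring a domain,
 * assuming the dev_power_domain type this kernel's dev->pwr_domain
 * field points to:
 *
 *	static struct dev_power_domain foo_pwr_domain = {
 *		.ops = {
 *			.suspend_noirq = foo_domain_suspend_noirq,
 *			.resume_noirq  = foo_domain_resume_noirq,
 *		},
 *	};
 *	...
 *	dev->pwr_domain = &foo_pwr_domain;
 */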
/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pwr_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
	}

 End:
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

/**
 * dpm_drv_timeout - Driver suspend / resume watchdog handler
 * @data: struct device which timed out
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover, so BUG() out for
 * a crash-dump.
 */
static void dpm_drv_timeout(unsigned long data)
{
	struct dpm_drv_wd_data *wd_data = (void *)data;
	struct device *dev = wd_data->dev;
	struct task_struct *tsk = wd_data->tsk;

	printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev),
	       (dev->driver ? dev->driver->name : "no driver"));

	printk(KERN_EMERG "dpm suspend stack:\n");
	show_stack(tsk, NULL);

	BUG();
}
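/*
 * Devices take the synchronous path unless they opt in to the async one
 * that is_async() gates.  A driver that knows its device has no ordering
 * constraints beyond its parent and children can opt in from probe();
 * a hypothetical sketch, assuming device_enable_async_suspend() from
 * linux/device.h:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		device_enable_async_suspend(&pdev->dev);
 *		...
 *	}
 *
 * The PM core still honors parent/child ordering for async devices via
 * dpm_wait() on the per-device completions set up in device_pm_init().
 */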
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

#ifdef CONFIG_HUAWEI_RPC_CRASH_DEBUG
			printk(KERN_ERR "HUAWEI RPC DEBUG: resuming %s\n",
			       dev_name(dev));
#endif
			error = device_resume(dev, state, false);
#ifdef CONFIG_HUAWEI_RPC_CRASH_DEBUG
			printk(KERN_ERR "HUAWEI RPC DEBUG: resumed %s error=%d\n",
			       dev_name(dev), error);
#endif
			if (error)
				pm_dev_err(dev, state, "", error);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "completing power domain ");
		if (dev->pwr_domain->ops.complete)
			dev->pwr_domain->ops.complete(dev);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "completing type ");
		if (dev->type->pm->complete)
			dev->type->pm->complete(dev);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "completing class ");
		if (dev->class->pm->complete)
			dev->class->pm->complete(dev);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "completing ");
		if (dev->bus->pm->complete)
			dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
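/*
 * ->prepare() and ->complete() bracket the whole transition: prepare
 * runs before any suspend callback and blocks registration of new
 * children, complete runs after every resume callback.  A minimal
 * sketch of a subsystem using the pair to quiesce itself (hypothetical
 * names, not from this file):
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return foo_try_quiesce(dev) ? 0 : -EAGAIN;
 *	}
 *
 *	static void foo_complete(struct device *dev)
 *	{
 *		foo_restart(dev);
 *	}
 *
 * A -EAGAIN from ->prepare() is treated specially by dpm_prepare()
 * below: the device is left on dpm_list and skipped rather than
 * failing the whole transition.
 */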
/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error;

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "LATE power domain ");
		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
		if (error)
			return error;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			return error;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			return error;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			return error;
	}

	return 0;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
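/*
 * Taken together, the entry points exported from this file are called in
 * pairs around the platform's own "go to sleep" step.  A rough sketch of
 * the order used by the suspend core (simplified; see
 * kernel/power/suspend.c for the real sequence):
 *
 *	dpm_suspend_start(PMSG_SUSPEND);   ->prepare() + ->suspend()
 *	dpm_suspend_noirq(PMSG_SUSPEND);   ->suspend_noirq(), IRQs disabled
 *	... enter the sleep state ...
 *	dpm_resume_noirq(PMSG_RESUME);     ->resume_noirq(), IRQs re-enabled
 *	dpm_resume_end(PMSG_RESUME);       ->resume() + ->complete()
 */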
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;
	struct timer_list timer;
	struct dpm_drv_wd_data data;

	dpm_wait_for_children(dev, async);

	data.dev = dev;
	data.tsk = get_current();
	init_timer_on_stack(&timer);
	timer.expires = jiffies + HZ * 12;
	timer.function = dpm_drv_timeout;
	timer.data = (unsigned long)&data;
	add_timer(&timer);

	device_lock(dev);

	if (async_error)
		goto Unlock;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Unlock;
	}

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pwr_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 End:
	dev->power.is_suspended = !error;

 Unlock:
	device_unlock(dev);

	del_timer_sync(&timer);
	destroy_timer_on_stack(&timer);

	complete_all(&dev->power.completion);

	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}
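/*
 * The on-stack timer armed in __device_suspend() gives each device's
 * suspend callback 12 seconds (HZ * 12) before dpm_drv_timeout() fires,
 * dumps the suspending task's stack, and BUG()s so a crash dump shows
 * which driver wedged.  Going by dpm_drv_timeout()'s format strings, the
 * console output looks roughly like (device and driver names
 * illustrative only):
 *
 *	**** DPM device timeout: 1-1.2 (usb)
 *	dpm suspend stack:
 *	...stack trace of the suspending task...
 */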
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of
 * the device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "preparing power domain ");
		if (dev->pwr_domain->ops.prepare)
			error = dev->pwr_domain->ops.prepare(dev);
		suspend_report_result(dev->pwr_domain->ops.prepare, error);
		if (error)
			goto End;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "preparing type ");
		if (dev->type->pm->prepare)
			error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "preparing class ");
		if (dev->class->pm->prepare)
			error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "preparing ");
		if (dev->bus->pm->prepare)
			error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
	}

 End:
	device_unlock(dev);

	return error;
}
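/*
 * dpm_prepare() below runs a runtime-PM barrier on every device before
 * calling device_prepare(): pm_runtime_get_noresume() pins the device,
 * pm_runtime_barrier() flushes any runtime-PM request already in flight,
 * and a barrier-triggered resume of a wakeup-capable device is recorded
 * via pm_wakeup_event() so pm_wakeup_pending() can abort the transition
 * with -EBUSY.  device_may_wakeup() only returns true if the driver
 * declared the capability, e.g. (hypothetical probe snippet):
 *
 *	device_init_wakeup(&pdev->dev, true);
 */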
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
			pm_wakeup_event(dev, 0);

		pm_runtime_put_sync(dev);
		error = pm_wakeup_pending() ?
				-EBUSY : device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
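/*
 * device_pm_wait_for_dev() lets a driver express a suspend/resume
 * dependency that the parent/child tree cannot: the subordinate blocks
 * until the other device's transition has finished.  A hypothetical
 * sketch from a consumer driver's resume path (struct foo, foo_resume()
 * and foo_hw_init() are illustrative names):
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *priv = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, priv->supplier_dev);
 *		if (error)
 *			return error;
 *		return foo_hw_init(priv);
 *	}
 */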