
/drivers/base/power/main.c

https://bitbucket.org/zossso/android-kernel-2.6.34-motus

/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/timer.h>
#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static void dpm_drv_timeout(unsigned long data);
static DEFINE_TIMER(dpm_drv_wd, dpm_drv_timeout, 0, 0);

/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices. Protected by dpm_list_mtx.
 */
static bool transition_started;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.status = DPM_ON;
	init_completion(&dev->power.completion);
	pm_runtime_init(dev);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent) {
		if (dev->parent->power.status >= DPM_SUSPENDING)
			dev_warn(dev, "parent %s should not be sleeping\n",
				 dev_name(dev->parent));
	} else if (transition_started) {
		/*
		 * We refuse to register parentless devices while a PM
		 * transition is in progress in order to avoid leaving them
		 * unhandled down the road
		 */
		dev_WARN(dev, "Parentless device registered during a PM transition\n");
	}
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	list_move_tail(&dev->power.entry, &dpm_list);
}
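
/*
 * Timing helpers for the "initcall_debug" boot option:
 * initcall_debug_start() records the time before a device callback runs and
 * initcall_debug_report() logs the callback's duration and return value
 * afterwards.  Both are no-ops unless initcall_debug is set.
 */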
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i\n",
			dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}
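
/*
 * Illustration only (not part of this file): a minimal sketch of the
 * dev_pm_ops table that pm_op() dispatches on.  The names foo_suspend() and
 * foo_resume() are hypothetical; a real driver fills in whichever callbacks
 * it needs and leaves the rest NULL, in which case pm_op() simply skips them
 * for the corresponding event.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		// quiesce the hardware; return 0 on success or -errno
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		// re-initialize the hardware
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,
 *		.resume  = foo_resume,
 *	};
 */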

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;
	ktime_t calltime, delta, rettime;

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	s64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	}

End:
	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	list_for_each_entry(dev, &dpm_list, power.entry)
		if (dev->power.status > DPM_OFF) {
			int error;

			dev->power.status = DPM_OFF;
			error = device_resume_noirq(dev, state);
			if (error)
				pm_dev_err(dev, state, " early", error);
		}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->parent && dev->parent->power.status >= DPM_OFF)
		dpm_wait(dev->parent, async);
	device_lock(dev);

	dev->power.status = DPM_RESUMING;

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
		}
	}

End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}
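
/*
 * async_resume() is the async_schedule() callback used by dpm_resume(): it
 * runs device_resume() for one device on an async thread and drops the
 * reference taken when the work was scheduled.  is_async() gates async
 * handling on the device's async_suspend flag, the global pm_async_enabled
 * knob, and PM trace being disabled, since the PM trace mechanism relies on
 * a strictly ordered resume to identify a hanging device.
 */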
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

/**
 * dpm_drv_timeout - Driver suspend / resume watchdog handler
 * @data: struct device which timed out
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so
 * BUG() out for a crash-dump
 *
 */
static void dpm_drv_timeout(unsigned long data)
{
	struct device *dev = (struct device *) data;

	printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev),
	       (dev->driver ? dev->driver->name : "no driver"));
	BUG();
}

/**
 * dpm_drv_wdset - Sets up driver suspend/resume watchdog timer.
 * @dev: struct device which we're guarding.
 *
 */
static void dpm_drv_wdset(struct device *dev)
{
	dpm_drv_wd.data = (unsigned long) dev;
	mod_timer(&dpm_drv_wd, jiffies + (HZ * 3));
}

/**
 * dpm_drv_wdclr - clears driver suspend/resume watchdog timer.
 * @dev: struct device which we're no longer guarding.
 *
 */
static void dpm_drv_wdclr(struct device *dev)
{
	del_timer_sync(&dpm_drv_wd);
}
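
/*
 * Note that dpm_drv_wd is a single shared timer: dpm_drv_wdset() is armed
 * around each device_suspend() call on the main thread in dpm_suspend(), so
 * only the device currently being handled there gets the three-second guard;
 * devices suspending asynchronously are not individually timed.
 */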

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct list_head list;
	struct device *dev;
	ktime_t starttime = ktime_get();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	list_for_each_entry(dev, &dpm_list, power.entry) {
		if (dev->power.status < DPM_OFF)
			continue;

		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_list)) {
		dev = to_device(dpm_list.next);
		get_device(dev);
		if (dev->power.status >= DPM_OFF && !is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, "", error);
		} else if (dev->power.status == DPM_SUSPENDING) {
			/* Allow new children of the device to be registered */
			dev->power.status = DPM_RESUMING;
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
		pm_dev_dbg(dev, state, "completing ");
		dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		if (dev->power.status > DPM_ON) {
			dev->power.status = DPM_ON;
			mutex_unlock(&dpm_list_mtx);

			device_complete(dev, state);
			pm_runtime_put_sync(dev);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);

/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

End:
	return error;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
		error = device_suspend_noirq(dev, state);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			break;
		}
		dev->power.status = DPM_OFF_IRQ;
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
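
/*
 * Error recorded by the first asynchronous suspend that fails, if any; set by
 * async_suspend(), checked by __device_suspend() to abort further work, and
 * collected by dpm_suspend() once all async threads have finished.
 */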
static int async_error;

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);
	device_lock(dev);

	if (async_error)
		goto End;

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

	if (!error)
		dev->power.status = DPM_OFF;

End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		pm_dev_err(dev, pm_transition, " async", error);
		async_error = error;
	}

	put_device(dev);
}
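
/*
 * device_suspend - Suspend one device, possibly asynchronously.
 *
 * If async suspend is enabled for @dev, schedule __device_suspend() on an
 * async thread and return immediately; any error is recorded in async_error
 * and collected later by dpm_suspend().  Otherwise, suspend synchronously.
 */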
static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
	struct list_head list;
	ktime_t starttime = ktime_get();
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		dpm_drv_wdset(dev);
		error = device_suspend(dev);
		dpm_drv_wdclr(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
		if (async_error)
			break;
	}
	list_splice(&list, dpm_list.prev);
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing ");
		error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing type ");
		error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing class ");
		error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	}

End:
	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = true;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		dev->power.status = DPM_PREPARING;
		mutex_unlock(&dpm_list_mtx);

		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
			/* Wake-up requested during system sleep transition. */
			pm_runtime_put_sync(dev);
			error = -EBUSY;
		} else {
			error = device_prepare(dev, state);
		}

		mutex_lock(&dpm_list_mtx);
		if (error) {
			dev->power.status = DPM_ON;
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_ERR "PM: Failed to prepare device %s "
				"for power transition: error %d\n",
				kobject_name(&dev->kobj), error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_SUSPENDING;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	might_sleep();
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
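
/*
 * Invoked via the suspend_report_result() macro (see <linux/pm.h>), which
 * passes the caller's __func__ so that a failing PM callback is logged along
 * with the function that invoked it.
 */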
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
void device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
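
/*
 * Illustration only (not part of this file): a sketch of the order in which
 * the platform suspend core drives the entry points exported above during a
 * single suspend/resume cycle.  See kernel/power/suspend.c for the
 * authoritative caller.
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);	// prepare + suspend callbacks
 *	if (!error) {
 *		error = dpm_suspend_noirq(PMSG_SUSPEND);	// "late" callbacks, IRQs off
 *		if (!error) {
 *			// ... enter the sleep state ...
 *			dpm_resume_noirq(PMSG_RESUME);	// "early" callbacks, IRQs back on
 *		}
 *	}
 *	dpm_resume_end(PMSG_RESUME);	// resume + complete callbacks
 */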