PageRenderTime 59ms CodeModel.GetById 24ms RepoModel.GetById 0ms app.codeStats 1ms

/drivers/base/power/main.c

https://bitbucket.org/mifl/android_kernel_qrd_msm
C | 1202 lines | 834 code | 155 blank | 213 comment | 152 complexity | 29e6b3a2b6c465da61f7d862234e5c16 MD5 | raw file
Possible License(s): GPL-2.0, LGPL-2.0, AGPL-1.0
  1. /*
  2. * drivers/base/power/main.c - Where the driver meets power management.
  3. *
  4. * Copyright (c) 2003 Patrick Mochel
  5. * Copyright (c) 2003 Open Source Development Lab
  6. * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
  7. *
  8. * This file is released under the GPLv2
  9. *
  10. *
  11. * The driver model core calls device_pm_add() when a device is registered.
  12. * This will initialize the embedded device_pm_info object in the device
  13. * and add it to the list of power-controlled devices. sysfs entries for
  14. * controlling device power management will also be added.
  15. *
  16. * A separate list is used for keeping track of power info, because the power
  17. * domain dependencies may differ from the ancestral dependencies that the
  18. * subsystem list maintains.
  19. */
  20. #include <linux/device.h>
  21. #include <linux/kallsyms.h>
  22. #include <linux/mutex.h>
  23. #include <linux/pm.h>
  24. #include <linux/pm_runtime.h>
  25. #include <linux/resume-trace.h>
  26. #include <linux/interrupt.h>
  27. #include <linux/sched.h>
  28. #include <linux/async.h>
  29. #include <linux/suspend.h>
  30. #include <linux/timer.h>
  31. #include "../base.h"
  32. #include "power.h"
  33. #ifdef CONFIG_MSM_SM_EVENT
  34. #include <linux/sm_event_log.h>
  35. #include <linux/sm_event.h>
  36. #endif
  37. /*
  38. * The entries in the dpm_list list are in a depth first order, simply
  39. * because children are guaranteed to be discovered after parents, and
  40. * are inserted at the back of the list on discovery.
  41. *
  42. * Since device_pm_add() may be called with a device lock held,
  43. * we must never try to acquire a device lock while holding
  44. * dpm_list_mutex.
  45. */
/*
 * Device lists used by the PM core.  All registered devices live on
 * dpm_list; during a system sleep transition each device migrates to
 * dpm_prepared_list, then dpm_suspended_list, then dpm_noirq_list as
 * successive phases complete (and back again on resume).
 */
LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

/* Protects all of the dpm_* lists above. */
static DEFINE_MUTEX(dpm_list_mtx);

/* Transition currently in progress, consumed by the async suspend/resume paths. */
static pm_message_t pm_transition;

static void dpm_drv_timeout(unsigned long data);

/*
 * Context passed to the dpm_drv_timeout() watchdog: the device being
 * suspended/resumed and the task driving the operation (for a stack dump).
 */
struct dpm_drv_wd_data {
	struct device *dev;
	struct task_struct *tsk;
};

/* First error reported by an async suspend thread, checked by dpm_suspend(). */
static int async_error;
  58. /**
  59. * device_pm_init - Initialize the PM-related part of a device object.
  60. * @dev: Device object being initialized.
  61. */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	/*
	 * Mark the (nonexistent) previous PM operation as finished so the
	 * first dpm_wait() on this device does not block.
	 */
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
}
  73. /**
  74. * device_pm_lock - Lock the list of active devices used by the PM core.
  75. */
void device_pm_lock(void)
{
	/* Serializes all access to the dpm_* device lists. */
	mutex_lock(&dpm_list_mtx);
}
  80. /**
  81. * device_pm_unlock - Unlock the list of active devices used by the PM core.
  82. */
void device_pm_unlock(void)
{
	/* Counterpart of device_pm_lock(). */
	mutex_unlock(&dpm_list_mtx);
}
  87. /**
  88. * device_pm_add - Add a device to the PM core's list of active devices.
  89. * @dev: Device to add to the list.
  90. */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	/*
	 * A prepared parent is already part-way through a system sleep
	 * transition; registering a child under it now is suspicious, so
	 * warn (but still add the device).
	 */
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	/* Tail insertion keeps dpm_list in depth-first (children-after-parents) order. */
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
  102. /**
  103. * device_pm_remove - Remove a device from the PM core's list of active devices.
  104. * @dev: Device to be removed from the list.
  105. */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	/* Wake anyone blocked in dpm_wait() on this device's completion. */
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}
  117. /**
  118. * device_pm_move_before - Move device in the PM core's list of active devices.
  119. * @deva: Device to move in dpm_list.
  120. * @devb: Device @deva should come before.
  121. */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}
  130. /**
  131. * device_pm_move_after - Move device in the PM core's list of active devices.
  132. * @deva: Device to move in dpm_list.
  133. * @devb: Device @deva should come after.
  134. */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}
  143. /**
  144. * device_pm_move_last - Move device to end of the PM core's list of devices.
  145. * @dev: Device to move in dpm_list.
  146. */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}
  153. static ktime_t initcall_debug_start(struct device *dev)
  154. {
  155. ktime_t calltime = ktime_set(0, 0);
  156. if (initcall_debug) {
  157. pr_info("calling %s+ @ %i\n",
  158. dev_name(dev), task_pid_nr(current));
  159. calltime = ktime_get();
  160. }
  161. return calltime;
  162. }
  163. static void initcall_debug_report(struct device *dev, ktime_t calltime,
  164. int error)
  165. {
  166. ktime_t delta, rettime;
  167. if (initcall_debug) {
  168. rettime = ktime_get();
  169. delta = ktime_sub(rettime, calltime);
  170. pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
  171. error, (unsigned long long)ktime_to_ns(delta) >> 10);
  172. }
  173. }
  174. /**
  175. * dpm_wait - Wait for a PM operation to complete.
  176. * @dev: Device to wait for.
  177. * @async: If unset, wait only if the device's power.async_suspend flag is set.
  178. */
  179. static void dpm_wait(struct device *dev, bool async)
  180. {
  181. if (!dev)
  182. return;
  183. if (async || (pm_async_enabled && dev->power.async_suspend))
  184. wait_for_completion(&dev->power.completion);
  185. }
  186. static int dpm_wait_fn(struct device *dev, void *async_ptr)
  187. {
  188. dpm_wait(dev, *((bool *)async_ptr));
  189. return 0;
  190. }
/* Wait until every child of @dev has finished its in-flight PM operation. */
static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
  195. /**
  196. * pm_op - Execute the PM operation appropriate for given PM event.
  197. * @dev: Device to handle.
  198. * @ops: PM operations to choose from.
  199. * @state: PM transition of the system being carried out.
  200. */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;
#ifdef CONFIG_MSM_SM_EVENT
	/* suspend_type doubles as a "callback actually ran" flag (0 = none). */
	uint32_t suspend_type = 0, delta_time = 0;
	ktime_t rettime;
#endif

	calltime = initcall_debug_start(dev);
#ifdef CONFIG_MSM_SM_EVENT
	/*
	 * Take our own start timestamp so the event log has valid timing
	 * even when initcall_debug is off (initcall_debug_start() then
	 * returns a zero ktime).
	 */
	calltime = ktime_get();
#endif

	/* Dispatch to the ops callback matching the transition event. */
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
#ifdef CONFIG_MSM_SM_EVENT
			suspend_type = SM_DEVICE_EVENT | SM_DEVICE_EVENT_SUSPEND;
#endif
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
#ifdef CONFIG_MSM_SM_EVENT
			suspend_type = SM_DEVICE_EVENT | SM_DEVICE_EVENT_RESUME;
#endif
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

#ifdef CONFIG_MSM_SM_EVENT
	/* Log suspend/resume callbacks that took longer than ~10 us. */
	rettime = ktime_get();
	delta_time = (unsigned long)(ktime_to_ns(ktime_sub(rettime, calltime)) >> 10);
	if (delta_time > 10 && suspend_type != 0)
		sm_add_event(suspend_type, (uint32_t)(delta_time), 0, (void *)dev_name(dev), strlen(dev_name(dev)) + 1);
#endif
	return error;
}
  276. /**
  277. * pm_noirq_op - Execute the PM operation appropriate for given PM event.
  278. * @dev: Device to handle.
  279. * @ops: PM operations to choose from.
  280. * @state: PM transition of the system being carried out.
  281. *
  282. * The driver of @dev will not receive interrupts while this function is being
  283. * executed.
  284. */
static int pm_noirq_op(struct device *dev,
		       const struct dev_pm_ops *ops,
		       pm_message_t state)
{
	int error = 0;
#ifdef CONFIG_MSM_SM_EVENT
	/* suspend_type doubles as a "callback actually ran" flag (0 = none). */
	uint32_t suspend_type = 0, delta_time = 0;
#endif
	ktime_t calltime = ktime_set(0, 0), delta, rettime;

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}
#ifdef CONFIG_MSM_SM_EVENT
	/* Always take a start timestamp for the event log, even without initcall_debug. */
	calltime = ktime_get();
#endif

	/* Dispatch to the *_noirq callback matching the transition event. */
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
#ifdef CONFIG_MSM_SM_EVENT
			suspend_type = SM_DEVICE_EVENT | SM_DEVICE_EVENT_SUSPEND;
#endif
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
#ifdef CONFIG_MSM_SM_EVENT
			suspend_type = SM_DEVICE_EVENT | SM_DEVICE_EVENT_RESUME;
#endif
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

#ifdef CONFIG_MSM_SM_EVENT
	/* Log suspend/resume callbacks that took longer than ~10 us. */
	rettime = ktime_get();
	delta_time = (unsigned long)(ktime_to_ns(ktime_sub(rettime, calltime)) >> 10);
	if (delta_time > 10 && suspend_type != 0)
		sm_add_event(suspend_type, (uint32_t)(delta_time), 0, (void *)dev_name(dev), strlen(dev_name(dev))+1);
#endif
	return error;
}
  370. static char *pm_verb(int event)
  371. {
  372. switch (event) {
  373. case PM_EVENT_SUSPEND:
  374. return "suspend";
  375. case PM_EVENT_RESUME:
  376. return "resume";
  377. case PM_EVENT_FREEZE:
  378. return "freeze";
  379. case PM_EVENT_QUIESCE:
  380. return "quiesce";
  381. case PM_EVENT_HIBERNATE:
  382. return "hibernate";
  383. case PM_EVENT_THAW:
  384. return "thaw";
  385. case PM_EVENT_RESTORE:
  386. return "restore";
  387. case PM_EVENT_RECOVER:
  388. return "recover";
  389. default:
  390. return "(unknown PM event)";
  391. }
  392. }
/* Emit a debug line for a device PM operation, noting wakeup capability. */
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}
/* Report a failed device PM operation with its error code. */
static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
		       int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
	       dev_name(dev), pm_verb(state.event), info, error);
}
  405. static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
  406. {
  407. ktime_t calltime;
  408. u64 usecs64;
  409. int usecs;
  410. calltime = ktime_get();
  411. usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
  412. do_div(usecs64, NSEC_PER_USEC);
  413. usecs = usecs64;
  414. if (usecs == 0)
  415. usecs = 1;
  416. pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
  417. info ?: "", info ? " " : "", pm_verb(state.event),
  418. usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
  419. }
  420. /*------------------------- Resume routines -------------------------*/
  421. /**
  422. * device_resume_noirq - Execute an "early resume" callback for given device.
  423. * @dev: Device to handle.
  424. * @state: PM transition of the system being carried out.
  425. *
  426. * The driver of @dev will not receive interrupts while this function is being
  427. * executed.
  428. */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/*
	 * Exactly one callback source is used, in priority order:
	 * power domain, then device type, then class, then bus.
	 */
	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "EARLY power domain ");
		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	TRACE_RESUME(error);
	return error;
}
  450. /**
  451. * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
  452. * @state: PM transition of the system being carried out.
  453. *
  454. * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
  455. * enable device drivers to receive interrupts.
  456. */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		/* Advance the device to the next resume phase's list first. */
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		/*
		 * Drop the list lock around the callback: device_pm_add()
		 * may run with a device lock held, so holding dpm_list_mtx
		 * across driver code could deadlock.
		 */
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error)
			pm_dev_err(dev, state, " early", error);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	/* All noirq handlers have run; let drivers receive interrupts again. */
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
  478. /**
  479. * legacy_resume - Execute a legacy (bus or class) resume callback for device.
  480. * @dev: Device to resume.
  481. * @cb: Resume callback to execute.
  482. */
  483. static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
  484. {
  485. int error;
  486. ktime_t calltime;
  487. calltime = initcall_debug_start(dev);
  488. error = cb(dev);
  489. suspend_report_result(cb, error);
  490. initcall_debug_report(dev, calltime, error);
  491. return error;
  492. }
  493. /**
  494. * device_resume - Execute "resume" callbacks for given device.
  495. * @dev: Device to handle.
  496. * @state: PM transition of the system being carried out.
  497. * @async: If true, the device is being resumed asynchronously.
  498. */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;
	bool put = false;	/* Whether pm_runtime_put_sync() is owed at exit. */

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* A device must not resume before its parent has. */
	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	/* Skip devices whose suspend never ran (or failed). */
	if (!dev->power.is_suspended)
		goto Unlock;

	/* Re-enable runtime PM, which __device_suspend() disabled. */
	pm_runtime_enable(dev);
	put = true;

	/*
	 * Exactly one callback source is used, in priority order:
	 * power domain, type, class (new-style then legacy), bus
	 * (new-style then legacy).
	 */
	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pwr_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
	}

 End:
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	/* Unblock async resumes of this device's children. */
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (put)
		pm_runtime_put_sync(dev);

	return error;
}
  556. static void async_resume(void *data, async_cookie_t cookie)
  557. {
  558. struct device *dev = (struct device *)data;
  559. int error;
  560. error = device_resume(dev, pm_transition, true);
  561. if (error)
  562. pm_dev_err(dev, pm_transition, " async", error);
  563. put_device(dev);
  564. }
  565. static bool is_async(struct device *dev)
  566. {
  567. return dev->power.async_suspend && pm_async_enabled
  568. && !pm_trace_is_enabled();
  569. }
  570. /**
  571. * dpm_drv_timeout - Driver suspend / resume watchdog handler
  572. * @data: struct device which timed out
  573. *
  574. * Called when a driver has timed out suspending or resuming.
  575. * There's not much we can do here to recover so
  576. * BUG() out for a crash-dump
  577. *
  578. */
static void dpm_drv_timeout(unsigned long data)
{
	struct dpm_drv_wd_data *wd_data = (void *)data;
	struct device *dev = wd_data->dev;
	struct task_struct *tsk = wd_data->tsk;

	printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev),
	       (dev->driver ? dev->driver->name : "no driver"));

	printk(KERN_EMERG "dpm suspend stack:\n");
	/* Dump the stuck task's stack before crashing. */
	show_stack(tsk, NULL);

	/* Deliberate: a hung suspend is unrecoverable, so force a crash dump. */
	BUG();
}
  590. /**
  591. * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
  592. * @state: PM transition of the system being carried out.
  593. *
  594. * Execute the appropriate "resume" callback for all devices whose status
  595. * indicates that they are suspended.
  596. */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/*
	 * First pass: kick off all async-capable resumes so they can run
	 * concurrently with the synchronous pass below.
	 */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	/* Second pass: resume the remaining devices synchronously, in order. */
	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			/* Drop the list lock around driver callbacks. */
			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error)
				pm_dev_err(dev, state, "", error);

			mutex_lock(&dpm_list_mtx);
		}
		/* The device may have been removed while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for the async resumes started in the first pass. */
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}
  631. /**
  632. * device_complete - Complete a PM transition for given device.
  633. * @dev: Device to handle.
  634. * @state: PM transition of the system being carried out.
  635. */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	/*
	 * One ->complete() source is used, in the same priority order as
	 * the other phases: power domain, type, class, bus.
	 */
	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "completing power domain ");
		if (dev->pwr_domain->ops.complete)
			dev->pwr_domain->ops.complete(dev);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "completing type ");
		if (dev->type->pm->complete)
			dev->type->pm->complete(dev);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "completing class ");
		if (dev->class->pm->complete)
			dev->class->pm->complete(dev);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "completing ");
		if (dev->bus->pm->complete)
			dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}
  658. /**
  659. * dpm_complete - Complete a PM transition for all non-sysdev devices.
  660. * @state: PM transition of the system being carried out.
  661. *
  662. * Execute the ->complete() callbacks for all devices whose PM status is not
  663. * DPM_ON (this allows new devices to be registered).
  664. */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	/* Walk dpm_prepared_list in reverse registration order. */
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		/* Collect the device on a private list before dropping the lock. */
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	/* Put every completed device back on the main list. */
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
  684. /**
  685. * dpm_resume_end - Execute "resume" callbacks and complete system transition.
  686. * @state: PM transition of the system being carried out.
  687. *
  688. * Execute "resume" callbacks for all devices and complete the PM transition of
  689. * the system.
  690. */
void dpm_resume_end(pm_message_t state)
{
	/* Run ->resume() for every device, then the ->complete() phase. */
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
  697. /*------------------------- Suspend routines -------------------------*/
  698. /**
  699. * resume_event - Return a "resume" message for given "suspend" sleep state.
  700. * @sleep_state: PM message representing a sleep state.
  701. *
  702. * Return a PM message representing the resume event corresponding to given
  703. * sleep state.
  704. */
  705. static pm_message_t resume_event(pm_message_t sleep_state)
  706. {
  707. switch (sleep_state.event) {
  708. case PM_EVENT_SUSPEND:
  709. return PMSG_RESUME;
  710. case PM_EVENT_FREEZE:
  711. case PM_EVENT_QUIESCE:
  712. return PMSG_RECOVER;
  713. case PM_EVENT_HIBERNATE:
  714. return PMSG_RESTORE;
  715. }
  716. return PMSG_ON;
  717. }
  718. /**
  719. * device_suspend_noirq - Execute a "late suspend" callback for given device.
  720. * @dev: Device to handle.
  721. * @state: PM transition of the system being carried out.
  722. *
  723. * The driver of @dev will not receive interrupts while this function is being
  724. * executed.
  725. */
  726. static int device_suspend_noirq(struct device *dev, pm_message_t state)
  727. {
  728. int error;
  729. if (dev->pwr_domain) {
  730. pm_dev_dbg(dev, state, "LATE power domain ");
  731. error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
  732. if (error)
  733. return error;
  734. } else if (dev->type && dev->type->pm) {
  735. pm_dev_dbg(dev, state, "LATE type ");
  736. error = pm_noirq_op(dev, dev->type->pm, state);
  737. if (error)
  738. return error;
  739. } else if (dev->class && dev->class->pm) {
  740. pm_dev_dbg(dev, state, "LATE class ");
  741. error = pm_noirq_op(dev, dev->class->pm, state);
  742. if (error)
  743. return error;
  744. } else if (dev->bus && dev->bus->pm) {
  745. pm_dev_dbg(dev, state, "LATE ");
  746. error = pm_noirq_op(dev, dev->bus->pm, state);
  747. if (error)
  748. return error;
  749. }
  750. return 0;
  751. }
  752. /**
  753. * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
  754. * @state: PM transition of the system being carried out.
  755. *
  756. * Prevent device drivers from receiving interrupts and call the "noirq" suspend
  757. * handlers for all non-sysdev devices.
  758. */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	/* Stop delivering interrupts to drivers before the noirq phase. */
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	/* Walk dpm_suspended_list in reverse (children before parents). */
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		/* Drop the list lock around driver callbacks. */
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			put_device(dev);
			break;
		}
		/* The device may have been removed while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		/* Unwind: resume everything already late-suspended. */
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
  788. /**
  789. * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
  790. * @dev: Device to suspend.
  791. * @state: PM transition of the system being carried out.
  792. * @cb: Suspend callback to execute.
  793. */
  794. static int legacy_suspend(struct device *dev, pm_message_t state,
  795. int (*cb)(struct device *dev, pm_message_t state))
  796. {
  797. int error;
  798. ktime_t calltime;
  799. calltime = initcall_debug_start(dev);
  800. error = cb(dev, state);
  801. suspend_report_result(cb, error);
  802. initcall_debug_report(dev, calltime, error);
  803. return error;
  804. }
  805. /**
  806. * device_suspend - Execute "suspend" callbacks for given device.
  807. * @dev: Device to handle.
  808. * @state: PM transition of the system being carried out.
  809. * @async: If true, the device is being suspended asynchronously.
  810. */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;
	struct timer_list timer;	/* Watchdog for hung suspend callbacks. */
	struct dpm_drv_wd_data data;

	/* A device must not suspend before all of its children have. */
	dpm_wait_for_children(dev, async);

	/* If another async suspend already failed, bail out quietly. */
	if (async_error)
		return 0;

	/* Pin the device active; released on error or at device_resume(). */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	/* A pending wakeup event aborts the whole suspend. */
	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		async_error = -EBUSY;
		return 0;
	}

	/*
	 * Arm a 12-second watchdog around the suspend callback; if it
	 * fires, dpm_drv_timeout() dumps our stack and BUG()s.
	 */
	data.dev = dev;
	data.tsk = get_current();
	init_timer_on_stack(&timer);
	timer.expires = jiffies + HZ * 12;
	timer.function = dpm_drv_timeout;
	timer.data = (unsigned long)&data;
	add_timer(&timer);

	device_lock(dev);

	/*
	 * Exactly one callback source is used, in priority order:
	 * power domain, type, class (new-style then legacy), bus
	 * (new-style then legacy).
	 */
	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pwr_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 End:
	dev->power.is_suspended = !error;

	device_unlock(dev);

	/* Disarm the watchdog before leaving its stack frame. */
	del_timer_sync(&timer);
	destroy_timer_on_stack(&timer);

	/* Unblock async suspends waiting on this device (as a child). */
	complete_all(&dev->power.completion);

	if (error) {
		pm_runtime_put_sync(dev);
		async_error = error;
	} else if (dev->power.is_suspended) {
		/* Keep runtime PM off until device_resume() re-enables it. */
		__pm_runtime_disable(dev, false);
	}

	return error;
}
  879. static void async_suspend(void *data, async_cookie_t cookie)
  880. {
  881. struct device *dev = (struct device *)data;
  882. int error;
  883. error = __device_suspend(dev, pm_transition, true);
  884. if (error)
  885. pm_dev_err(dev, pm_transition, " async", error);
  886. put_device(dev);
  887. }
  888. static int device_suspend(struct device *dev)
  889. {
  890. INIT_COMPLETION(dev->power.completion);
  891. if (pm_async_enabled && dev->power.async_suspend) {
  892. get_device(dev);
  893. async_schedule(async_suspend, dev);
  894. return 0;
  895. }
  896. return __device_suspend(dev, pm_transition, false);
  897. }
  898. /**
  899. * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
  900. * @state: PM transition of the system being carried out.
  901. */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	/* Walk dpm_prepared_list in reverse (children before parents). */
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		/* Drop the list lock around driver callbacks. */
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		/* The device may have been removed while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		/* Stop early if an async suspend has already failed. */
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all outstanding async suspends before judging the result. */
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 *
 * Exactly one subsystem's callback is invoked, in strict priority order:
 * power domain, then device type, then class, then bus.  A missing
 * ->prepare() in the selected subsystem is not an error; @error stays 0.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "preparing power domain ");
		if (dev->pwr_domain->ops.prepare)
			error = dev->pwr_domain->ops.prepare(dev);
		suspend_report_result(dev->pwr_domain->ops.prepare, error);
		if (error)
			goto End;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "preparing type ");
		if (dev->type->pm->prepare)
			error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "preparing class ");
		if (dev->class->pm->prepare)
			error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "preparing ");
		if (dev->bus->pm->prepare)
			error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
	}
 End:
	device_unlock(dev);

	return error;
}
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.  Devices are taken
 * from the head of dpm_list (parents before children) and moved to the
 * tail of dpm_prepared_list on success, preserving registration order.
 *
 * Returns 0 on success or the first fatal ->prepare() error.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		/*
		 * Drop the list lock around the driver callback; the
		 * reference taken above keeps @dev alive while unlocked.
		 */
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				/*
				 * -EAGAIN is a soft failure: retry without
				 * aborting the transition.  NOTE(review):
				 * the device stays at the head of dpm_list,
				 * so a driver that keeps returning -EAGAIN
				 * would busy-loop here — presumably callers
				 * only return it transiently; verify.
				 */
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		/*
		 * The device may have been unregistered while we were
		 * unlocked; only move it if it is still on a PM list.
		 */
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}
  1015. /**
  1016. * dpm_suspend_start - Prepare devices for PM transition and suspend them.
  1017. * @state: PM transition of the system being carried out.
  1018. *
  1019. * Prepare all non-sysdev devices for system PM transition and execute "suspend"
  1020. * callbacks for them.
  1021. */
  1022. int dpm_suspend_start(pm_message_t state)
  1023. {
  1024. int error;
  1025. error = dpm_prepare(state);
  1026. if (!error)
  1027. error = dpm_suspend(state);
  1028. return error;
  1029. }
  1030. EXPORT_SYMBOL_GPL(dpm_suspend_start);
  1031. void __suspend_report_result(const char *function, void *fn, int ret)
  1032. {
  1033. if (ret)
  1034. printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
  1035. }
  1036. EXPORT_SYMBOL_GPL(__suspend_report_result);
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 *
 * Passes @subordinate's async_suspend flag to dpm_wait(); presumably the
 * wait only happens when async suspend is in use — confirm against
 * dpm_wait()'s definition earlier in this file.
 *
 * Returns the global async_error so the caller can abort its own
 * suspend if an asynchronous suspend elsewhere has already failed.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);