PageRenderTime 60ms CodeModel.GetById 29ms RepoModel.GetById 1ms app.codeStats 0ms

/drivers/soc/imx/pm-domains.c

https://bitbucket.org/laigui/linux-imx
C | 505 lines | 368 code | 90 blank | 47 comment | 77 complexity | 97f474c32177ee458d1fc3d2499790f4 MD5 | raw file
Possible License(s): GPL-2.0
  1. /*
  2. * Copyright (C) 2016 Freescale Semiconductor, Inc.
  3. * Copyright 2017 NXP
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License
  7. * as published by the Free Software Foundation; either version 2
  8. * of the License, or (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. */
  15. #include <linux/clk.h>
  16. #include <linux/clk-provider.h>
  17. #include <linux/console.h>
  18. #include <linux/delay.h>
  19. #include <linux/io.h>
  20. #include <linux/irq.h>
  21. #include <linux/irqchip.h>
  22. #include <linux/irqchip/arm-gic.h>
  23. #include <linux/of.h>
  24. #include <linux/of_address.h>
  25. #include <linux/of_platform.h>
  26. #include <linux/platform_device.h>
  27. #include <linux/pm.h>
  28. #include <linux/pm_runtime.h>
  29. #include <linux/pm_clock.h>
  30. #include <linux/slab.h>
  31. #include <linux/syscore_ops.h>
  32. #include <soc/imx8/sc/sci.h>
  33. #include "pm-domain-imx8.h"
/* IPC handle for talking to the System Controller firmware (SCFW). */
static sc_ipc_t pm_ipc_handle;

/*
 * Resources to be forced back ON during syscore resume (filled from DT
 * nodes carrying "early_power_on"); SC_R_LAST marks an unused slot.
 */
static sc_rsrc_t early_power_on_rsrc[] = {
	SC_R_LAST, SC_R_LAST, SC_R_LAST, SC_R_LAST, SC_R_LAST,
	SC_R_LAST, SC_R_LAST, SC_R_LAST, SC_R_LAST, SC_R_LAST,
};

/* Resource id of the debug UART; kept powered for no_console_suspend. */
static sc_rsrc_t rsrc_debug_console;

/* Wakeup-unit irq space, rounded up to a multiple of 32 bits. */
#define IMX8_WU_MAX_IRQS (((SC_R_LAST + 31) / 32 ) * 32 )

/* hwirq number -> SCFW resource id (filled from DT "wakeup-irq"). */
static sc_rsrc_t irq2rsrc[IMX8_WU_MAX_IRQS];
/* Bitmap (32 bits per word) of resources currently armed as wakeup sources. */
static sc_rsrc_t wakeup_rsrc_id[IMX8_WU_MAX_IRQS / 32];
/* Protects wakeup_rsrc_id updates from imx8_wu_irq_set_wake(). */
static DEFINE_SPINLOCK(imx8_wu_lock);
/*
 * Index into the genpd states[] array set up by imx8_pd_setup():
 * state 0 keeps the domain in low power, state 1 turns it fully off.
 */
enum imx_pd_state {
	PD_LP,
	PD_OFF,
};

/* Snapshot of one clock's parent and rate, used to restore clocks after
 * a subsystem power-off that lost hardware state. */
struct clk_stat {
	struct clk *clk;
	struct clk *parent;
	unsigned long rate;
};
  53. static int imx8_pd_power(struct generic_pm_domain *domain, bool power_on)
  54. {
  55. struct imx8_pm_domain *pd;
  56. sc_err_t sci_err = SC_ERR_NONE;
  57. pd = container_of(domain, struct imx8_pm_domain, pd);
  58. if (pd->rsrc_id == SC_R_LAST)
  59. return 0;
  60. /* keep uart console power on for no_console_suspend */
  61. if (pd->rsrc_id == rsrc_debug_console &&
  62. !console_suspend_enabled && !power_on)
  63. return 0;
  64. /* keep resource power on if it is a wakeup source */
  65. if (!power_on && ((1 << pd->rsrc_id % 32) &
  66. wakeup_rsrc_id[pd->rsrc_id / 32]))
  67. return 0;
  68. sci_err = sc_pm_set_resource_power_mode(pm_ipc_handle, pd->rsrc_id,
  69. (power_on) ? SC_PM_PW_MODE_ON :
  70. pd->pd.state_idx ? SC_PM_PW_MODE_OFF : SC_PM_PW_MODE_LP);
  71. if (sci_err)
  72. pr_err("Failed power operation on resource %d\n", pd->rsrc_id);
  73. return 0;
  74. }
  75. static int imx8_pd_power_on(struct generic_pm_domain *domain)
  76. {
  77. struct imx8_pm_domain *pd;
  78. struct imx8_pm_rsrc_clks *imx8_rsrc_clk;
  79. int ret = 0;
  80. pd = container_of(domain, struct imx8_pm_domain, pd);
  81. ret = imx8_pd_power(domain, true);
  82. if (!list_empty(&pd->clks) && (pd->pd.state_idx == PD_OFF)) {
  83. if (pd->clk_state_saved) {
  84. /*
  85. * The SS is powered on restore the clock rates that
  86. * may be lost.
  87. */
  88. list_for_each_entry(imx8_rsrc_clk, &pd->clks, node) {
  89. if (imx8_rsrc_clk->parent)
  90. clk_set_parent(imx8_rsrc_clk->clk,
  91. imx8_rsrc_clk->parent);
  92. if (imx8_rsrc_clk->rate) {
  93. /*
  94. * Need to read the clock so that rate in
  95. * Linux is reset.
  96. */
  97. clk_get_rate(imx8_rsrc_clk->clk);
  98. /* Restore the clock rate. */
  99. clk_set_rate(imx8_rsrc_clk->clk,
  100. imx8_rsrc_clk->rate);
  101. }
  102. }
  103. } else if (pd->clk_state_may_lost) {
  104. struct clk_stat *clk_stats;
  105. int count = 0;
  106. int i = 0;
  107. /*
  108. * The SS is powered down before without saving clk rates,
  109. * try to restore the lost clock rates if any
  110. *
  111. * As a parent clk rate restore will cause the clk recalc
  112. * to all possible child clks which may result in child clk
  113. * previous state lost due to power domain lost before, we
  114. * have to first walk through all child clks to retrieve the
  115. * state via clk_hw_get_rate which bypassed the clk recalc,
  116. * then we can restore them one by one.
  117. */
  118. list_for_each_entry(imx8_rsrc_clk, &pd->clks, node)
  119. count++;
  120. clk_stats = kzalloc(count * sizeof(*clk_stats), GFP_KERNEL);
  121. if (!clk_stats) {
  122. pr_warn("%s: failed to alloc mem for clk state recovery\n", pd->name);
  123. return -ENOMEM;
  124. }
  125. list_for_each_entry(imx8_rsrc_clk, &pd->clks, node) {
  126. clk_stats[i].clk = imx8_rsrc_clk->clk;
  127. clk_stats[i].parent = clk_get_parent(imx8_rsrc_clk->clk);
  128. clk_stats[i].rate = clk_hw_get_rate(__clk_get_hw(imx8_rsrc_clk->clk));
  129. i++;
  130. }
  131. for (i = 0; i < count; i++) {
  132. /* restore parent first */
  133. if (clk_stats[i].parent)
  134. clk_set_parent(clk_stats[i].clk, clk_stats[i].parent);
  135. if (clk_stats[i].rate) {
  136. /* invalid cached rate first by get rate once */
  137. clk_get_rate(clk_stats[i].clk);
  138. /* restore the lost rate */
  139. clk_set_rate(clk_stats[i].clk, clk_stats[i].rate);
  140. }
  141. }
  142. kfree(clk_stats);
  143. }
  144. }
  145. return ret;
  146. }
  147. static int imx8_pd_power_off(struct generic_pm_domain *domain)
  148. {
  149. struct imx8_pm_domain *pd;
  150. struct imx8_pm_rsrc_clks *imx8_rsrc_clk;
  151. pd = container_of(domain, struct imx8_pm_domain, pd);
  152. if (!list_empty(&pd->clks) && (pd->pd.state_idx == PD_OFF)) {
  153. /*
  154. * The SS is going to be powered off, store the clock rates
  155. * that may be lost.
  156. */
  157. list_for_each_entry(imx8_rsrc_clk, &pd->clks, node) {
  158. imx8_rsrc_clk->parent = clk_get_parent(imx8_rsrc_clk->clk);
  159. imx8_rsrc_clk->rate = clk_hw_get_rate(__clk_get_hw(imx8_rsrc_clk->clk));
  160. }
  161. pd->clk_state_saved = true;
  162. pd->clk_state_may_lost = false;
  163. } else if (pd->pd.state_idx == PD_OFF) {
  164. pd->clk_state_saved = false;
  165. pd->clk_state_may_lost = true;
  166. } else {
  167. pd->clk_state_saved = false;
  168. pd->clk_state_may_lost = false;
  169. }
  170. return imx8_pd_power(domain, false);
  171. }
  172. static int imx8_attach_dev(struct generic_pm_domain *genpd, struct device *dev)
  173. {
  174. struct imx8_pm_domain *pd;
  175. struct device_node *node = dev->of_node;
  176. struct of_phandle_args clkspec;
  177. int rc, index, num_clks;
  178. pd = container_of(genpd, struct imx8_pm_domain, pd);
  179. num_clks = of_count_phandle_with_args(node, "assigned-clocks",
  180. "#clock-cells");
  181. if (num_clks == -EINVAL)
  182. pr_err("%s: Invalid value of assigned-clocks property at %s\n",
  183. pd->name, node->full_name);
  184. for (index = 0; index < num_clks; index++) {
  185. struct imx8_pm_rsrc_clks *imx8_rsrc_clk;
  186. rc = of_parse_phandle_with_args(node, "assigned-clocks",
  187. "#clock-cells", index, &clkspec);
  188. if (rc < 0) {
  189. /* skip empty (null) phandles */
  190. if (rc == -ENOENT)
  191. continue;
  192. else
  193. return rc;
  194. }
  195. if (clkspec.np == node)
  196. return 0;
  197. imx8_rsrc_clk = devm_kzalloc(dev, sizeof(*imx8_rsrc_clk),
  198. GFP_KERNEL);
  199. if (!imx8_rsrc_clk)
  200. return -ENOMEM;
  201. imx8_rsrc_clk->clk = of_clk_get_from_provider(&clkspec);
  202. if (!IS_ERR(imx8_rsrc_clk->clk))
  203. list_add_tail(&imx8_rsrc_clk->node, &pd->clks);
  204. }
  205. return 0;
  206. }
  207. static void imx8_detach_dev(struct generic_pm_domain *genpd, struct device *dev)
  208. {
  209. struct imx8_pm_domain *pd;
  210. struct imx8_pm_rsrc_clks *imx8_rsrc_clk, *tmp;
  211. pd = container_of(genpd, struct imx8_pm_domain, pd);
  212. /* Free all the clock entry nodes. */
  213. if (list_empty(&pd->clks))
  214. return;
  215. list_for_each_entry_safe(imx8_rsrc_clk, tmp, &pd->clks, node) {
  216. list_del(&imx8_rsrc_clk->node);
  217. devm_kfree(dev, imx8_rsrc_clk);
  218. }
  219. }
  220. static void imx8_pm_domains_resume(void)
  221. {
  222. sc_err_t sci_err = SC_ERR_NONE;
  223. int i;
  224. for (i = 0; i < (sizeof(early_power_on_rsrc) /
  225. sizeof(sc_rsrc_t)); i++) {
  226. if (early_power_on_rsrc[i] != SC_R_LAST) {
  227. sci_err = sc_pm_set_resource_power_mode(pm_ipc_handle,
  228. early_power_on_rsrc[i], SC_PM_PW_MODE_ON);
  229. if (sci_err != SC_ERR_NONE)
  230. pr_err("fail to power on resource %d\n",
  231. early_power_on_rsrc[i]);
  232. }
  233. }
  234. }
  235. struct syscore_ops imx8_pm_domains_syscore_ops = {
  236. .resume = imx8_pm_domains_resume,
  237. };
/*
 * Wire the genpd callbacks and declare the two domain idle states:
 * state 0 (PD_LP, low power) with 25 us transition latencies and
 * state 1 (PD_OFF, fully off) with 2.5 ms latencies.  The latency
 * values feed the genpd governor's state-selection decisions.
 */
static void imx8_pd_setup(struct imx8_pm_domain *pd)
{
	pd->pd.power_off = imx8_pd_power_off;
	pd->pd.power_on = imx8_pd_power_on;
	pd->pd.attach_dev = imx8_attach_dev;
	pd->pd.detach_dev = imx8_detach_dev;

	pd->pd.states[0].power_off_latency_ns = 25000;
	pd->pd.states[0].power_on_latency_ns = 25000;
	pd->pd.states[1].power_off_latency_ns = 2500000;
	pd->pd.states[1].power_on_latency_ns = 2500000;
	pd->pd.state_count = 2;
}
  250. static int __init imx8_add_pm_domains(struct device_node *parent,
  251. struct generic_pm_domain *genpd_parent)
  252. {
  253. struct device_node *np;
  254. static int index;
  255. for_each_child_of_node(parent, np) {
  256. struct imx8_pm_domain *imx8_pd;
  257. sc_rsrc_t rsrc_id;
  258. u32 wakeup_irq;
  259. if (!of_device_is_available(np))
  260. continue;
  261. imx8_pd = kzalloc(sizeof(*imx8_pd), GFP_KERNEL);
  262. if (!imx8_pd)
  263. return -ENOMEM;
  264. if (!of_property_read_string(np, "name", &imx8_pd->pd.name))
  265. imx8_pd->name = imx8_pd->pd.name;
  266. if (!of_property_read_u32(np, "reg", &rsrc_id))
  267. imx8_pd->rsrc_id = rsrc_id;
  268. if (imx8_pd->rsrc_id != SC_R_LAST) {
  269. imx8_pd_setup(imx8_pd);
  270. if (of_property_read_bool(np, "early_power_on")
  271. && index < (sizeof(early_power_on_rsrc) /
  272. sizeof(sc_rsrc_t))) {
  273. early_power_on_rsrc[index++] = imx8_pd->rsrc_id;
  274. }
  275. if (of_property_read_bool(np, "debug_console"))
  276. rsrc_debug_console = imx8_pd->rsrc_id;
  277. if (!of_property_read_u32(np, "wakeup-irq",
  278. &wakeup_irq))
  279. irq2rsrc[wakeup_irq] = imx8_pd->rsrc_id;
  280. }
  281. INIT_LIST_HEAD(&imx8_pd->clks);
  282. pm_genpd_init(&imx8_pd->pd, NULL, true);
  283. if (genpd_parent)
  284. pm_genpd_add_subdomain(genpd_parent, &imx8_pd->pd);
  285. of_genpd_add_provider_simple(np, &imx8_pd->pd);
  286. imx8_add_pm_domains(np, &imx8_pd->pd);
  287. }
  288. return 0;
  289. }
  290. static int __init imx8_init_pm_domains(void)
  291. {
  292. struct device_node *np;
  293. sc_err_t sci_err;
  294. sc_rsrc_t rsrc_id;
  295. uint32_t mu_id;
  296. /* skip pm domains for non-SCFW system */
  297. if (!of_find_compatible_node(NULL, NULL, "nxp,imx8-pd"))
  298. return 0;
  299. pr_info("***** imx8_init_pm_domains *****\n");
  300. for_each_compatible_node(np, NULL, "nxp,imx8-pd") {
  301. struct imx8_pm_domain *imx8_pd;
  302. if (!of_device_is_available(np))
  303. continue;
  304. imx8_pd = kzalloc(sizeof(struct imx8_pm_domain), GFP_KERNEL);
  305. if (!imx8_pd) {
  306. pr_err("%s: failed to allocate memory for domain\n",
  307. __func__);
  308. return -ENOMEM;
  309. }
  310. if (!of_property_read_string(np, "name", &imx8_pd->pd.name))
  311. imx8_pd->name = imx8_pd->pd.name;
  312. if (!of_property_read_u32(np, "reg", &rsrc_id))
  313. imx8_pd->rsrc_id = rsrc_id;
  314. if (imx8_pd->rsrc_id != SC_R_LAST)
  315. imx8_pd_setup(imx8_pd);
  316. INIT_LIST_HEAD(&imx8_pd->clks);
  317. pm_genpd_init(&imx8_pd->pd, NULL, true);
  318. of_genpd_add_provider_simple(np, &imx8_pd->pd);
  319. imx8_add_pm_domains(np, &imx8_pd->pd);
  320. }
  321. sci_err = sc_ipc_getMuID(&mu_id);
  322. if (sci_err != SC_ERR_NONE) {
  323. pr_info("Cannot obtain MU ID\n");
  324. return sci_err;
  325. }
  326. sci_err = sc_ipc_open(&pm_ipc_handle, mu_id);
  327. register_syscore_ops(&imx8_pm_domains_syscore_ops);
  328. return 0;
  329. }
  330. early_initcall(imx8_init_pm_domains);
  331. static int imx8_wu_irq_set_wake(struct irq_data *d, unsigned int on)
  332. {
  333. unsigned int idx = irq2rsrc[d->hwirq] / 32;
  334. u32 mask = 1 << irq2rsrc[d->hwirq] % 32;
  335. spin_lock(&imx8_wu_lock);
  336. wakeup_rsrc_id[idx] = on ? wakeup_rsrc_id[idx] | mask :
  337. wakeup_rsrc_id[idx] & ~mask;
  338. spin_unlock(&imx8_wu_lock);
  339. return 0;
  340. }
/*
 * Wakeup-unit irq_chip: everything except set_wake is forwarded to the
 * parent (GIC) chip; set_wake additionally marks the mapped SCFW
 * resource as a wakeup source.
 */
static struct irq_chip imx8_wu_chip = {
	.name = "IMX8-WU",
	.irq_eoi = irq_chip_eoi_parent,
	.irq_mask = irq_chip_mask_parent,
	.irq_unmask = irq_chip_unmask_parent,
	.irq_retrigger = irq_chip_retrigger_hierarchy,
	.irq_set_wake = imx8_wu_irq_set_wake,
	.irq_set_affinity = irq_chip_set_affinity_parent,
};
  350. static int imx8_wu_domain_translate(struct irq_domain *d,
  351. struct irq_fwspec *fwspec,
  352. unsigned long *hwirq,
  353. unsigned int *type)
  354. {
  355. if (is_of_node(fwspec->fwnode)) {
  356. if (fwspec->param_count != 3)
  357. return -EINVAL;
  358. /* No PPI should point to this domain */
  359. if (fwspec->param[0] != 0)
  360. return -EINVAL;
  361. *hwirq = fwspec->param[1];
  362. *type = fwspec->param[2];
  363. return 0;
  364. }
  365. return -EINVAL;
  366. }
/*
 * ->alloc for the WU hierarchy domain: validate the GIC-style 3-cell
 * fwspec, install imx8_wu_chip on each allocated virq, then delegate
 * the real allocation to the parent (GIC) domain with an identical
 * fwspec retargeted at the parent's fwnode.
 *
 * Returns 0 on success or -EINVAL for malformed/out-of-range specs.
 */
static int imx8_wu_domain_alloc(struct irq_domain *domain,
				unsigned int irq,
				unsigned int nr_irqs, void *data)
{
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	irq_hw_number_t hwirq;
	int i;

	if (fwspec->param_count != 3)
		return -EINVAL; /* Not GIC compliant */
	if (fwspec->param[0] != 0)
		return -EINVAL; /* No PPI should point to this domain */

	hwirq = fwspec->param[1];
	if (hwirq >= IMX8_WU_MAX_IRQS)
		return -EINVAL; /* Can't deal with this */

	/* Hook our chip into every virq in the range. */
	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i,
					      &imx8_wu_chip, NULL);

	/* Same spec, but aimed at the parent interrupt controller. */
	parent_fwspec = *fwspec;
	parent_fwspec.fwnode = domain->parent->fwnode;
	return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs,
					    &parent_fwspec);
}
/* WU domain ops: custom translate/alloc, generic hierarchical free. */
static const struct irq_domain_ops imx8_wu_domain_ops = {
	.translate = imx8_wu_domain_translate,
	.alloc = imx8_wu_domain_alloc,
	.free = irq_domain_free_irqs_common,
};
  395. static int __init imx8_wu_init(struct device_node *node,
  396. struct device_node *parent)
  397. {
  398. struct irq_domain *parent_domain, *domain;
  399. if (!parent) {
  400. pr_err("%s: no parent, giving up\n", node->full_name);
  401. return -ENODEV;
  402. }
  403. parent_domain = irq_find_host(parent);
  404. if (!parent_domain) {
  405. pr_err("%s: unable to obtain parent domain\n", node->full_name);
  406. return -ENXIO;
  407. }
  408. domain = irq_domain_add_hierarchy(parent_domain, 0, IMX8_WU_MAX_IRQS,
  409. node, &imx8_wu_domain_ops,
  410. NULL);
  411. if (!domain)
  412. return -ENOMEM;
  413. return 0;
  414. }
  415. IRQCHIP_DECLARE(imx8_wakeup_unit, "fsl,imx8-wu", imx8_wu_init);