/arch/arm/mach-mx5/clock-mx51-mx53.c

https://github.com/AICP/kernel_asus_grouper

/*
 * Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright (C) 2009-2010 Amit Kucheria <amit.kucheria@canonical.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/clkdev.h>

#include <asm/div64.h>

#include <mach/hardware.h>
#include <mach/common.h>
#include <mach/clock.h>

#include "crm_regs.h"

/* External clock values passed-in by the board code */
static unsigned long external_high_reference, external_low_reference;
static unsigned long oscillator_reference, ckih2_reference;

static struct clk osc_clk;
static struct clk pll1_main_clk;
static struct clk pll1_sw_clk;
static struct clk pll2_sw_clk;
static struct clk pll3_sw_clk;
static struct clk mx53_pll4_sw_clk;
static struct clk lp_apm_clk;
static struct clk periph_apm_clk;
static struct clk ahb_clk;
static struct clk ipg_clk;
static struct clk usboh3_clk;
static struct clk emi_fast_clk;
static struct clk ipu_clk;
static struct clk mipi_hsc1_clk;
static struct clk esdhc1_clk;
static struct clk esdhc2_clk;
static struct clk esdhc3_mx53_clk;

#define MAX_DPLL_WAIT_TRIES	1000 /* 1000 * udelay(1) = 1ms */

/* calculate best pre and post dividers to get the required divider */
static void __calc_pre_post_dividers(u32 div, u32 *pre, u32 *post,
				     u32 max_pre, u32 max_post)
{
	if (div >= max_pre * max_post) {
		*pre = max_pre;
		*post = max_post;
	} else if (div >= max_pre) {
		u32 min_pre, temp_pre, old_err, err;
		min_pre = DIV_ROUND_UP(div, max_post);
		old_err = max_pre;
		for (temp_pre = max_pre; temp_pre >= min_pre; temp_pre--) {
			err = div % temp_pre;
			if (err == 0) {
				*pre = temp_pre;
				break;
			}
			err = temp_pre - err;
			if (err < old_err) {
				old_err = err;
				*pre = temp_pre;
			}
		}
		*post = DIV_ROUND_UP(div, *pre);
	} else {
		*pre = div;
		*post = 1;
	}
}

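/*
 * Worked example for __calc_pre_post_dividers() above: a requested
 * divider of 12 with max_pre = max_post = 8 picks the largest exact
 * pre-divider not above max_pre (6), giving *pre = 6 and *post = 2.
 */
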
static void _clk_ccgr_setclk(struct clk *clk, unsigned mode)
{
	u32 reg = __raw_readl(clk->enable_reg);

	reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
	reg |= mode << clk->enable_shift;

	__raw_writel(reg, clk->enable_reg);
}

static int _clk_ccgr_enable(struct clk *clk)
{
	_clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_ON);
	return 0;
}

static void _clk_ccgr_disable(struct clk *clk)
{
	_clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_OFF);
}

static int _clk_ccgr_enable_inrun(struct clk *clk)
{
	_clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_IDLE);
	return 0;
}

static void _clk_ccgr_disable_inwait(struct clk *clk)
{
	_clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_IDLE);
}

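/*
 * Each CCGR register holds 2-bit gate fields (MXC_CCM_CCGRx_CG_MASK);
 * as used above, MOD_OFF gates a clock completely, MOD_IDLE keeps it
 * running only while the ARM is in run mode (hence the *_inrun and
 * *_inwait helpers), and MOD_ON keeps it running in wait mode as well.
 * See crm_regs.h for the exact encodings.
 */
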
/*
 * For the 4-to-1 muxed input clock
 */
static inline u32 _get_mux(struct clk *parent, struct clk *m0,
			   struct clk *m1, struct clk *m2, struct clk *m3)
{
	if (parent == m0)
		return 0;
	else if (parent == m1)
		return 1;
	else if (parent == m2)
		return 2;
	else if (parent == m3)
		return 3;
	else
		BUG();

	return -EINVAL;
}

static inline void __iomem *_mx51_get_pll_base(struct clk *pll)
{
	if (pll == &pll1_main_clk)
		return MX51_DPLL1_BASE;
	else if (pll == &pll2_sw_clk)
		return MX51_DPLL2_BASE;
	else if (pll == &pll3_sw_clk)
		return MX51_DPLL3_BASE;
	else
		BUG();

	return NULL;
}

static inline void __iomem *_mx53_get_pll_base(struct clk *pll)
{
	if (pll == &pll1_main_clk)
		return MX53_DPLL1_BASE;
	else if (pll == &pll2_sw_clk)
		return MX53_DPLL2_BASE;
	else if (pll == &pll3_sw_clk)
		return MX53_DPLL3_BASE;
	else if (pll == &mx53_pll4_sw_clk)
		return MX53_DPLL4_BASE;
	else
		BUG();

	return NULL;
}

static inline void __iomem *_get_pll_base(struct clk *pll)
{
	if (cpu_is_mx51())
		return _mx51_get_pll_base(pll);
	else
		return _mx53_get_pll_base(pll);
}

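/*
 * clk_pll_get_rate() below evaluates the DPLL output as
 *
 *	rate = ref * (MFI + MFN / (MFD + 1)) / (PDF + 1)
 *
 * where ref is twice the parent rate (doubled once more when the
 * DPDCK0_2_EN doubler bit is set in DP_CTL) and MFN is a signed
 * 27-bit fractional term.
 */
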
static unsigned long clk_pll_get_rate(struct clk *clk)
{
	long mfi, mfn, mfd, pdf, ref_clk, mfn_abs;
	unsigned long dp_op, dp_mfd, dp_mfn, dp_ctl, pll_hfsm, dbl;
	void __iomem *pllbase;
	s64 temp;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	pllbase = _get_pll_base(clk);

	dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
	pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
	dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;

	if (pll_hfsm == 0) {
		dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
		dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
		dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
	} else {
		dp_op = __raw_readl(pllbase + MXC_PLL_DP_HFS_OP);
		dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFD);
		dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFN);
	}
	pdf = dp_op & MXC_PLL_DP_OP_PDF_MASK;
	mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET;
	mfi = (mfi <= 5) ? 5 : mfi;
	mfd = dp_mfd & MXC_PLL_DP_MFD_MASK;
	mfn = mfn_abs = dp_mfn & MXC_PLL_DP_MFN_MASK;

	/* Sign extend to 32-bits */
	if (mfn >= 0x04000000) {
		mfn |= 0xFC000000;
		mfn_abs = -mfn;
	}

	ref_clk = 2 * parent_rate;
	if (dbl != 0)
		ref_clk *= 2;

	ref_clk /= (pdf + 1);
	temp = (u64) ref_clk * mfn_abs;
	do_div(temp, mfd + 1);
	if (mfn < 0)
		temp = -temp;
	temp = (ref_clk * mfi) + temp;

	return temp;
}

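/*
 * Worked example for _clk_pll_set_rate() below, assuming (purely for
 * illustration) a 24 MHz oscillator as the PLL parent: a request for
 * 800 MHz gives quad_parent_rate = 96 MHz, so pdf = 0, mfi = 8,
 * mfd = 999999 and mfn = 333333, i.e. 96 MHz * (8 + 333333/1000000),
 * which is approximately 800 MHz.
 */
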
static int _clk_pll_set_rate(struct clk *clk, unsigned long rate)
{
	u32 reg;
	void __iomem *pllbase;

	long mfi, pdf, mfn, mfd = 999999;
	s64 temp64;
	unsigned long quad_parent_rate;
	unsigned long pll_hfsm, dp_ctl;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	pllbase = _get_pll_base(clk);

	quad_parent_rate = 4 * parent_rate;
	pdf = mfi = -1;
	while (++pdf < 16 && mfi < 5)
		mfi = rate * (pdf+1) / quad_parent_rate;
	if (mfi > 15)
		return -EINVAL;
	pdf--;

	temp64 = rate * (pdf+1) - quad_parent_rate * mfi;
	do_div(temp64, quad_parent_rate/1000000);
	mfn = (long)temp64;

	dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
	/* use dpdck0_2 */
	__raw_writel(dp_ctl | 0x1000L, pllbase + MXC_PLL_DP_CTL);
	pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
	if (pll_hfsm == 0) {
		reg = mfi << 4 | pdf;
		__raw_writel(reg, pllbase + MXC_PLL_DP_OP);
		__raw_writel(mfd, pllbase + MXC_PLL_DP_MFD);
		__raw_writel(mfn, pllbase + MXC_PLL_DP_MFN);
	} else {
		reg = mfi << 4 | pdf;
		__raw_writel(reg, pllbase + MXC_PLL_DP_HFS_OP);
		__raw_writel(mfd, pllbase + MXC_PLL_DP_HFS_MFD);
		__raw_writel(mfn, pllbase + MXC_PLL_DP_HFS_MFN);
	}

	return 0;
}

static int _clk_pll_enable(struct clk *clk)
{
	u32 reg;
	void __iomem *pllbase;
	int i = 0;

	pllbase = _get_pll_base(clk);
	reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
	if (reg & MXC_PLL_DP_CTL_UPEN)
		return 0;
	reg |= MXC_PLL_DP_CTL_UPEN;
	__raw_writel(reg, pllbase + MXC_PLL_DP_CTL);

	/* Wait for lock */
	do {
		reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
		if (reg & MXC_PLL_DP_CTL_LRF)
			break;

		udelay(1);
	} while (++i < MAX_DPLL_WAIT_TRIES);

	if (i == MAX_DPLL_WAIT_TRIES) {
		pr_err("MX5: pll locking failed\n");
		return -EINVAL;
	}

	return 0;
}

static void _clk_pll_disable(struct clk *clk)
{
	u32 reg;
	void __iomem *pllbase;

	pllbase = _get_pll_base(clk);
	reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) & ~MXC_PLL_DP_CTL_UPEN;
	__raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
}

static int _clk_pll1_sw_set_parent(struct clk *clk, struct clk *parent)
{
	u32 reg, step;

	reg = __raw_readl(MXC_CCM_CCSR);

	/* When switching from pll_main_clk to a bypass clock, first select a
	 * multiplexed clock in 'step_sel', then shift the glitchless mux
	 * 'pll1_sw_clk_sel'.
	 *
	 * When switching back, do it in reverse order
	 */
	if (parent == &pll1_main_clk) {
		/* Switch to pll1_main_clk */
		reg &= ~MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
		__raw_writel(reg, MXC_CCM_CCSR);
		/* step_clk mux switched to lp_apm, to save power. */
		reg = __raw_readl(MXC_CCM_CCSR);
		reg &= ~MXC_CCM_CCSR_STEP_SEL_MASK;
		reg |= (MXC_CCM_CCSR_STEP_SEL_LP_APM <<
				MXC_CCM_CCSR_STEP_SEL_OFFSET);
	} else {
		if (parent == &lp_apm_clk) {
			step = MXC_CCM_CCSR_STEP_SEL_LP_APM;
		} else if (parent == &pll2_sw_clk) {
			step = MXC_CCM_CCSR_STEP_SEL_PLL2_DIVIDED;
		} else if (parent == &pll3_sw_clk) {
			step = MXC_CCM_CCSR_STEP_SEL_PLL3_DIVIDED;
		} else
			return -EINVAL;

		reg &= ~MXC_CCM_CCSR_STEP_SEL_MASK;
		reg |= (step << MXC_CCM_CCSR_STEP_SEL_OFFSET);

		__raw_writel(reg, MXC_CCM_CCSR);

		/* Switch to step_clk */
		reg = __raw_readl(MXC_CCM_CCSR);
		reg |= MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
	}
	__raw_writel(reg, MXC_CCM_CCSR);

	return 0;
}

static unsigned long clk_pll1_sw_get_rate(struct clk *clk)
{
	u32 reg, div;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);
	reg = __raw_readl(MXC_CCM_CCSR);

	if (clk->parent == &pll2_sw_clk) {
		div = ((reg & MXC_CCM_CCSR_PLL2_PODF_MASK) >>
		       MXC_CCM_CCSR_PLL2_PODF_OFFSET) + 1;
	} else if (clk->parent == &pll3_sw_clk) {
		div = ((reg & MXC_CCM_CCSR_PLL3_PODF_MASK) >>
		       MXC_CCM_CCSR_PLL3_PODF_OFFSET) + 1;
	} else
		div = 1;

	return parent_rate / div;
}

static int _clk_pll2_sw_set_parent(struct clk *clk, struct clk *parent)
{
	u32 reg;

	reg = __raw_readl(MXC_CCM_CCSR);

	if (parent == &pll2_sw_clk)
		reg &= ~MXC_CCM_CCSR_PLL2_SW_CLK_SEL;
	else
		reg |= MXC_CCM_CCSR_PLL2_SW_CLK_SEL;

	__raw_writel(reg, MXC_CCM_CCSR);

	return 0;
}

static int _clk_lp_apm_set_parent(struct clk *clk, struct clk *parent)
{
	u32 reg;

	if (parent == &osc_clk)
		reg = __raw_readl(MXC_CCM_CCSR) & ~MXC_CCM_CCSR_LP_APM_SEL;
	else
		return -EINVAL;

	__raw_writel(reg, MXC_CCM_CCSR);

	return 0;
}

static unsigned long clk_cpu_get_rate(struct clk *clk)
{
	u32 cacrr, div;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);
	cacrr = __raw_readl(MXC_CCM_CACRR);
	div = (cacrr & MXC_CCM_CACRR_ARM_PODF_MASK) + 1;

	return parent_rate / div;
}

static int clk_cpu_set_rate(struct clk *clk, unsigned long rate)
{
	u32 reg, cpu_podf;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);
	cpu_podf = parent_rate / rate - 1;
	/* use post divider to change freq */
	reg = __raw_readl(MXC_CCM_CACRR);
	reg &= ~MXC_CCM_CACRR_ARM_PODF_MASK;
	reg |= cpu_podf << MXC_CCM_CACRR_ARM_PODF_OFFSET;
	__raw_writel(reg, MXC_CCM_CACRR);

	return 0;
}

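/*
 * Example for clk_cpu_set_rate() above: with pll1_sw_clk running at,
 * say, 800 MHz, a request for 400 MHz programs ARM_PODF = 1, i.e. a
 * divide-by-two post divider.
 */
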
static int _clk_periph_apm_set_parent(struct clk *clk, struct clk *parent)
{
	u32 reg, mux;
	int i = 0;

	mux = _get_mux(parent, &pll1_sw_clk, &pll3_sw_clk, &lp_apm_clk, NULL);

	reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_PERIPH_CLK_SEL_MASK;
	reg |= mux << MXC_CCM_CBCMR_PERIPH_CLK_SEL_OFFSET;
	__raw_writel(reg, MXC_CCM_CBCMR);

	/* Wait for lock */
	do {
		reg = __raw_readl(MXC_CCM_CDHIPR);
		if (!(reg & MXC_CCM_CDHIPR_PERIPH_CLK_SEL_BUSY))
			break;

		udelay(1);
	} while (++i < MAX_DPLL_WAIT_TRIES);

	if (i == MAX_DPLL_WAIT_TRIES) {
		pr_err("MX5: Set parent for periph_apm clock failed\n");
		return -EINVAL;
	}

	return 0;
}

static int _clk_main_bus_set_parent(struct clk *clk, struct clk *parent)
{
	u32 reg;

	reg = __raw_readl(MXC_CCM_CBCDR);

	if (parent == &pll2_sw_clk)
		reg &= ~MXC_CCM_CBCDR_PERIPH_CLK_SEL;
	else if (parent == &periph_apm_clk)
		reg |= MXC_CCM_CBCDR_PERIPH_CLK_SEL;
	else
		return -EINVAL;

	__raw_writel(reg, MXC_CCM_CBCDR);

	return 0;
}

static struct clk main_bus_clk = {
	.parent = &pll2_sw_clk,
	.set_parent = _clk_main_bus_set_parent,
};

static unsigned long clk_ahb_get_rate(struct clk *clk)
{
	u32 reg, div;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);
	reg = __raw_readl(MXC_CCM_CBCDR);
	div = ((reg & MXC_CCM_CBCDR_AHB_PODF_MASK) >>
	       MXC_CCM_CBCDR_AHB_PODF_OFFSET) + 1;

	return parent_rate / div;
}

static int _clk_ahb_set_rate(struct clk *clk, unsigned long rate)
{
	u32 reg, div;
	unsigned long parent_rate;
	int i = 0;

	parent_rate = clk_get_rate(clk->parent);
	div = parent_rate / rate;

	if (div > 8 || div < 1 || ((parent_rate / div) != rate))
		return -EINVAL;

	reg = __raw_readl(MXC_CCM_CBCDR);
	reg &= ~MXC_CCM_CBCDR_AHB_PODF_MASK;
	reg |= (div - 1) << MXC_CCM_CBCDR_AHB_PODF_OFFSET;
	__raw_writel(reg, MXC_CCM_CBCDR);

	/* Wait for lock */
	do {
		reg = __raw_readl(MXC_CCM_CDHIPR);
		if (!(reg & MXC_CCM_CDHIPR_AHB_PODF_BUSY))
			break;

		udelay(1);
	} while (++i < MAX_DPLL_WAIT_TRIES);

	if (i == MAX_DPLL_WAIT_TRIES) {
		pr_err("MX5: clk_ahb_set_rate failed\n");
		return -EINVAL;
	}

	return 0;
}

static unsigned long _clk_ahb_round_rate(struct clk *clk,
					 unsigned long rate)
{
	u32 div;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);
	div = parent_rate / rate;

	if (div > 8)
		div = 8;
	else if (div == 0)
		div++;

	return parent_rate / div;
}

static int _clk_max_enable(struct clk *clk)
{
	u32 reg;

	_clk_ccgr_enable(clk);

	/* Handshake with MAX when LPM is entered. */
	reg = __raw_readl(MXC_CCM_CLPCR);
	if (cpu_is_mx51())
		reg &= ~MX51_CCM_CLPCR_BYPASS_MAX_LPM_HS;
	else if (cpu_is_mx53())
		reg &= ~MX53_CCM_CLPCR_BYPASS_MAX_LPM_HS;
	__raw_writel(reg, MXC_CCM_CLPCR);

	return 0;
}

static void _clk_max_disable(struct clk *clk)
{
	u32 reg;

	_clk_ccgr_disable_inwait(clk);

	/* No handshake with MAX when LPM is entered as it's disabled. */
	reg = __raw_readl(MXC_CCM_CLPCR);
	if (cpu_is_mx51())
		reg |= MX51_CCM_CLPCR_BYPASS_MAX_LPM_HS;
	else if (cpu_is_mx53())
		reg |= MX53_CCM_CLPCR_BYPASS_MAX_LPM_HS;
	__raw_writel(reg, MXC_CCM_CLPCR);
}

static unsigned long clk_ipg_get_rate(struct clk *clk)
{
	u32 reg, div;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);
	reg = __raw_readl(MXC_CCM_CBCDR);
	div = ((reg & MXC_CCM_CBCDR_IPG_PODF_MASK) >>
	       MXC_CCM_CBCDR_IPG_PODF_OFFSET) + 1;

	return parent_rate / div;
}

static unsigned long clk_ipg_per_get_rate(struct clk *clk)
{
	u32 reg, prediv1, prediv2, podf;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	if (clk->parent == &main_bus_clk || clk->parent == &lp_apm_clk) {
		/* the main_bus_clk is the one before the DVFS engine */
		reg = __raw_readl(MXC_CCM_CBCDR);
		prediv1 = ((reg & MXC_CCM_CBCDR_PERCLK_PRED1_MASK) >>
			   MXC_CCM_CBCDR_PERCLK_PRED1_OFFSET) + 1;
		prediv2 = ((reg & MXC_CCM_CBCDR_PERCLK_PRED2_MASK) >>
			   MXC_CCM_CBCDR_PERCLK_PRED2_OFFSET) + 1;
		podf = ((reg & MXC_CCM_CBCDR_PERCLK_PODF_MASK) >>
			MXC_CCM_CBCDR_PERCLK_PODF_OFFSET) + 1;
		return parent_rate / (prediv1 * prediv2 * podf);
	} else if (clk->parent == &ipg_clk)
		return parent_rate;
	else
		BUG();
}

static int _clk_ipg_per_set_parent(struct clk *clk, struct clk *parent)
{
	u32 reg;

	reg = __raw_readl(MXC_CCM_CBCMR);
	reg &= ~MXC_CCM_CBCMR_PERCLK_LP_APM_CLK_SEL;
	reg &= ~MXC_CCM_CBCMR_PERCLK_IPG_CLK_SEL;

	if (parent == &ipg_clk)
		reg |= MXC_CCM_CBCMR_PERCLK_IPG_CLK_SEL;
	else if (parent == &lp_apm_clk)
		reg |= MXC_CCM_CBCMR_PERCLK_LP_APM_CLK_SEL;
	else if (parent != &main_bus_clk)
		return -EINVAL;

	__raw_writel(reg, MXC_CCM_CBCMR);

	return 0;
}

#define clk_nfc_set_parent NULL

static unsigned long clk_nfc_get_rate(struct clk *clk)
{
	unsigned long rate;
	u32 reg, div;

	reg = __raw_readl(MXC_CCM_CBCDR);
	div = ((reg & MXC_CCM_CBCDR_NFC_PODF_MASK) >>
	       MXC_CCM_CBCDR_NFC_PODF_OFFSET) + 1;
	rate = clk_get_rate(clk->parent) / div;
	WARN_ON(rate == 0);
	return rate;
}

static unsigned long clk_nfc_round_rate(struct clk *clk,
					unsigned long rate)
{
	u32 div;
	unsigned long parent_rate = clk_get_rate(clk->parent);

	if (!rate)
		return -EINVAL;

	div = parent_rate / rate;

	if (parent_rate % rate)
		div++;

	if (div > 8)
		return -EINVAL;

	return parent_rate / div;
}

static int clk_nfc_set_rate(struct clk *clk, unsigned long rate)
{
	u32 reg, div;

	div = clk_get_rate(clk->parent) / rate;
	if (div == 0)
		div++;
	if (((clk_get_rate(clk->parent) / div) != rate) || (div > 8))
		return -EINVAL;

	reg = __raw_readl(MXC_CCM_CBCDR);
	reg &= ~MXC_CCM_CBCDR_NFC_PODF_MASK;
	reg |= (div - 1) << MXC_CCM_CBCDR_NFC_PODF_OFFSET;
	__raw_writel(reg, MXC_CCM_CBCDR);

	while (__raw_readl(MXC_CCM_CDHIPR) &
			MXC_CCM_CDHIPR_NFC_IPG_INT_MEM_PODF_BUSY) {
	}

	return 0;
}

static unsigned long get_high_reference_clock_rate(struct clk *clk)
{
	return external_high_reference;
}

static unsigned long get_low_reference_clock_rate(struct clk *clk)
{
	return external_low_reference;
}

static unsigned long get_oscillator_reference_clock_rate(struct clk *clk)
{
	return oscillator_reference;
}

static unsigned long get_ckih2_reference_clock_rate(struct clk *clk)
{
	return ckih2_reference;
}

static unsigned long clk_emi_slow_get_rate(struct clk *clk)
{
	u32 reg, div;

	reg = __raw_readl(MXC_CCM_CBCDR);
	div = ((reg & MXC_CCM_CBCDR_EMI_PODF_MASK) >>
	       MXC_CCM_CBCDR_EMI_PODF_OFFSET) + 1;

	return clk_get_rate(clk->parent) / div;
}

static unsigned long _clk_ddr_hf_get_rate(struct clk *clk)
{
	unsigned long rate;
	u32 reg, div;

	reg = __raw_readl(MXC_CCM_CBCDR);
	div = ((reg & MXC_CCM_CBCDR_DDR_PODF_MASK) >>
	       MXC_CCM_CBCDR_DDR_PODF_OFFSET) + 1;
	rate = clk_get_rate(clk->parent) / div;

	return rate;
}

/* External high frequency clock */
static struct clk ckih_clk = {
	.get_rate = get_high_reference_clock_rate,
};

static struct clk ckih2_clk = {
	.get_rate = get_ckih2_reference_clock_rate,
};

static struct clk osc_clk = {
	.get_rate = get_oscillator_reference_clock_rate,
};

/* External low frequency (32 kHz) clock */
static struct clk ckil_clk = {
	.get_rate = get_low_reference_clock_rate,
};

static struct clk pll1_main_clk = {
	.parent = &osc_clk,
	.get_rate = clk_pll_get_rate,
	.enable = _clk_pll_enable,
	.disable = _clk_pll_disable,
};

/* Clock tree block diagram (WIP):
 *	CCM: Clock Controller Module
 *
 *	PLL output -> |
 *		      | CCM Switcher -> CCM_CLK_ROOT_GEN ->
 *	PLL bypass -> |
 *
 */

/* PLL1 SW supplies to ARM core */
static struct clk pll1_sw_clk = {
	.parent = &pll1_main_clk,
	.set_parent = _clk_pll1_sw_set_parent,
	.get_rate = clk_pll1_sw_get_rate,
};

/* PLL2 SW supplies to AXI/AHB/IP buses */
static struct clk pll2_sw_clk = {
	.parent = &osc_clk,
	.get_rate = clk_pll_get_rate,
	.set_rate = _clk_pll_set_rate,
	.set_parent = _clk_pll2_sw_set_parent,
	.enable = _clk_pll_enable,
	.disable = _clk_pll_disable,
};

/* PLL3 SW supplies to serial clocks like USB, SSI, etc. */
static struct clk pll3_sw_clk = {
	.parent = &osc_clk,
	.set_rate = _clk_pll_set_rate,
	.get_rate = clk_pll_get_rate,
	.enable = _clk_pll_enable,
	.disable = _clk_pll_disable,
};

/* PLL4 SW supplies to LVDS Display Bridge (LDB) */
static struct clk mx53_pll4_sw_clk = {
	.parent = &osc_clk,
	.set_rate = _clk_pll_set_rate,
	.enable = _clk_pll_enable,
	.disable = _clk_pll_disable,
};

/* Low-power Audio Playback Mode clock */
static struct clk lp_apm_clk = {
	.parent = &osc_clk,
	.set_parent = _clk_lp_apm_set_parent,
};

static struct clk periph_apm_clk = {
	.parent = &pll1_sw_clk,
	.set_parent = _clk_periph_apm_set_parent,
};

static struct clk cpu_clk = {
	.parent = &pll1_sw_clk,
	.get_rate = clk_cpu_get_rate,
	.set_rate = clk_cpu_set_rate,
};

static struct clk ahb_clk = {
	.parent = &main_bus_clk,
	.get_rate = clk_ahb_get_rate,
	.set_rate = _clk_ahb_set_rate,
	.round_rate = _clk_ahb_round_rate,
};

static struct clk iim_clk = {
	.parent = &ipg_clk,
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG15_OFFSET,
};

/* Main IP interface clock for access to registers */
static struct clk ipg_clk = {
	.parent = &ahb_clk,
	.get_rate = clk_ipg_get_rate,
};

static struct clk ipg_perclk = {
	.parent = &lp_apm_clk,
	.get_rate = clk_ipg_per_get_rate,
	.set_parent = _clk_ipg_per_set_parent,
};

static struct clk ahb_max_clk = {
	.parent = &ahb_clk,
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
	.enable = _clk_max_enable,
	.disable = _clk_max_disable,
};

static struct clk aips_tz1_clk = {
	.parent = &ahb_clk,
	.secondary = &ahb_max_clk,
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
	.enable = _clk_ccgr_enable,
	.disable = _clk_ccgr_disable_inwait,
};

static struct clk aips_tz2_clk = {
	.parent = &ahb_clk,
	.secondary = &ahb_max_clk,
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
	.enable = _clk_ccgr_enable,
	.disable = _clk_ccgr_disable_inwait,
};

static struct clk gpc_dvfs_clk = {
	.enable_reg = MXC_CCM_CCGR5,
	.enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
	.enable = _clk_ccgr_enable,
	.disable = _clk_ccgr_disable,
};

static struct clk gpt_32k_clk = {
	.id = 0,
	.parent = &ckil_clk,
};

static struct clk dummy_clk = {
	.id = 0,
};

static struct clk emi_slow_clk = {
	.parent = &pll2_sw_clk,
	.enable_reg = MXC_CCM_CCGR5,
	.enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
	.enable = _clk_ccgr_enable,
	.disable = _clk_ccgr_disable_inwait,
	.get_rate = clk_emi_slow_get_rate,
};

static int clk_ipu_enable(struct clk *clk)
{
	u32 reg;

	_clk_ccgr_enable(clk);

	/* Enable handshake with IPU when certain clock rates are changed */
	reg = __raw_readl(MXC_CCM_CCDR);
	reg &= ~MXC_CCM_CCDR_IPU_HS_MASK;
	__raw_writel(reg, MXC_CCM_CCDR);

	/* Enable handshake with IPU when LPM is entered */
	reg = __raw_readl(MXC_CCM_CLPCR);
	reg &= ~MXC_CCM_CLPCR_BYPASS_IPU_LPM_HS;
	__raw_writel(reg, MXC_CCM_CLPCR);

	return 0;
}

static void clk_ipu_disable(struct clk *clk)
{
	u32 reg;

	_clk_ccgr_disable(clk);

	/* Disable handshake with IPU when dividers are changed */
	reg = __raw_readl(MXC_CCM_CCDR);
	reg |= MXC_CCM_CCDR_IPU_HS_MASK;
	__raw_writel(reg, MXC_CCM_CCDR);

	/* Disable handshake with IPU when LPM is entered */
	reg = __raw_readl(MXC_CCM_CLPCR);
	reg |= MXC_CCM_CLPCR_BYPASS_IPU_LPM_HS;
	__raw_writel(reg, MXC_CCM_CLPCR);
}

static struct clk ahbmux1_clk = {
	.parent = &ahb_clk,
	.secondary = &ahb_max_clk,
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
	.enable = _clk_ccgr_enable,
	.disable = _clk_ccgr_disable_inwait,
};

static struct clk ipu_sec_clk = {
	.parent = &emi_fast_clk,
	.secondary = &ahbmux1_clk,
};

static struct clk ddr_hf_clk = {
	.parent = &pll1_sw_clk,
	.get_rate = _clk_ddr_hf_get_rate,
};

static struct clk ddr_clk = {
	.parent = &ddr_hf_clk,
};

/* clock definitions for MIPI HSC unit which has been removed
 * from documentation, but not from hardware
 */
static int _clk_hsc_enable(struct clk *clk)
{
	u32 reg;

	_clk_ccgr_enable(clk);
	/* Handshake with IPU when certain clock rates are changed. */
	reg = __raw_readl(MXC_CCM_CCDR);
	reg &= ~MXC_CCM_CCDR_HSC_HS_MASK;
	__raw_writel(reg, MXC_CCM_CCDR);

	reg = __raw_readl(MXC_CCM_CLPCR);
	reg &= ~MXC_CCM_CLPCR_BYPASS_HSC_LPM_HS;
	__raw_writel(reg, MXC_CCM_CLPCR);

	return 0;
}

static void _clk_hsc_disable(struct clk *clk)
{
	u32 reg;

	_clk_ccgr_disable(clk);
	/* No handshake with HSC as it's not enabled. */
	reg = __raw_readl(MXC_CCM_CCDR);
	reg |= MXC_CCM_CCDR_HSC_HS_MASK;
	__raw_writel(reg, MXC_CCM_CCDR);

	reg = __raw_readl(MXC_CCM_CLPCR);
	reg |= MXC_CCM_CLPCR_BYPASS_HSC_LPM_HS;
	__raw_writel(reg, MXC_CCM_CLPCR);
}

static struct clk mipi_hsp_clk = {
	.parent = &ipu_clk,
	.enable_reg = MXC_CCM_CCGR4,
	.enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
	.enable = _clk_hsc_enable,
	.disable = _clk_hsc_disable,
	.secondary = &mipi_hsc1_clk,
};

#define DEFINE_CLOCK_CCGR(name, i, er, es, pfx, p, s) \
	static struct clk name = { \
		.id = i, \
		.enable_reg = er, \
		.enable_shift = es, \
		.get_rate = pfx##_get_rate, \
		.set_rate = pfx##_set_rate, \
		.round_rate = pfx##_round_rate, \
		.set_parent = pfx##_set_parent, \
		.enable = _clk_ccgr_enable, \
		.disable = _clk_ccgr_disable, \
		.parent = p, \
		.secondary = s, \
	}

#define DEFINE_CLOCK_MAX(name, i, er, es, pfx, p, s) \
	static struct clk name = { \
		.id = i, \
		.enable_reg = er, \
		.enable_shift = es, \
		.get_rate = pfx##_get_rate, \
		.set_rate = pfx##_set_rate, \
		.set_parent = pfx##_set_parent, \
		.enable = _clk_max_enable, \
		.disable = _clk_max_disable, \
		.parent = p, \
		.secondary = s, \
	}

#define CLK_GET_RATE(name, nr, bitsname) \
static unsigned long clk_##name##_get_rate(struct clk *clk) \
{ \
	u32 reg, pred, podf; \
 \
	reg = __raw_readl(MXC_CCM_CSCDR##nr); \
	pred = (reg & MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK) \
		>> MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET; \
	podf = (reg & MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK) \
		>> MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET; \
 \
	return DIV_ROUND_CLOSEST(clk_get_rate(clk->parent), \
			(pred + 1) * (podf + 1)); \
}

#define CLK_SET_PARENT(name, nr, bitsname) \
static int clk_##name##_set_parent(struct clk *clk, struct clk *parent) \
{ \
	u32 reg, mux; \
 \
	mux = _get_mux(parent, &pll1_sw_clk, &pll2_sw_clk, \
			&pll3_sw_clk, &lp_apm_clk); \
	reg = __raw_readl(MXC_CCM_CSCMR##nr) & \
		~MXC_CCM_CSCMR##nr##_##bitsname##_CLK_SEL_MASK; \
	reg |= mux << MXC_CCM_CSCMR##nr##_##bitsname##_CLK_SEL_OFFSET; \
	__raw_writel(reg, MXC_CCM_CSCMR##nr); \
 \
	return 0; \
}

#define CLK_SET_RATE(name, nr, bitsname) \
static int clk_##name##_set_rate(struct clk *clk, unsigned long rate) \
{ \
	u32 reg, div, parent_rate; \
	u32 pre = 0, post = 0; \
 \
	parent_rate = clk_get_rate(clk->parent); \
	div = parent_rate / rate; \
 \
	if ((parent_rate / div) != rate) \
		return -EINVAL; \
 \
	__calc_pre_post_dividers(div, &pre, &post, \
		(MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK >> \
		MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET) + 1, \
		(MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK >> \
		MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET) + 1); \
 \
	/* Set the clock pre- and post-dividers */ \
	reg = __raw_readl(MXC_CCM_CSCDR##nr) & \
		~(MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK \
		| MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK); \
	reg |= (post - 1) << \
		MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET; \
	reg |= (pre - 1) << \
		MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET; \
	__raw_writel(reg, MXC_CCM_CSCDR##nr); \
 \
	return 0; \
}

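/*
 * The CLK_GET_RATE/CLK_SET_PARENT/CLK_SET_RATE macros above stamp out
 * helpers for peripherals whose dividers live in a CSCDR register and
 * whose parent mux lives in the matching CSCMR register. For example,
 * CLK_GET_RATE(uart, 1, UART) below expands to clk_uart_get_rate(),
 * which reads the UART pred/podf fields from MXC_CCM_CSCDR1.
 */
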
/* UART */
CLK_GET_RATE(uart, 1, UART)
CLK_SET_PARENT(uart, 1, UART)

static struct clk uart_root_clk = {
	.parent = &pll2_sw_clk,
	.get_rate = clk_uart_get_rate,
	.set_parent = clk_uart_set_parent,
};

/* USBOH3 */
CLK_GET_RATE(usboh3, 1, USBOH3)
CLK_SET_PARENT(usboh3, 1, USBOH3)

static struct clk usboh3_clk = {
	.parent = &pll2_sw_clk,
	.get_rate = clk_usboh3_get_rate,
	.set_parent = clk_usboh3_set_parent,
	.enable = _clk_ccgr_enable,
	.disable = _clk_ccgr_disable,
	.enable_reg = MXC_CCM_CCGR2,
	.enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
};

static struct clk usb_ahb_clk = {
	.parent = &ipg_clk,
	.enable = _clk_ccgr_enable,
	.disable = _clk_ccgr_disable,
	.enable_reg = MXC_CCM_CCGR2,
	.enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
};

static int clk_usb_phy1_set_parent(struct clk *clk, struct clk *parent)
{
	u32 reg;

	reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USB_PHY_CLK_SEL;

	if (parent == &pll3_sw_clk)
		reg |= 1 << MXC_CCM_CSCMR1_USB_PHY_CLK_SEL_OFFSET;

	__raw_writel(reg, MXC_CCM_CSCMR1);

	return 0;
}

static struct clk usb_phy1_clk = {
	.parent = &pll3_sw_clk,
	.set_parent = clk_usb_phy1_set_parent,
	.enable = _clk_ccgr_enable,
	.enable_reg = MXC_CCM_CCGR2,
	.enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
	.disable = _clk_ccgr_disable,
};

/* eCSPI */
CLK_GET_RATE(ecspi, 2, CSPI)
CLK_SET_PARENT(ecspi, 1, CSPI)

static struct clk ecspi_main_clk = {
	.parent = &pll3_sw_clk,
	.get_rate = clk_ecspi_get_rate,
	.set_parent = clk_ecspi_set_parent,
};

/* eSDHC */
CLK_GET_RATE(esdhc1, 1, ESDHC1_MSHC1)
CLK_SET_PARENT(esdhc1, 1, ESDHC1_MSHC1)
CLK_SET_RATE(esdhc1, 1, ESDHC1_MSHC1)

/* mx51 specific */
CLK_GET_RATE(esdhc2, 1, ESDHC2_MSHC2)
CLK_SET_PARENT(esdhc2, 1, ESDHC2_MSHC2)
CLK_SET_RATE(esdhc2, 1, ESDHC2_MSHC2)

static int clk_esdhc3_set_parent(struct clk *clk, struct clk *parent)
{
	u32 reg;

	reg = __raw_readl(MXC_CCM_CSCMR1);
	if (parent == &esdhc1_clk)
		reg &= ~MXC_CCM_CSCMR1_ESDHC3_CLK_SEL;
	else if (parent == &esdhc2_clk)
		reg |= MXC_CCM_CSCMR1_ESDHC3_CLK_SEL;
	else
		return -EINVAL;
	__raw_writel(reg, MXC_CCM_CSCMR1);

	return 0;
}

static int clk_esdhc4_set_parent(struct clk *clk, struct clk *parent)
{
	u32 reg;

	reg = __raw_readl(MXC_CCM_CSCMR1);
	if (parent == &esdhc1_clk)
		reg &= ~MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
	else if (parent == &esdhc2_clk)
		reg |= MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
	else
		return -EINVAL;
	__raw_writel(reg, MXC_CCM_CSCMR1);

	return 0;
}

/* mx53 specific */
static int clk_esdhc2_mx53_set_parent(struct clk *clk, struct clk *parent)
{
	u32 reg;

	reg = __raw_readl(MXC_CCM_CSCMR1);
	if (parent == &esdhc1_clk)
		reg &= ~MXC_CCM_CSCMR1_ESDHC2_MSHC2_MX53_CLK_SEL;
	else if (parent == &esdhc3_mx53_clk)
		reg |= MXC_CCM_CSCMR1_ESDHC2_MSHC2_MX53_CLK_SEL;
	else
		return -EINVAL;
	__raw_writel(reg, MXC_CCM_CSCMR1);

	return 0;
}

CLK_GET_RATE(esdhc3_mx53, 1, ESDHC3_MX53)
CLK_SET_PARENT(esdhc3_mx53, 1, ESDHC3_MX53)
CLK_SET_RATE(esdhc3_mx53, 1, ESDHC3_MX53)

static int clk_esdhc4_mx53_set_parent(struct clk *clk, struct clk *parent)
{
	u32 reg;

	reg = __raw_readl(MXC_CCM_CSCMR1);
	if (parent == &esdhc1_clk)
		reg &= ~MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
	else if (parent == &esdhc3_mx53_clk)
		reg |= MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
	else
		return -EINVAL;
	__raw_writel(reg, MXC_CCM_CSCMR1);

	return 0;
}

#define DEFINE_CLOCK_FULL(name, i, er, es, gr, sr, e, d, p, s) \
	static struct clk name = { \
		.id = i, \
		.enable_reg = er, \
		.enable_shift = es, \
		.get_rate = gr, \
		.set_rate = sr, \
		.enable = e, \
		.disable = d, \
		.parent = p, \
		.secondary = s, \
	}

#define DEFINE_CLOCK(name, i, er, es, gr, sr, p, s) \
	DEFINE_CLOCK_FULL(name, i, er, es, gr, sr, \
			_clk_ccgr_enable, _clk_ccgr_disable, p, s)

/* Shared peripheral bus arbiter */
DEFINE_CLOCK(spba_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG0_OFFSET,
	NULL, NULL, &ipg_clk, NULL);

/* UART */
DEFINE_CLOCK(uart1_ipg_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG3_OFFSET,
	NULL, NULL, &ipg_clk, &aips_tz1_clk);
DEFINE_CLOCK(uart2_ipg_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG5_OFFSET,
	NULL, NULL, &ipg_clk, &aips_tz1_clk);
DEFINE_CLOCK(uart3_ipg_clk, 2, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG7_OFFSET,
	NULL, NULL, &ipg_clk, &spba_clk);
DEFINE_CLOCK(uart4_ipg_clk, 3, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG4_OFFSET,
	NULL, NULL, &ipg_clk, &spba_clk);
DEFINE_CLOCK(uart5_ipg_clk, 4, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG6_OFFSET,
	NULL, NULL, &ipg_clk, &spba_clk);
DEFINE_CLOCK(uart1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG4_OFFSET,
	NULL, NULL, &uart_root_clk, &uart1_ipg_clk);
DEFINE_CLOCK(uart2_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG6_OFFSET,
	NULL, NULL, &uart_root_clk, &uart2_ipg_clk);
DEFINE_CLOCK(uart3_clk, 2, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG8_OFFSET,
	NULL, NULL, &uart_root_clk, &uart3_ipg_clk);
DEFINE_CLOCK(uart4_clk, 3, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG5_OFFSET,
	NULL, NULL, &uart_root_clk, &uart4_ipg_clk);
DEFINE_CLOCK(uart5_clk, 4, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG7_OFFSET,
	NULL, NULL, &uart_root_clk, &uart5_ipg_clk);

/* GPT */
DEFINE_CLOCK(gpt_ipg_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG10_OFFSET,
	NULL, NULL, &ipg_clk, NULL);
DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG9_OFFSET,
	NULL, NULL, &ipg_clk, &gpt_ipg_clk);

DEFINE_CLOCK(pwm1_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG6_OFFSET,
	NULL, NULL, &ipg_clk, NULL);
DEFINE_CLOCK(pwm2_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG8_OFFSET,
	NULL, NULL, &ipg_clk, NULL);

/* I2C */
DEFINE_CLOCK(i2c1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG9_OFFSET,
	NULL, NULL, &ipg_perclk, NULL);
DEFINE_CLOCK(i2c2_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG10_OFFSET,
	NULL, NULL, &ipg_perclk, NULL);
DEFINE_CLOCK(hsi2c_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG11_OFFSET,
	NULL, NULL, &ipg_clk, NULL);
DEFINE_CLOCK(i2c3_mx53_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG11_OFFSET,
	NULL, NULL, &ipg_perclk, NULL);

/* FEC */
DEFINE_CLOCK(fec_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG12_OFFSET,
	NULL, NULL, &ipg_clk, NULL);

/* NFC */
DEFINE_CLOCK_CCGR(nfc_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG10_OFFSET,
	clk_nfc, &emi_slow_clk, NULL);

/* SSI */
DEFINE_CLOCK(ssi1_ipg_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG8_OFFSET,
	NULL, NULL, &ipg_clk, NULL);
DEFINE_CLOCK(ssi1_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG9_OFFSET,
	NULL, NULL, &pll3_sw_clk, &ssi1_ipg_clk);
DEFINE_CLOCK(ssi2_ipg_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG10_OFFSET,
	NULL, NULL, &ipg_clk, NULL);
DEFINE_CLOCK(ssi2_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG11_OFFSET,
	NULL, NULL, &pll3_sw_clk, &ssi2_ipg_clk);
DEFINE_CLOCK(ssi3_ipg_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG12_OFFSET,
	NULL, NULL, &ipg_clk, NULL);
DEFINE_CLOCK(ssi3_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG13_OFFSET,
	NULL, NULL, &pll3_sw_clk, &ssi3_ipg_clk);

/* eCSPI */
DEFINE_CLOCK_FULL(ecspi1_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG9_OFFSET,
	NULL, NULL, _clk_ccgr_enable_inrun, _clk_ccgr_disable,
	&ipg_clk, &spba_clk);
DEFINE_CLOCK(ecspi1_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG10_OFFSET,
	NULL, NULL, &ecspi_main_clk, &ecspi1_ipg_clk);
DEFINE_CLOCK_FULL(ecspi2_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG11_OFFSET,
	NULL, NULL, _clk_ccgr_enable_inrun, _clk_ccgr_disable,
	&ipg_clk, &aips_tz2_clk);
DEFINE_CLOCK(ecspi2_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG12_OFFSET,
	NULL, NULL, &ecspi_main_clk, &ecspi2_ipg_clk);

/* CSPI */
DEFINE_CLOCK(cspi_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG9_OFFSET,
	NULL, NULL, &ipg_clk, &aips_tz2_clk);
DEFINE_CLOCK(cspi_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG13_OFFSET,
	NULL, NULL, &ipg_clk, &cspi_ipg_clk);

/* SDMA */
DEFINE_CLOCK(sdma_clk, 1, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG15_OFFSET,
	NULL, NULL, &ahb_clk, NULL);

/* eSDHC */
DEFINE_CLOCK_FULL(esdhc1_ipg_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG0_OFFSET,
	NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
DEFINE_CLOCK_MAX(esdhc1_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG1_OFFSET,
	clk_esdhc1, &pll2_sw_clk, &esdhc1_ipg_clk);
DEFINE_CLOCK_FULL(esdhc2_ipg_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG2_OFFSET,
	NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
DEFINE_CLOCK_FULL(esdhc3_ipg_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG4_OFFSET,
	NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
DEFINE_CLOCK_FULL(esdhc4_ipg_clk, 3, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG6_OFFSET,
	NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);

/* mx51 specific */
DEFINE_CLOCK_MAX(esdhc2_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG3_OFFSET,
	clk_esdhc2, &pll2_sw_clk, &esdhc2_ipg_clk);

static struct clk esdhc3_clk = {
	.id = 2,
	.parent = &esdhc1_clk,
	.set_parent = clk_esdhc3_set_parent,
	.enable_reg = MXC_CCM_CCGR3,
	.enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
	.enable = _clk_max_enable,
	.disable = _clk_max_disable,
	.secondary = &esdhc3_ipg_clk,
};

static struct clk esdhc4_clk = {
	.id = 3,
	.parent = &esdhc1_clk,
	.set_parent = clk_esdhc4_set_parent,
	.enable_reg = MXC_CCM_CCGR3,
	.enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
	.enable = _clk_max_enable,
	.disable = _clk_max_disable,
	.secondary = &esdhc4_ipg_clk,
};

/* mx53 specific */
static struct clk esdhc2_mx53_clk = {
	.id = 2,
	.parent = &esdhc1_clk,
	.set_parent = clk_esdhc2_mx53_set_parent,
	.enable_reg = MXC_CCM_CCGR3,
	.enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
	.enable = _clk_max_enable,
	.disable = _clk_max_disable,
	.secondary = &esdhc3_ipg_clk,
};

DEFINE_CLOCK_MAX(esdhc3_mx53_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG5_OFFSET,
	clk_esdhc3_mx53, &pll2_sw_clk, &esdhc2_ipg_clk);

static struct clk esdhc4_mx53_clk = {
	.id = 3,
	.parent = &esdhc1_clk,
	.set_parent = clk_esdhc4_mx53_set_parent,
	.enable_reg = MXC_CCM_CCGR3,
	.enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
	.enable = _clk_max_enable,
	.disable = _clk_max_disable,
	.secondary = &esdhc4_ipg_clk,
};

DEFINE_CLOCK(mipi_esc_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG5_OFFSET,
	NULL, NULL, NULL, &pll2_sw_clk);
DEFINE_CLOCK(mipi_hsc2_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG4_OFFSET,
	NULL, NULL, &mipi_esc_clk, &pll2_sw_clk);
DEFINE_CLOCK(mipi_hsc1_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG3_OFFSET,
	NULL, NULL, &mipi_hsc2_clk, &pll2_sw_clk);

/* IPU */
DEFINE_CLOCK_FULL(ipu_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG5_OFFSET,
	NULL, NULL, clk_ipu_enable, clk_ipu_disable, &ahb_clk, &ipu_sec_clk);

DEFINE_CLOCK_FULL(emi_fast_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG7_OFFSET,
	NULL, NULL, _clk_ccgr_enable, _clk_ccgr_disable_inwait,
	&ddr_clk, NULL);

DEFINE_CLOCK(ipu_di0_clk, 0, MXC_CCM_CCGR6, MXC_CCM_CCGRx_CG5_OFFSET,
	NULL, NULL, &pll3_sw_clk, NULL);
DEFINE_CLOCK(ipu_di1_clk, 0, MXC_CCM_CCGR6, MXC_CCM_CCGRx_CG6_OFFSET,
	NULL, NULL, &pll3_sw_clk, NULL);

#define _REGISTER_CLOCK(d, n, c) \
	{ \
		.dev_id = d, \
		.con_id = n, \
		.clk = &c, \
	},

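/*
 * _REGISTER_CLOCK() builds the clk_lookup entries that
 * mx51_clocks_init() and mx53_clocks_init() register with clkdev_add(),
 * so drivers can look their clocks up by device and connection id
 * through clk_get().
 */
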
static struct clk_lookup mx51_lookups[] = {
	/* i.mx51 has the i.mx21 type uart */
	_REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
	_REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
	_REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
	_REGISTER_CLOCK(NULL, "gpt", gpt_clk)
	/* i.mx51 has the i.mx27 type fec */
	_REGISTER_CLOCK("imx27-fec.0", NULL, fec_clk)
	_REGISTER_CLOCK("mxc_pwm.0", "pwm", pwm1_clk)
	_REGISTER_CLOCK("mxc_pwm.1", "pwm", pwm2_clk)
	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
	_REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
	_REGISTER_CLOCK("imx-i2c.2", NULL, hsi2c_clk)
	_REGISTER_CLOCK("mxc-ehci.0", "usb", usboh3_clk)
	_REGISTER_CLOCK("mxc-ehci.0", "usb_ahb", usb_ahb_clk)
	_REGISTER_CLOCK("mxc-ehci.0", "usb_phy1", usb_phy1_clk)
	_REGISTER_CLOCK("mxc-ehci.1", "usb", usboh3_clk)
	_REGISTER_CLOCK("mxc-ehci.1", "usb_ahb", usb_ahb_clk)
	_REGISTER_CLOCK("mxc-ehci.2", "usb", usboh3_clk)
	_REGISTER_CLOCK("mxc-ehci.2", "usb_ahb", usb_ahb_clk)
	_REGISTER_CLOCK("fsl-usb2-udc", "usb", usboh3_clk)
	_REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", ahb_clk)
	_REGISTER_CLOCK("imx-keypad", NULL, dummy_clk)
	_REGISTER_CLOCK("mxc_nand", NULL, nfc_clk)
	_REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
	_REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
	_REGISTER_CLOCK("imx-ssi.2", NULL, ssi3_clk)
	/* i.mx51 has the i.mx35 type sdma */
	_REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
	_REGISTER_CLOCK(NULL, "ckih", ckih_clk)
	_REGISTER_CLOCK(NULL, "ckih2", ckih2_clk)
	_REGISTER_CLOCK(NULL, "gpt_32k", gpt_32k_clk)
	_REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
	_REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
	/* i.mx51 has the i.mx35 type cspi */
	_REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
	_REGISTER_CLOCK("sdhci-esdhc-imx51.0", NULL, esdhc1_clk)
	_REGISTER_CLOCK("sdhci-esdhc-imx51.1", NULL, esdhc2_clk)
	_REGISTER_CLOCK("sdhci-esdhc-imx51.2", NULL, esdhc3_clk)
	_REGISTER_CLOCK("sdhci-esdhc-imx51.3", NULL, esdhc4_clk)
	_REGISTER_CLOCK(NULL, "cpu_clk", cpu_clk)
	_REGISTER_CLOCK(NULL, "iim_clk", iim_clk)
	_REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk)
	_REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk)
	_REGISTER_CLOCK(NULL, "mipi_hsp", mipi_hsp_clk)
	_REGISTER_CLOCK("imx-ipuv3", NULL, ipu_clk)
	_REGISTER_CLOCK("imx-ipuv3", "di0", ipu_di0_clk)
	_REGISTER_CLOCK("imx-ipuv3", "di1", ipu_di1_clk)
	_REGISTER_CLOCK(NULL, "gpc_dvfs", gpc_dvfs_clk)
};

static struct clk_lookup mx53_lookups[] = {
	/* i.mx53 has the i.mx21 type uart */
	_REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
	_REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
	_REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
	_REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
	_REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
	_REGISTER_CLOCK(NULL, "gpt", gpt_clk)
	/* i.mx53 has the i.mx25 type fec */
	_REGISTER_CLOCK("imx25-fec.0", NULL, fec_clk)
	_REGISTER_CLOCK(NULL, "iim_clk", iim_clk)
	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
	_REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
	_REGISTER_CLOCK("imx-i2c.2", NULL, i2c3_mx53_clk)
	/* i.mx53 has the i.mx51 type ecspi */
	_REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
	_REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
	/* i.mx53 has the i.mx25 type cspi */
	_REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
	_REGISTER_CLOCK("sdhci-esdhc-imx53.0", NULL, esdhc1_clk)
	_REGISTER_CLOCK("sdhci-esdhc-imx53.1", NULL, esdhc2_mx53_clk)
	_REGISTER_CLOCK("sdhci-esdhc-imx53.2", NULL, esdhc3_mx53_clk)
	_REGISTER_CLOCK("sdhci-esdhc-imx53.3", NULL, esdhc4_mx53_clk)
	_REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk)
	_REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk)
	/* i.mx53 has the i.mx35 type sdma */
	_REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
	_REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
	_REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
	_REGISTER_CLOCK("imx-ssi.2", NULL, ssi3_clk)
	_REGISTER_CLOCK("imx-keypad", NULL, dummy_clk)
};

static void clk_tree_init(void)
{
	u32 reg;

	ipg_perclk.set_parent(&ipg_perclk, &lp_apm_clk);

	/*
	 * Initialise the IPG PER CLK dividers to 3. IPG_PER_CLK should be at
	 * 8 MHz; it's derived from lp_apm.
	 *
	 * FIXME: Verify if true for all boards
	 */
	reg = __raw_readl(MXC_CCM_CBCDR);
	reg &= ~MXC_CCM_CBCDR_PERCLK_PRED1_MASK;
	reg &= ~MXC_CCM_CBCDR_PERCLK_PRED2_MASK;
	reg &= ~MXC_CCM_CBCDR_PERCLK_PODF_MASK;
	reg |= (2 << MXC_CCM_CBCDR_PERCLK_PRED1_OFFSET);
	__raw_writel(reg, MXC_CCM_CBCDR);
}

int __init mx51_clocks_init(unsigned long ckil, unsigned long osc,
			unsigned long ckih1, unsigned long ckih2)
{
	int i;

	external_low_reference = ckil;
	external_high_reference = ckih1;
	ckih2_reference = ckih2;
	oscillator_reference = osc;

	for (i = 0; i < ARRAY_SIZE(mx51_lookups); i++)
		clkdev_add(&mx51_lookups[i]);

	clk_tree_init();

	clk_enable(&cpu_clk);
	clk_enable(&main_bus_clk);

	clk_enable(&iim_clk);
	mx51_revision();
	clk_disable(&iim_clk);
	mx51_display_revision();

	/* move usb_phy_clk to 24 MHz */
	clk_set_parent(&usb_phy1_clk, &osc_clk);

	/* set the usboh3_clk parent to pll2_sw_clk */
	clk_set_parent(&usboh3_clk, &pll2_sw_clk);

	/* Set SDHC parents to be PLL2 */
	clk_set_parent(&esdhc1_clk, &pll2_sw_clk);
	clk_set_parent(&esdhc2_clk, &pll2_sw_clk);

	/* set SDHC root clock to 166.25 MHz */
	clk_set_rate(&esdhc1_clk, 166250000);
	clk_set_rate(&esdhc2_clk, 166250000);

	/* System timer */
	mxc_timer_init(&gpt_clk, MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR),
		MX51_MXC_INT_GPT);

	return 0;
}

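/*
 * Board code calls mx51_clocks_init()/mx53_clocks_init() with its
 * reference clock rates, e.g. (illustrative values only, not taken from
 * a particular board file):
 *
 *	mx51_clocks_init(32768, 24000000, 22579200, 24576000);
 */
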
int __init mx53_clocks_init(unsigned long ckil, unsigned long osc,
			unsigned long ckih1, unsigned long ckih2)
{
	int i;

	external_low_reference = ckil;
	external_high_reference = ckih1;
	ckih2_reference = ckih2;
	oscillator_reference = osc;

	for (i = 0; i < ARRAY_SIZE(mx53_lookups); i++)
		clkdev_add(&mx53_lookups[i]);

	clk_tree_init();

	clk_set_parent(&uart_root_clk, &pll3_sw_clk);
	clk_enable(&cpu_clk);
	clk_enable(&main_bus_clk);

	clk_enable(&iim_clk);
	mx53_revision();
	clk_disable(&iim_clk);
	mx53_display_revision();

	/* Set SDHC parents to be PLL2 */
	clk_set_parent(&esdhc1_clk, &pll2_sw_clk);
	clk_set_parent(&esdhc3_mx53_clk, &pll2_sw_clk);

	/* set SDHC root clock to 200 MHz */
	clk_set_rate(&esdhc1_clk, 200000000);
	clk_set_rate(&esdhc3_mx53_clk, 200000000);

	/* System timer */
	mxc_timer_init(&gpt_clk, MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR),
		MX53_INT_GPT);

	return 0;
}