/arch/arm/mach-fsm/clock-local.c

https://bitbucket.org/sammyz/iscream_thunderc-2.6.35-rebase

/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <mach/msm_iomap.h>
#include <mach/clk.h>

#include "clock.h"
#include "clock-local.h"
/* When enabling/disabling a clock, check the halt bit up to this number
 * of times (with a 1 us delay in between) before continuing. */
#define HALT_CHECK_MAX_LOOPS	100
/* For clocks without halt checking, wait this long after enables/disables. */
#define HALT_CHECK_DELAY_US	10
DEFINE_SPINLOCK(local_clock_reg_lock);
struct clk_freq_tbl local_dummy_freq = F_END;

#define MAX_SOURCES 20
static int src_votes[MAX_SOURCES];
static DEFINE_SPINLOCK(src_vote_lock);

unsigned local_sys_vdd_votes[NUM_SYS_VDD_LEVELS];
static DEFINE_SPINLOCK(sys_vdd_vote_lock);

static int local_clk_enable_nolock(unsigned id);
static int local_clk_disable_nolock(unsigned id);
static int local_src_enable_nolock(int src);
static int local_src_disable_nolock(int src);

/*
 * Common Set-Rate Functions
 */

/* For clocks with integer dividers only. */
void set_rate_basic(struct clk_local *clk, struct clk_freq_tbl *nf)
{
	uint32_t reg_val;

	reg_val = readl(clk->ns_reg);
	reg_val &= ~(clk->ns_mask);
	reg_val |= nf->ns_val;
	writel(reg_val, clk->ns_reg);
}

/* For clocks with MND dividers. */
void set_rate_mnd(struct clk_local *clk, struct clk_freq_tbl *nf)
{
	uint32_t ns_reg_val, cc_reg_val;

	/* Assert MND reset. */
	ns_reg_val = readl(clk->ns_reg);
	ns_reg_val |= B(7);
	writel(ns_reg_val, clk->ns_reg);

	/* Program M and D values. */
	writel(nf->md_val, clk->md_reg);

	/* Program NS register. */
	ns_reg_val &= ~(clk->ns_mask);
	ns_reg_val |= nf->ns_val;
	writel(ns_reg_val, clk->ns_reg);

	/* If the clock has a separate CC register, program it. */
	if (clk->ns_reg != clk->cc_reg) {
		cc_reg_val = readl(clk->cc_reg);
		cc_reg_val &= ~(clk->cc_mask);
		cc_reg_val |= nf->cc_val;
		writel(cc_reg_val, clk->cc_reg);
	}

	/* Deassert MND reset. */
	ns_reg_val &= ~B(7);
	writel(ns_reg_val, clk->ns_reg);
}

void set_rate_nop(struct clk_local *clk, struct clk_freq_tbl *nf)
{
	/* Nothing to do for fixed-rate clocks. */
}
/*
 * SYS_VDD voting functions
 */

/* Update system voltage level given the current votes. */
static int local_update_sys_vdd(void)
{
	static int cur_level;
	int level, rc = 0;

	if (local_sys_vdd_votes[HIGH])
		level = HIGH;
	else if (local_sys_vdd_votes[NOMINAL])
		level = NOMINAL;
	else
		level = LOW;

	if (level == cur_level)
		return rc;

	rc = soc_update_sys_vdd(level);
	if (!rc)
		cur_level = level;

	return rc;
}

/* Vote for a system voltage level. */
int local_vote_sys_vdd(unsigned level)
{
	int rc = 0;
	unsigned long flags;

	/* Bounds checking. */
	if (level >= ARRAY_SIZE(local_sys_vdd_votes))
		return -EINVAL;

	spin_lock_irqsave(&sys_vdd_vote_lock, flags);
	local_sys_vdd_votes[level]++;
	rc = local_update_sys_vdd();
	if (rc)
		local_sys_vdd_votes[level]--;
	spin_unlock_irqrestore(&sys_vdd_vote_lock, flags);

	return rc;
}

/* Remove vote for a system voltage level. */
int local_unvote_sys_vdd(unsigned level)
{
	int rc = 0;
	unsigned long flags;

	/* Bounds checking. */
	if (level >= ARRAY_SIZE(local_sys_vdd_votes))
		return -EINVAL;

	spin_lock_irqsave(&sys_vdd_vote_lock, flags);
	if (local_sys_vdd_votes[level])
		local_sys_vdd_votes[level]--;
	else {
		pr_warning("%s: Reference counts are incorrect for level %d!\n",
			   __func__, level);
		goto out;
	}

	rc = local_update_sys_vdd();
	if (rc)
		local_sys_vdd_votes[level]++;
out:
	spin_unlock_irqrestore(&sys_vdd_vote_lock, flags);
	return rc;
}
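/*
 * Illustrative usage sketch (not part of the original file): the voting API
 * above is reference-counted, so every local_vote_sys_vdd() must be paired
 * with a local_unvote_sys_vdd() at the same level; the rail only drops once
 * all votes for the higher level are gone. The caller below is hypothetical.
 */
#if 0
static int example_run_at_high_vdd(void)
{
	int rc;

	/* Raise the shared rail before work that needs the HIGH level. */
	rc = local_vote_sys_vdd(HIGH);
	if (rc)
		return rc;

	/* ... perform the high-voltage work here ... */

	/* Drop the vote; the rail falls back when the last HIGH vote goes. */
	return local_unvote_sys_vdd(HIGH);
}
#endif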
/*
 * Clock source (PLL/XO) control functions
 */

/* Enable clock source without taking the lock. */
static int local_src_enable_nolock(int src)
{
	int rc = 0;

	if (!src_votes[src]) {
		if (soc_clk_sources[src].par != SRC_NONE)
			rc = local_src_enable_nolock(soc_clk_sources[src].par);
		if (rc)
			goto err_par;
		/* Perform source-specific enable operations. */
		if (soc_clk_sources[src].enable_func)
			rc = soc_clk_sources[src].enable_func(src, 1);
		if (rc)
			goto err_enable;
	}
	src_votes[src]++;

	return rc;

err_enable:
	if (soc_clk_sources[src].par != SRC_NONE)
		local_src_disable_nolock(soc_clk_sources[src].par);
err_par:
	return rc;
}

/* Enable clock source. */
int local_src_enable(int src)
{
	int rc = 0;
	unsigned long flags;

	if (src == SRC_NONE)
		return rc;

	spin_lock_irqsave(&src_vote_lock, flags);
	rc = local_src_enable_nolock(src);
	spin_unlock_irqrestore(&src_vote_lock, flags);

	return rc;
}

/* Disable clock source without taking the lock. */
static int local_src_disable_nolock(int src)
{
	int rc = 0;

	if (src_votes[src] > 0)
		src_votes[src]--;
	else {
		pr_warning("%s: Reference counts are incorrect for "
			   "src %d!\n", __func__, src);
		return rc;
	}

	if (src_votes[src] == 0) {
		/* Perform source-specific disable operations. */
		if (soc_clk_sources[src].enable_func)
			rc = soc_clk_sources[src].enable_func(src, 0);
		if (rc)
			goto err_disable;
		if (soc_clk_sources[src].par != SRC_NONE)
			rc = local_src_disable_nolock(soc_clk_sources[src].par);
		if (rc)
			goto err_disable_par;
	}

	return rc;

err_disable_par:
	soc_clk_sources[src].enable_func(src, 1);
err_disable:
	src_votes[src]++;
	return rc;
}

/* Disable clock source. */
int local_src_disable(int src)
{
	int rc = 0;
	unsigned long flags;

	if (src == SRC_NONE)
		return rc;

	spin_lock_irqsave(&src_vote_lock, flags);
	rc = local_src_disable_nolock(src);
	spin_unlock_irqrestore(&src_vote_lock, flags);

	return rc;
}
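/*
 * Illustrative usage sketch (not part of the original file): source enables
 * are vote-counted per entry in soc_clk_sources[], so nested enables are
 * cheap and the PLL/XO is only shut off when the last vote is dropped. The
 * source index passed in below is hypothetical.
 */
#if 0
static int example_src_refcounting(int example_src)
{
	int rc;

	rc = local_src_enable(example_src);	/* vote 0 -> 1: enable_func runs */
	if (rc)
		return rc;
	rc = local_src_enable(example_src);	/* vote 1 -> 2: no register work */
	if (rc) {
		local_src_disable(example_src);
		return rc;
	}

	local_src_disable(example_src);		/* vote 2 -> 1: source stays on */
	local_src_disable(example_src);		/* vote 1 -> 0: source shuts off */
	return 0;
}
#endif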
/*
 * Clock enable/disable functions
 */

/* Return non-zero if a clock's status register shows the clock is halted. */
static int local_clk_is_halted(unsigned id)
{
	struct clk_local *clk = &soc_clk_local_tbl[id];
	int invert = (clk->halt_check == ENABLE);
	int status_bit = readl(clk->halt_reg) & B(clk->halt_bit);
	return invert ? !status_bit : status_bit;
}
/* Perform any register operations required to enable the clock. */
void local_clk_enable_reg(unsigned id)
{
	struct clk_local *clk = &soc_clk_local_tbl[id];
	void *reg = clk->cc_reg;
	uint32_t reg_val;

	WARN((clk->type != NORATE) && (clk->current_freq == &local_dummy_freq),
		"Attempting to enable clock %d before setting its rate. "
		"Set the rate first!\n", id);

	/* Enable MN counter, if applicable. */
	reg_val = readl(reg);
	if (clk->type == MND) {
		reg_val |= clk->current_freq->mnd_en_mask;
		writel(reg_val, reg);
	}
	/* Enable root. */
	if (clk->root_en_mask) {
		reg_val |= clk->root_en_mask;
		writel(reg_val, reg);
	}
	/* Enable branch. */
	if (clk->br_en_mask) {
		reg_val |= clk->br_en_mask;
		writel(reg_val, reg);
	}

	/* Wait for clock to enable before returning. */
	if (clk->halt_check == DELAY)
		udelay(HALT_CHECK_DELAY_US);
	else if (clk->halt_check == ENABLE || clk->halt_check == HALT) {
		int count;

		/* Use a memory barrier since some halt status registers are
		 * not within the same 1K segment as the branch/root enable
		 * registers. */
		mb();

		/* Wait up to HALT_CHECK_MAX_LOOPS for clock to enable. */
		for (count = HALT_CHECK_MAX_LOOPS; local_clk_is_halted(id)
					&& count > 0; count--)
			udelay(1);
		if (count == 0)
			pr_warning("%s: clock %d status bit stuck off\n",
				   __func__, id);
	}
}
/* Perform any register operations required to disable the clock. */
void local_clk_disable_reg(unsigned id)
{
	struct clk_local *clk = &soc_clk_local_tbl[id];
	void *reg = clk->cc_reg;
	uint32_t reg_val;

	/* Disable branch. */
	reg_val = readl(reg);
	if (clk->br_en_mask) {
		reg_val &= ~(clk->br_en_mask);
		writel(reg_val, reg);
	}

	/* Wait for clock to disable before continuing. */
	if (clk->halt_check == DELAY)
		udelay(HALT_CHECK_DELAY_US);
	else if (clk->halt_check == ENABLE || clk->halt_check == HALT) {
		int count;

		/* Use a memory barrier since some halt status registers are
		 * not within the same 1K segment as the branch/root enable
		 * registers. */
		mb();

		/* Wait up to HALT_CHECK_MAX_LOOPS for clock to disable. */
		for (count = HALT_CHECK_MAX_LOOPS; !local_clk_is_halted(id)
					&& count > 0; count--)
			udelay(1);
		if (count == 0)
			pr_warning("%s: clock %d status bit stuck on\n",
				   __func__, id);
	}

	/* Disable root. */
	if (clk->root_en_mask) {
		reg_val &= ~(clk->root_en_mask);
		writel(reg_val, reg);
	}
	/* Disable MN counter, if applicable. */
	if (clk->type == MND) {
		reg_val &= ~(clk->current_freq->mnd_en_mask);
		writel(reg_val, reg);
	}
}
/* Enable a clock with no locking, enabling parent clocks as needed. */
static int local_clk_enable_nolock(unsigned id)
{
	struct clk_local *clk = &soc_clk_local_tbl[id];
	int rc = 0;

	if (clk->type == RESET)
		return -EPERM;

	if (!clk->count) {
		rc = local_vote_sys_vdd(clk->current_freq->sys_vdd);
		if (rc)
			goto err_vdd;
		if (clk->parent != C(NONE)) {
			rc = local_clk_enable_nolock(clk->parent);
			if (rc)
				goto err_par;
		}
		rc = local_src_enable(clk->current_freq->src);
		if (rc)
			goto err_src;
		local_clk_enable_reg(id);
	}
	clk->count++;

	return rc;

err_src:
	if (clk->parent != C(NONE))
		rc = local_clk_disable_nolock(clk->parent);
err_par:
	local_unvote_sys_vdd(clk->current_freq->sys_vdd);
err_vdd:
	return rc;
}

/* Disable a clock with no locking, disabling unused parents, too. */
static int local_clk_disable_nolock(unsigned id)
{
	struct clk_local *clk = &soc_clk_local_tbl[id];
	int rc = 0;

	if (clk->count > 0)
		clk->count--;
	else {
		pr_warning("%s: Reference counts are incorrect for clock %d!\n",
			   __func__, id);
		return rc;
	}

	if (clk->count == 0) {
		local_clk_disable_reg(id);
		rc = local_src_disable(clk->current_freq->src);
		if (rc)
			goto err_src;
		if (clk->parent != C(NONE))
			rc = local_clk_disable_nolock(clk->parent);
		if (rc)
			goto err_par;
		rc = local_unvote_sys_vdd(clk->current_freq->sys_vdd);
		if (rc)
			goto err_vdd;
	}

	return rc;

err_vdd:
	if (clk->parent != C(NONE))
		rc = local_clk_enable_nolock(clk->parent);
err_par:
	local_src_enable(clk->current_freq->src);
err_src:
	local_clk_enable_reg(id);
	clk->count++;
	return rc;
}

/* Enable a clock and any related power rail. */
int local_clk_enable(unsigned id)
{
	int rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	rc = local_clk_enable_nolock(id);
	if (rc)
		goto unlock;
	/*
	 * With remote rail control, the remote processor might modify
	 * the clock control register when the rail is enabled/disabled.
	 * Enable the rail inside the lock to protect against this.
	 */
	rc = soc_set_pwr_rail(id, 1);
	if (rc)
		local_clk_disable_nolock(id);
unlock:
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return rc;
}

/* Disable a clock and any related power rail. */
void local_clk_disable(unsigned id)
{
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	soc_set_pwr_rail(id, 0);
	local_clk_disable_nolock(id);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return;
}

/* Turn off a clock at boot, without checking refcounts or disabling parents. */
void local_clk_auto_off(unsigned id)
{
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	local_clk_disable_reg(id);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}
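/*
 * Illustrative usage sketch (not part of the original file): a caller is
 * expected to set a rate before the first enable (see the WARN in
 * local_clk_enable_reg()) and to balance every local_clk_enable() with a
 * local_clk_disable(). The clock ID and rate below are hypothetical.
 */
#if 0
static int example_driver_power_on(unsigned example_clk_id)
{
	int rc;

	rc = local_clk_set_rate(example_clk_id, 19200000);
	if (rc)
		return rc;

	rc = local_clk_enable(example_clk_id);	/* also powers rail/parents */
	if (rc)
		return rc;

	/* ... use the hardware block ... */

	local_clk_disable(example_clk_id);	/* drops rail, source, vdd votes */
	return 0;
}
#endif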
/*
 * Frequency-related functions
 */

/* Set a clock's frequency. */
static int _local_clk_set_rate(unsigned id, struct clk_freq_tbl *nf)
{
	struct clk_local *clk = &soc_clk_local_tbl[id];
	struct clk_freq_tbl *cf;
	const int32_t *chld = clk->children;
	int i, rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);

	/* Check if frequency is actually changed. */
	cf = clk->current_freq;
	if (nf == cf)
		goto release_lock;

	/* Disable branch if clock isn't dual-banked with a glitch-free MUX. */
	if (clk->banked_mnd_masks == NULL) {
		/* Disable all branches to prevent glitches. */
		for (i = 0; chld && chld[i] != C(NONE); i++) {
			struct clk_local *ch = &soc_clk_local_tbl[chld[i]];
			/* Don't bother turning off if it is already off.
			 * Checking ch->count is cheaper (cache) than reading
			 * and writing to a register (uncached/unbuffered). */
			if (ch->count)
				local_clk_disable_reg(chld[i]);
		}
		if (clk->count)
			local_clk_disable_reg(id);
	}

	if (clk->count) {
		/* Vote for voltage and source for new freq. */
		rc = local_vote_sys_vdd(nf->sys_vdd);
		if (rc)
			goto sys_vdd_vote_failed;
		rc = local_src_enable(nf->src);
		if (rc) {
			local_unvote_sys_vdd(nf->sys_vdd);
			goto src_enable_failed;
		}
	}

	/* Perform clock-specific frequency switch operations. */
	BUG_ON(!clk->set_rate);
	clk->set_rate(clk, nf);

	/* Release requirements of the old freq. */
	if (clk->count) {
		local_src_disable(cf->src);
		local_unvote_sys_vdd(cf->sys_vdd);
	}

	/* Current freq must be updated before local_clk_enable_reg()
	 * is called to make sure the MNCNTR_EN bit is set correctly. */
	clk->current_freq = nf;

src_enable_failed:
sys_vdd_vote_failed:
	/* Enable any clocks that were disabled. */
	if (clk->banked_mnd_masks == NULL) {
		if (clk->count)
			local_clk_enable_reg(id);
		/* Enable only branches that were ON before. */
		for (i = 0; chld && chld[i] != C(NONE); i++) {
			struct clk_local *ch = &soc_clk_local_tbl[chld[i]];
			if (ch->count)
				local_clk_enable_reg(chld[i]);
		}
	}

release_lock:
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
	return rc;
}
/* Set a clock to an exact rate. */
int local_clk_set_rate(unsigned id, unsigned rate)
{
	struct clk_local *clk = &soc_clk_local_tbl[id];
	struct clk_freq_tbl *nf;

	if (clk->type == NORATE || clk->type == RESET)
		return -EPERM;

	for (nf = clk->freq_tbl; nf->freq_hz != FREQ_END
			&& nf->freq_hz != rate; nf++)
		;

	if (nf->freq_hz == FREQ_END)
		return -EINVAL;

	return _local_clk_set_rate(id, nf);
}

/* Set a clock to a rate greater than some minimum. */
int local_clk_set_min_rate(unsigned id, unsigned rate)
{
	struct clk_local *clk = &soc_clk_local_tbl[id];
	struct clk_freq_tbl *nf;

	if (clk->type == NORATE || clk->type == RESET)
		return -EPERM;

	for (nf = clk->freq_tbl; nf->freq_hz != FREQ_END
			&& nf->freq_hz < rate; nf++)
		;

	if (nf->freq_hz == FREQ_END)
		return -EINVAL;

	return _local_clk_set_rate(id, nf);
}
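/*
 * Illustrative usage sketch (not part of the original file):
 * local_clk_set_rate() only accepts rates that appear verbatim in the
 * clock's frequency table, while local_clk_set_min_rate() settles for the
 * first table entry at or above the request. The ID and rate below are
 * hypothetical.
 */
#if 0
static int example_pick_rate(unsigned example_clk_id)
{
	int rc;

	/* Succeeds only if 24000000 is an exact freq_tbl entry. */
	rc = local_clk_set_rate(example_clk_id, 24000000);
	if (rc != -EINVAL)
		return rc;

	/* Otherwise take the lowest supported rate >= 24000000. */
	return local_clk_set_min_rate(example_clk_id, 24000000);
}
#endif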
/* Set a clock to a maximum rate. */
int local_clk_set_max_rate(unsigned id, unsigned rate)
{
	return -EPERM;
}

/* Get the currently-set rate of a clock in Hz. */
unsigned local_clk_get_rate(unsigned id)
{
	struct clk_local *clk = &soc_clk_local_tbl[id];
	unsigned long flags;
	unsigned ret = 0;

	if (clk->type == NORATE || clk->type == RESET)
		return 0;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ret = clk->current_freq->freq_hz;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Return 0 if the rate has never been set. Might not be correct,
	 * but it's good enough. */
	if (ret == FREQ_END)
		ret = 0;

	return ret;
}

/* Check if a clock is currently enabled. */
unsigned local_clk_is_enabled(unsigned id)
{
	struct clk_local *clk = &soc_clk_local_tbl[id];

	if (clk->type == RESET)
		return -EPERM;

	return !!(soc_clk_local_tbl[id].count);
}

/* Return a supported rate that's at least the specified rate. */
long local_clk_round_rate(unsigned id, unsigned rate)
{
	struct clk_local *clk = &soc_clk_local_tbl[id];
	struct clk_freq_tbl *f;

	if (clk->type == NORATE || clk->type == RESET)
		return -EINVAL;

	for (f = clk->freq_tbl; f->freq_hz != FREQ_END; f++)
		if (f->freq_hz >= rate)
			return f->freq_hz;

	return -EPERM;
}

/* Return the nth supported frequency for a given clock. */
int local_clk_list_rate(unsigned id, unsigned n)
{
	struct clk_local *clk = &soc_clk_local_tbl[id];

	if (!clk->freq_tbl || clk->freq_tbl->freq_hz == FREQ_END)
		return -ENXIO;

	return (clk->freq_tbl + n)->freq_hz;
}
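/*
 * Illustrative usage sketch (not part of the original file):
 * local_clk_list_rate() indexes straight into the frequency table, so
 * walking n upward until the return value is an error or the FREQ_END
 * terminator enumerates every supported rate. The clock ID and the printout
 * below are hypothetical.
 */
#if 0
static void example_dump_rates(unsigned example_clk_id)
{
	unsigned n;
	int hz;

	for (n = 0; ; n++) {
		hz = local_clk_list_rate(example_clk_id, n);
		if (hz < 0 || (unsigned)hz == FREQ_END)
			break;
		pr_info("clock %u supports %d Hz\n", example_clk_id, hz);
	}
}
#endif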