
/drivers/video/omap2/dss/dss.c

https://github.com/ssvb/linux-n810

/*
 * linux/drivers/video/omap2/dss/dss.c
 *
 * Copyright (C) 2009 Nokia Corporation
 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
 *
 * Some code and ideas taken from drivers/video/omap/ driver
 * by Imre Deak.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define DSS_SUBSYS_NAME "DSS"

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/clk.h>

#include <plat/display.h>
#include "dss.h"

#define DSS_BASE 0x48050000
#define DSS_SZ_REGS SZ_512

struct dss_reg {
	u16 idx;
};

#define DSS_REG(idx) ((const struct dss_reg) { idx })

#define DSS_REVISION		DSS_REG(0x0000)
#define DSS_SYSCONFIG		DSS_REG(0x0010)
#define DSS_SYSSTATUS		DSS_REG(0x0014)
#define DSS_IRQSTATUS		DSS_REG(0x0018)
#define DSS_CONTROL		DSS_REG(0x0040)
#define DSS_SDI_CONTROL		DSS_REG(0x0044)
#define DSS_PLL_CONTROL		DSS_REG(0x0048)
#define DSS_SDI_STATUS		DSS_REG(0x005C)

#define REG_GET(idx, start, end) \
	FLD_GET(dss_read_reg(idx), start, end)

#define REG_FLD_MOD(idx, val, start, end) \
	dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))

static struct {
	void __iomem *base;

	struct clk *dpll4_m4_ck;

	unsigned long cache_req_pck;
	unsigned long cache_prate;
	struct dss_clock_info cache_dss_cinfo;
	struct dispc_clock_info cache_dispc_cinfo;

	enum dss_clk_source dsi_clk_source;
	enum dss_clk_source dispc_clk_source;

	u32 ctx[DSS_SZ_REGS / sizeof(u32)];
} dss;

static int _omap_dss_wait_reset(void);

static inline void dss_write_reg(const struct dss_reg idx, u32 val)
{
	__raw_writel(val, dss.base + idx.idx);
}

static inline u32 dss_read_reg(const struct dss_reg idx)
{
	return __raw_readl(dss.base + idx.idx);
}

#define SR(reg) \
	dss.ctx[(DSS_##reg).idx / sizeof(u32)] = dss_read_reg(DSS_##reg)
#define RR(reg) \
	dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)])
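
/*
 * Context save/restore for sleep: dss_save_context() snapshots SYSCONFIG,
 * CONTROL and (when SDI support is compiled in) the SDI/PLL control
 * registers into dss.ctx, and dss_restore_context() writes them back once
 * the module has come out of reset.  The save is a no-op on OMAP2.
 */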
void dss_save_context(void)
{
	if (cpu_is_omap24xx())
		return;

	SR(SYSCONFIG);
	SR(CONTROL);

#ifdef CONFIG_OMAP2_DSS_SDI
	SR(SDI_CONTROL);
	SR(PLL_CONTROL);
#endif
}

void dss_restore_context(void)
{
	if (_omap_dss_wait_reset())
		DSSERR("DSS not coming out of reset after sleep\n");

	RR(SYSCONFIG);
	RR(CONTROL);

#ifdef CONFIG_OMAP2_DSS_SDI
	RR(SDI_CONTROL);
	RR(PLL_CONTROL);
#endif
}

#undef SR
#undef RR
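
/*
 * Program the SDI control and PLL control registers (SDI_PDIV, SDI_PRSEL,
 * SDI_BWSEL and SDI_PLL_FREQSEL/REGN/REGM) for the requested number of
 * data pairs (1-3).
 */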
void dss_sdi_init(u8 datapairs)
{
	u32 l;

	BUG_ON(datapairs > 3 || datapairs < 1);

	l = dss_read_reg(DSS_SDI_CONTROL);
	l = FLD_MOD(l, 0xf, 19, 15);		/* SDI_PDIV */
	l = FLD_MOD(l, datapairs-1, 3, 2);	/* SDI_PRSEL */
	l = FLD_MOD(l, 2, 1, 0);		/* SDI_BWSEL */
	dss_write_reg(DSS_SDI_CONTROL, l);

	l = dss_read_reg(DSS_PLL_CONTROL);
	l = FLD_MOD(l, 0x7, 25, 22);	/* SDI_PLL_FREQSEL */
	l = FLD_MOD(l, 0xb, 16, 11);	/* SDI_PLL_REGN */
	l = FLD_MOD(l, 0xb4, 10, 1);	/* SDI_PLL_REGM */
	dss_write_reg(DSS_PLL_CONTROL, l);
}
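
/*
 * Bring up the SDI link: take the SDI PLL out of reset, request a PLL lock
 * and wait (with 500 ms timeouts) for the lock request, the PLL lock and
 * the SDI reset to complete.  Returns 0 on success or -ETIMEDOUT, undoing
 * the partial setup on failure.
 */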
int dss_sdi_enable(void)
{
	unsigned long timeout;

	dispc_pck_free_enable(1);

	/* Reset SDI PLL */
	REG_FLD_MOD(DSS_PLL_CONTROL, 1, 18, 18); /* SDI_PLL_SYSRESET */
	udelay(1);	/* wait 2x PCLK */

	/* Lock SDI PLL */
	REG_FLD_MOD(DSS_PLL_CONTROL, 1, 28, 28); /* SDI_PLL_GOBIT */

	/* Waiting for PLL lock request to complete */
	timeout = jiffies + msecs_to_jiffies(500);
	while (dss_read_reg(DSS_SDI_STATUS) & (1 << 6)) {
		if (time_after_eq(jiffies, timeout)) {
			DSSERR("PLL lock request timed out\n");
			goto err1;
		}
	}

	/* Clearing PLL_GO bit */
	REG_FLD_MOD(DSS_PLL_CONTROL, 0, 28, 28);

	/* Waiting for PLL to lock */
	timeout = jiffies + msecs_to_jiffies(500);
	while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 5))) {
		if (time_after_eq(jiffies, timeout)) {
			DSSERR("PLL lock timed out\n");
			goto err1;
		}
	}

	dispc_lcd_enable_signal(1);

	/* Waiting for SDI reset to complete */
	timeout = jiffies + msecs_to_jiffies(500);
	while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 2))) {
		if (time_after_eq(jiffies, timeout)) {
			DSSERR("SDI reset timed out\n");
			goto err2;
		}
	}

	return 0;

err2:
	dispc_lcd_enable_signal(0);
err1:
	/* Reset SDI PLL */
	REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */

	dispc_pck_free_enable(0);

	return -ETIMEDOUT;
}
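
/* Tear down the SDI link and put the SDI PLL back into reset. */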
void dss_sdi_disable(void)
{
	dispc_lcd_enable_signal(0);

	dispc_pck_free_enable(0);

	/* Reset SDI PLL */
	REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
}
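
/*
 * Dump the DSS clock tree (dpll4 rate and the dss1_alwon_fclk divider) to
 * the given seq_file.  On OMAP3630 there is no extra x2 factor in the
 * divider, matching the divider handling in dss_calc_clock_div() below.
 */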
void dss_dump_clocks(struct seq_file *s)
{
	unsigned long dpll4_ck_rate;
	unsigned long dpll4_m4_ck_rate;

	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);

	dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
	dpll4_m4_ck_rate = clk_get_rate(dss.dpll4_m4_ck);

	seq_printf(s, "- DSS -\n");

	seq_printf(s, "dpll4_ck %lu\n", dpll4_ck_rate);

	if (cpu_is_omap3630())
		seq_printf(s, "dss1_alwon_fclk = %lu / %lu = %lu\n",
				dpll4_ck_rate,
				dpll4_ck_rate / dpll4_m4_ck_rate,
				dss_clk_get_rate(DSS_CLK_FCK1));
	else
		seq_printf(s, "dss1_alwon_fclk = %lu / %lu * 2 = %lu\n",
				dpll4_ck_rate,
				dpll4_ck_rate / dpll4_m4_ck_rate,
				dss_clk_get_rate(DSS_CLK_FCK1));

	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
}

void dss_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))

	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);

	DUMPREG(DSS_REVISION);
	DUMPREG(DSS_SYSCONFIG);
	DUMPREG(DSS_SYSSTATUS);
	DUMPREG(DSS_IRQSTATUS);
	DUMPREG(DSS_CONTROL);
	DUMPREG(DSS_SDI_CONTROL);
	DUMPREG(DSS_PLL_CONTROL);
	DUMPREG(DSS_SDI_STATUS);

	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
#undef DUMPREG
}
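
/*
 * Switch the DISPC and DSI functional clocks between DSS1_ALWON_FCLK and
 * the DSI PLL outputs using the clock-switch bits in DSS_CONTROL, waiting
 * for the selected DSI PLL output to become active before switching to it.
 */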
void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
{
	int b;

	BUG_ON(clk_src != DSS_SRC_DSI1_PLL_FCLK &&
			clk_src != DSS_SRC_DSS1_ALWON_FCLK);

	b = clk_src == DSS_SRC_DSS1_ALWON_FCLK ? 0 : 1;

	if (clk_src == DSS_SRC_DSI1_PLL_FCLK)
		dsi_wait_dsi1_pll_active();

	REG_FLD_MOD(DSS_CONTROL, b, 0, 0);	/* DISPC_CLK_SWITCH */

	dss.dispc_clk_source = clk_src;
}

void dss_select_dsi_clk_source(enum dss_clk_source clk_src)
{
	int b;

	BUG_ON(clk_src != DSS_SRC_DSI2_PLL_FCLK &&
			clk_src != DSS_SRC_DSS1_ALWON_FCLK);

	b = clk_src == DSS_SRC_DSS1_ALWON_FCLK ? 0 : 1;

	if (clk_src == DSS_SRC_DSI2_PLL_FCLK)
		dsi_wait_dsi2_pll_active();

	REG_FLD_MOD(DSS_CONTROL, b, 1, 1);	/* DSI_CLK_SWITCH */

	dss.dsi_clk_source = clk_src;
}

enum dss_clk_source dss_get_dispc_clk_source(void)
{
	return dss.dispc_clk_source;
}

enum dss_clk_source dss_get_dsi_clk_source(void)
{
	return dss.dsi_clk_source;
}

/* calculate clock rates using dividers in cinfo */
int dss_calc_clock_rates(struct dss_clock_info *cinfo)
{
	unsigned long prate;

	if (cinfo->fck_div > (cpu_is_omap3630() ? 32 : 16) ||
			cinfo->fck_div == 0)
		return -EINVAL;

	prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));

	cinfo->fck = prate / cinfo->fck_div;

	return 0;
}
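
/*
 * Apply the divider from cinfo by setting the dpll4_m4 clock rate
 * (OMAP3 only; on other chips only the debug print runs).
 */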
int dss_set_clock_div(struct dss_clock_info *cinfo)
{
	unsigned long prate;
	int r;

	if (cpu_is_omap34xx()) {
		prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
		DSSDBG("dpll4_m4 = %ld\n", prate);

		r = clk_set_rate(dss.dpll4_m4_ck, prate / cinfo->fck_div);
		if (r)
			return r;
	}

	DSSDBG("fck = %ld (%d)\n", cinfo->fck, cinfo->fck_div);

	return 0;
}
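
/*
 * Read back the current fck rate and derive the divider that is in use;
 * the divider is reported as 0 on OMAP2, where the driver does not
 * program it.
 */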
int dss_get_clock_div(struct dss_clock_info *cinfo)
{
	cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK1);

	if (cpu_is_omap34xx()) {
		unsigned long prate;

		prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
		if (cpu_is_omap3630())
			cinfo->fck_div = prate / (cinfo->fck);
		else
			cinfo->fck_div = prate / (cinfo->fck / 2);
	} else {
		cinfo->fck_div = 0;
	}

	return 0;
}

unsigned long dss_get_dpll4_rate(void)
{
	if (cpu_is_omap34xx())
		return clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
	else
		return 0;
}
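
/*
 * Find DSS and DISPC dividers that bring the pixel clock as close as
 * possible to req_pck.  The result of the previous call is cached; if the
 * optional CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK constraint cannot be
 * satisfied, it is dropped and the search is retried without it.
 */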
int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
		struct dss_clock_info *dss_cinfo,
		struct dispc_clock_info *dispc_cinfo)
{
	unsigned long prate;
	struct dss_clock_info best_dss;
	struct dispc_clock_info best_dispc;

	unsigned long fck;
	u16 fck_div;

	int match = 0;
	int min_fck_per_pck;

	prate = dss_get_dpll4_rate();

	fck = dss_clk_get_rate(DSS_CLK_FCK1);
	if (req_pck == dss.cache_req_pck &&
			((cpu_is_omap34xx() && prate == dss.cache_prate) ||
			 dss.cache_dss_cinfo.fck == fck)) {
		DSSDBG("dispc clock info found from cache.\n");
		*dss_cinfo = dss.cache_dss_cinfo;
		*dispc_cinfo = dss.cache_dispc_cinfo;
		return 0;
	}

	min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;

	if (min_fck_per_pck &&
			req_pck * min_fck_per_pck > DISPC_MAX_FCK) {
		DSSERR("Requested pixel clock not possible with the current "
				"OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
				"the constraint off.\n");
		min_fck_per_pck = 0;
	}

retry:
	memset(&best_dss, 0, sizeof(best_dss));
	memset(&best_dispc, 0, sizeof(best_dispc));

	if (cpu_is_omap24xx()) {
		struct dispc_clock_info cur_dispc;
		/* XXX can we change the clock on omap2? */
		fck = dss_clk_get_rate(DSS_CLK_FCK1);
		fck_div = 1;

		dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
		match = 1;

		best_dss.fck = fck;
		best_dss.fck_div = fck_div;

		best_dispc = cur_dispc;

		goto found;
	} else if (cpu_is_omap34xx()) {
		for (fck_div = (cpu_is_omap3630() ? 32 : 16);
				fck_div > 0; --fck_div) {
			struct dispc_clock_info cur_dispc;

			if (cpu_is_omap3630())
				fck = prate / fck_div;
			else
				fck = prate / fck_div * 2;

			if (fck > DISPC_MAX_FCK)
				continue;

			if (min_fck_per_pck &&
					fck < req_pck * min_fck_per_pck)
				continue;

			match = 1;

			dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);

			if (abs(cur_dispc.pck - req_pck) <
					abs(best_dispc.pck - req_pck)) {
				best_dss.fck = fck;
				best_dss.fck_div = fck_div;

				best_dispc = cur_dispc;

				if (cur_dispc.pck == req_pck)
					goto found;
			}
		}
	} else {
		BUG();
	}

found:
	if (!match) {
		if (min_fck_per_pck) {
			DSSERR("Could not find suitable clock settings.\n"
					"Turning FCK/PCK constraint off and "
					"trying again.\n");
			min_fck_per_pck = 0;
			goto retry;
		}

		DSSERR("Could not find suitable clock settings.\n");
		return -EINVAL;
	}

	if (dss_cinfo)
		*dss_cinfo = best_dss;
	if (dispc_cinfo)
		*dispc_cinfo = best_dispc;

	dss.cache_req_pck = req_pck;
	dss.cache_prate = prate;
	dss.cache_dss_cinfo = best_dss;
	dss.cache_dispc_cinfo = best_dispc;

	return 0;
}
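
/*
 * Top-level DSS interrupt handlers.  OMAP2 only has the DISPC interrupt;
 * on OMAP3, DSS_IRQSTATUS is decoded to dispatch to DISPC and, when
 * compiled in, to DSI.
 */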
static irqreturn_t dss_irq_handler_omap2(int irq, void *arg)
{
	dispc_irq_handler();

	return IRQ_HANDLED;
}

static irqreturn_t dss_irq_handler_omap3(int irq, void *arg)
{
	u32 irqstatus;

	irqstatus = dss_read_reg(DSS_IRQSTATUS);

	if (irqstatus & (1<<0))	/* DISPC_IRQ */
		dispc_irq_handler();
#ifdef CONFIG_OMAP2_DSS_DSI
	if (irqstatus & (1<<1))	/* DSI_IRQ */
		dsi_irq_handler();
#endif

	return IRQ_HANDLED;
}
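
/* Poll DSS_SYSSTATUS until the reset-done bit is set (roughly 1 ms at most). */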
static int _omap_dss_wait_reset(void)
{
	int t = 0;

	while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) {
		if (++t > 1000) {
			DSSERR("soft reset failed\n");
			return -ENODEV;
		}
		udelay(1);
	}

	return 0;
}

static int _omap_dss_reset(void)
{
	/* Soft reset */
	REG_FLD_MOD(DSS_SYSCONFIG, 1, 1, 1);
	return _omap_dss_wait_reset();
}
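
/* VENC output selection (composite/S-Video) and DAC power-down control. */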
void dss_set_venc_output(enum omap_dss_venc_type type)
{
	int l = 0;

	if (type == OMAP_DSS_VENC_TYPE_COMPOSITE)
		l = 0;
	else if (type == OMAP_DSS_VENC_TYPE_SVIDEO)
		l = 1;
	else
		BUG();

	/* venc out selection. 0 = comp, 1 = svideo */
	REG_FLD_MOD(DSS_CONTROL, l, 6, 6);
}

void dss_set_dac_pwrdn_bgz(bool enable)
{
	REG_FLD_MOD(DSS_CONTROL, enable, 5, 5);	/* DAC Power-Down Control */
}
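
/*
 * Map the DSS registers, optionally soft-reset the module (skipped when the
 * bootloader setup must be preserved), program SYSCONFIG/CONTROL defaults,
 * register the DSS interrupt handler and, on OMAP3, look up dpll4_m4_ck.
 */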
int dss_init(bool skip_init)
{
	int r;
	u32 rev;

	dss.base = ioremap(DSS_BASE, DSS_SZ_REGS);
	if (!dss.base) {
		DSSERR("can't ioremap DSS\n");
		r = -ENOMEM;
		goto fail0;
	}

	if (!skip_init) {
		/* disable LCD and DIGIT output. This seems to fix the synclost
		 * problem that we get, if the bootloader starts the DSS and
		 * the kernel resets it */
		omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440);

		/* We need to wait here a bit, otherwise we sometimes start to
		 * get synclost errors, and after that only power cycle will
		 * restore DSS functionality. I have no idea why this happens.
		 * And we have to wait _before_ resetting the DSS, but after
		 * enabling clocks.
		 */
		msleep(50);

		_omap_dss_reset();
	}

	/* autoidle */
	REG_FLD_MOD(DSS_SYSCONFIG, 1, 0, 0);

	/* Select DPLL */
	REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);

#ifdef CONFIG_OMAP2_DSS_VENC
	REG_FLD_MOD(DSS_CONTROL, 1, 4, 4);	/* venc dac demen */
	REG_FLD_MOD(DSS_CONTROL, 1, 3, 3);	/* venc clock 4x enable */
	REG_FLD_MOD(DSS_CONTROL, 0, 2, 2);	/* venc clock mode = normal */
#endif

	r = request_irq(INT_24XX_DSS_IRQ,
			cpu_is_omap24xx()
			? dss_irq_handler_omap2
			: dss_irq_handler_omap3,
			0, "OMAP DSS", NULL);
	if (r < 0) {
		DSSERR("omap2 dss: request_irq failed\n");
		goto fail1;
	}

	if (cpu_is_omap34xx()) {
		dss.dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck");
		if (IS_ERR(dss.dpll4_m4_ck)) {
			DSSERR("Failed to get dpll4_m4_ck\n");
			r = PTR_ERR(dss.dpll4_m4_ck);
			goto fail2;
		}
	}

	dss.dsi_clk_source = DSS_SRC_DSS1_ALWON_FCLK;
	dss.dispc_clk_source = DSS_SRC_DSS1_ALWON_FCLK;

	dss_save_context();

	rev = dss_read_reg(DSS_REVISION);
	printk(KERN_INFO "OMAP DSS rev %d.%d\n",
			FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));

	return 0;

fail2:
	free_irq(INT_24XX_DSS_IRQ, NULL);
fail1:
	iounmap(dss.base);
fail0:
	return r;
}
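
/* Undo dss_init(): release the clock, the interrupt and the register mapping. */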
void dss_exit(void)
{
	if (cpu_is_omap34xx())
		clk_put(dss.dpll4_m4_ck);

	free_irq(INT_24XX_DSS_IRQ, NULL);

	iounmap(dss.base);
}