
/arch/arm/mach-pnx4008/dma.c

https://bitbucket.org/sola/android_board_snowball_kernel
/*
 * linux/arch/arm/mach-pnx4008/dma.c
 *
 * PNX4008 DMA registration and IRQ dispatching
 *
 * Author: Vitaly Wool
 * Copyright: MontaVista Software Inc. (c) 2005
 *
 * Based on the code from Nicolas Pitre
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gfp.h>

#include <asm/system.h>
#include <mach/hardware.h>
#include <mach/dma.h>
#include <asm/dma-mapping.h>
#include <mach/clock.h>
/* bookkeeping for each DMA channel handed out to a client driver */
static struct dma_channel {
	char *name;
	void (*irq_handler) (int, int, void *);
	void *data;
	struct pnx4008_dma_ll *ll;
	u32 ll_dma;
	void *target_addr;
	int target_id;
} dma_channels[MAX_DMA_CHANNELS];

/*
 * Free-list pool of pnx4008_dma_ll entries, carved out of one coherent
 * allocation made at init time; ll_lock protects the list head.
 */
static struct ll_pool {
	void *vaddr;
	void *cur;
	dma_addr_t dma_addr;
	int count;
} ll_pool;

static DEFINE_SPINLOCK(ll_lock);
struct pnx4008_dma_ll *pnx4008_alloc_ll_entry(dma_addr_t *ll_dma)
{
	struct pnx4008_dma_ll *ll = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ll_lock, flags);
	if (ll_pool.count > 4) { /* can give one more */
		ll = *(struct pnx4008_dma_ll **)ll_pool.cur;
		*ll_dma = ll_pool.dma_addr + ((void *)ll - ll_pool.vaddr);
		*(void **)ll_pool.cur = **(void ***)ll_pool.cur;
		memset(ll, 0, sizeof(*ll));
		ll_pool.count--;
	}
	spin_unlock_irqrestore(&ll_lock, flags);

	return ll;
}
EXPORT_SYMBOL_GPL(pnx4008_alloc_ll_entry);

void pnx4008_free_ll_entry(struct pnx4008_dma_ll *ll, dma_addr_t ll_dma)
{
	unsigned long flags;

	if (ll) {
		if ((unsigned long)((long)ll - (long)ll_pool.vaddr) > 0x4000) {
			printk(KERN_ERR
			       "Trying to free entry not allocated by DMA\n");
			BUG();
		}

		if (ll->flags & DMA_BUFFER_ALLOCATED)
			ll->free(ll->alloc_data);

		spin_lock_irqsave(&ll_lock, flags);
		*(long *)ll = *(long *)ll_pool.cur;
		*(long *)ll_pool.cur = (long)ll;
		ll_pool.count++;
		spin_unlock_irqrestore(&ll_lock, flags);
	}
}
EXPORT_SYMBOL_GPL(pnx4008_free_ll_entry);

void pnx4008_free_ll(u32 ll_dma, struct pnx4008_dma_ll *ll)
{
	struct pnx4008_dma_ll *ptr;
	u32 dma;

	while (ll) {
		dma = ll->next_dma;
		ptr = ll->next;
		pnx4008_free_ll_entry(ll, ll_dma);

		ll_dma = dma;
		ll = ptr;
	}
}
EXPORT_SYMBOL_GPL(pnx4008_free_ll);
static int dma_channels_requested = 0;

/*
 * Turn the DMA clock and the controller on when the first channel is
 * requested, and back off again when the last one is released.
 */
static inline void dma_increment_usage(void)
{
	if (!dma_channels_requested++) {
		struct clk *clk = clk_get(0, "dma_ck");
		if (!IS_ERR(clk)) {
			clk_set_rate(clk, 1);
			clk_put(clk);
		}
		pnx4008_config_dma(-1, -1, 1);
	}
}

static inline void dma_decrement_usage(void)
{
	if (!--dma_channels_requested) {
		struct clk *clk = clk_get(0, "dma_ck");
		if (!IS_ERR(clk)) {
			clk_set_rate(clk, 0);
			clk_put(clk);
		}
		pnx4008_config_dma(-1, -1, 0);
	}
}

static DEFINE_SPINLOCK(dma_lock);

static inline void pnx4008_dma_lock(void)
{
	spin_lock_irq(&dma_lock);
}

static inline void pnx4008_dma_unlock(void)
{
	spin_unlock_irq(&dma_lock);
}

#define VALID_CHANNEL(c) (((c) >= 0) && ((c) < MAX_DMA_CHANNELS))
int pnx4008_request_channel(char *name, int ch,
			    void (*irq_handler) (int, int, void *), void *data)
{
	int i, found = 0;

	/* basic sanity checks */
	if (!name || (ch != -1 && !VALID_CHANNEL(ch)))
		return -EINVAL;

	pnx4008_dma_lock();

	/* try grabbing a DMA channel with the requested priority */
	for (i = MAX_DMA_CHANNELS - 1; i >= 0; i--) {
		if (!dma_channels[i].name && (ch == -1 || ch == i)) {
			found = 1;
			break;
		}
	}

	if (found) {
		dma_increment_usage();
		dma_channels[i].name = name;
		dma_channels[i].irq_handler = irq_handler;
		dma_channels[i].data = data;
		dma_channels[i].ll = NULL;
		dma_channels[i].ll_dma = 0;
	} else {
		printk(KERN_WARNING "No more available DMA channels for %s\n",
		       name);
		i = -ENODEV;
	}

	pnx4008_dma_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(pnx4008_request_channel);

void pnx4008_free_channel(int ch)
{
	if (!dma_channels[ch].name) {
		printk(KERN_CRIT
		       "%s: trying to free channel %d which is already freed\n",
		       __func__, ch);
		return;
	}

	pnx4008_dma_lock();
	pnx4008_free_ll(dma_channels[ch].ll_dma, dma_channels[ch].ll);
	dma_channels[ch].ll = NULL;
	dma_decrement_usage();
	dma_channels[ch].name = NULL;
	pnx4008_dma_unlock();
}
EXPORT_SYMBOL_GPL(pnx4008_free_channel);
/*
 * For each argument, 0 clears and 1 sets the corresponding DMAC_CONFIG
 * bit; any other value (e.g. -1) leaves that bit unchanged.
 */
int pnx4008_config_dma(int ahb_m1_be, int ahb_m2_be, int enable)
{
	unsigned long dma_cfg = __raw_readl(DMAC_CONFIG);

	switch (ahb_m1_be) {
	case 0:
		dma_cfg &= ~(1 << 1);
		break;
	case 1:
		dma_cfg |= (1 << 1);
		break;
	default:
		break;
	}

	switch (ahb_m2_be) {
	case 0:
		dma_cfg &= ~(1 << 2);
		break;
	case 1:
		dma_cfg |= (1 << 2);
		break;
	default:
		break;
	}

	switch (enable) {
	case 0:
		dma_cfg &= ~(1 << 0);
		break;
	case 1:
		dma_cfg |= (1 << 0);
		break;
	default:
		break;
	}

	pnx4008_dma_lock();
	__raw_writel(dma_cfg, DMAC_CONFIG);
	pnx4008_dma_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(pnx4008_config_dma);
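/*
 * Layout of the channel control word, as packed/parsed by the two helpers
 * below (summary derived from their bit manipulation):
 *
 *   [10:0]  tr_size            transfer size
 *   [14:12] sbsize code        source burst size
 *   [17:15] dbsize code        destination burst size
 *   [20:18] swidth             source width (byte/halfword/word)
 *   [23:21] dwidth             destination width
 *   24      src_ahb1           25  dest_ahb1
 *   26      si (src increment) 27  di (dest increment)
 *   28      priv_mode          29  bufferable
 *   30      cacheable          31  tc_mask
 */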
int pnx4008_dma_pack_control(const struct pnx4008_dma_ch_ctrl *ch_ctrl,
			     unsigned long *ctrl)
{
	int i = 0, dbsize, sbsize, err = 0;

	if (!ctrl || !ch_ctrl) {
		err = -EINVAL;
		goto out;
	}

	*ctrl = 0;

	switch (ch_ctrl->tc_mask) {
	case 0:
		break;
	case 1:
		*ctrl |= (1 << 31);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	switch (ch_ctrl->cacheable) {
	case 0:
		break;
	case 1:
		*ctrl |= (1 << 30);
		break;
	default:
		err = -EINVAL;
		goto out;
	}
	switch (ch_ctrl->bufferable) {
	case 0:
		break;
	case 1:
		*ctrl |= (1 << 29);
		break;
	default:
		err = -EINVAL;
		goto out;
	}
	switch (ch_ctrl->priv_mode) {
	case 0:
		break;
	case 1:
		*ctrl |= (1 << 28);
		break;
	default:
		err = -EINVAL;
		goto out;
	}
	switch (ch_ctrl->di) {
	case 0:
		break;
	case 1:
		*ctrl |= (1 << 27);
		break;
	default:
		err = -EINVAL;
		goto out;
	}
	switch (ch_ctrl->si) {
	case 0:
		break;
	case 1:
		*ctrl |= (1 << 26);
		break;
	default:
		err = -EINVAL;
		goto out;
	}
	switch (ch_ctrl->dest_ahb1) {
	case 0:
		break;
	case 1:
		*ctrl |= (1 << 25);
		break;
	default:
		err = -EINVAL;
		goto out;
	}
	switch (ch_ctrl->src_ahb1) {
	case 0:
		break;
	case 1:
		*ctrl |= (1 << 24);
		break;
	default:
		err = -EINVAL;
		goto out;
	}
	switch (ch_ctrl->dwidth) {
	case WIDTH_BYTE:
		*ctrl &= ~(7 << 21);
		break;
	case WIDTH_HWORD:
		*ctrl &= ~(7 << 21);
		*ctrl |= (1 << 21);
		break;
	case WIDTH_WORD:
		*ctrl &= ~(7 << 21);
		*ctrl |= (2 << 21);
		break;
	default:
		err = -EINVAL;
		goto out;
	}
	switch (ch_ctrl->swidth) {
	case WIDTH_BYTE:
		*ctrl &= ~(7 << 18);
		break;
	case WIDTH_HWORD:
		*ctrl &= ~(7 << 18);
		*ctrl |= (1 << 18);
		break;
	case WIDTH_WORD:
		*ctrl &= ~(7 << 18);
		*ctrl |= (2 << 18);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	/*
	 * Encode the destination burst size as a 3-bit code
	 * (1 -> 0, 4 -> 1, 8 -> 2, ..., 256 -> 7); a burst of 2 or
	 * anything above 256 (or not a power of two) is rejected.
	 */
	dbsize = ch_ctrl->dbsize;
	while (!(dbsize & 1)) {
		i++;
		dbsize >>= 1;
	}
	if (ch_ctrl->dbsize != 1 << i || i > 8 || i == 1) {
		err = -EINVAL;
		goto out;
	} else if (i > 1)
		i--;
	*ctrl &= ~(7 << 15);
	*ctrl |= (i << 15);

	/* same encoding for the source burst size */
	i = 0;
	sbsize = ch_ctrl->sbsize;
	while (!(sbsize & 1)) {
		i++;
		sbsize >>= 1;
	}
	if (ch_ctrl->sbsize != 1 << i || i > 8 || i == 1) {
		err = -EINVAL;
		goto out;
	} else if (i > 1)
		i--;
	*ctrl &= ~(7 << 12);
	*ctrl |= (i << 12);

	if (ch_ctrl->tr_size > 0x7ff) {
		err = -E2BIG;
		goto out;
	}
	*ctrl &= ~0x7ff;
	*ctrl |= ch_ctrl->tr_size & 0x7ff;

out:
	return err;
}
EXPORT_SYMBOL_GPL(pnx4008_dma_pack_control);
int pnx4008_dma_parse_control(unsigned long ctrl,
			      struct pnx4008_dma_ch_ctrl *ch_ctrl)
{
	int err = 0;

	if (!ch_ctrl) {
		err = -EINVAL;
		goto out;
	}

	ch_ctrl->tr_size = ctrl & 0x7ff;
	ctrl >>= 12;

	ch_ctrl->sbsize = 1 << (ctrl & 7);
	if (ch_ctrl->sbsize > 1)
		ch_ctrl->sbsize <<= 1;
	ctrl >>= 3;

	ch_ctrl->dbsize = 1 << (ctrl & 7);
	if (ch_ctrl->dbsize > 1)
		ch_ctrl->dbsize <<= 1;
	ctrl >>= 3;

	switch (ctrl & 7) {
	case 0:
		ch_ctrl->swidth = WIDTH_BYTE;
		break;
	case 1:
		ch_ctrl->swidth = WIDTH_HWORD;
		break;
	case 2:
		ch_ctrl->swidth = WIDTH_WORD;
		break;
	default:
		err = -EINVAL;
		goto out;
	}
	ctrl >>= 3;

	switch (ctrl & 7) {
	case 0:
		ch_ctrl->dwidth = WIDTH_BYTE;
		break;
	case 1:
		ch_ctrl->dwidth = WIDTH_HWORD;
		break;
	case 2:
		ch_ctrl->dwidth = WIDTH_WORD;
		break;
	default:
		err = -EINVAL;
		goto out;
	}
	ctrl >>= 3;

	ch_ctrl->src_ahb1 = ctrl & 1;
	ctrl >>= 1;

	ch_ctrl->dest_ahb1 = ctrl & 1;
	ctrl >>= 1;

	ch_ctrl->si = ctrl & 1;
	ctrl >>= 1;

	ch_ctrl->di = ctrl & 1;
	ctrl >>= 1;

	ch_ctrl->priv_mode = ctrl & 1;
	ctrl >>= 1;

	ch_ctrl->bufferable = ctrl & 1;
	ctrl >>= 1;

	ch_ctrl->cacheable = ctrl & 1;
	ctrl >>= 1;

	ch_ctrl->tc_mask = ctrl & 1;

out:
	return err;
}
EXPORT_SYMBOL_GPL(pnx4008_dma_parse_control);
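/*
 * Layout of the channel configuration word, as packed/parsed by the two
 * helpers below (summary derived from their bit manipulation):
 *
 *   0       channel enable (set/cleared by pnx4008_dma_ch_enable/disable)
 *   [5:1]   src_per            source peripheral ID
 *   [10:6]  dest_per           destination peripheral ID
 *   [13:11] flow_cntrl         flow control / transfer type (FC_*)
 *   14      ie                 15  itc
 *   16      lock               17  active
 *   18      halt
 */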
int pnx4008_dma_pack_config(const struct pnx4008_dma_ch_config *ch_cfg,
			    unsigned long *cfg)
{
	int err = 0;

	if (!cfg || !ch_cfg) {
		err = -EINVAL;
		goto out;
	}

	*cfg = 0;

	switch (ch_cfg->halt) {
	case 0:
		break;
	case 1:
		*cfg |= (1 << 18);
		break;
	default:
		err = -EINVAL;
		goto out;
	}
	switch (ch_cfg->active) {
	case 0:
		break;
	case 1:
		*cfg |= (1 << 17);
		break;
	default:
		err = -EINVAL;
		goto out;
	}
	switch (ch_cfg->lock) {
	case 0:
		break;
	case 1:
		*cfg |= (1 << 16);
		break;
	default:
		err = -EINVAL;
		goto out;
	}
	switch (ch_cfg->itc) {
	case 0:
		break;
	case 1:
		*cfg |= (1 << 15);
		break;
	default:
		err = -EINVAL;
		goto out;
	}
	switch (ch_cfg->ie) {
	case 0:
		break;
	case 1:
		*cfg |= (1 << 14);
		break;
	default:
		err = -EINVAL;
		goto out;
	}
	switch (ch_cfg->flow_cntrl) {
	case FC_MEM2MEM_DMA:
		*cfg &= ~(7 << 11);
		break;
	case FC_MEM2PER_DMA:
		*cfg &= ~(7 << 11);
		*cfg |= (1 << 11);
		break;
	case FC_PER2MEM_DMA:
		*cfg &= ~(7 << 11);
		*cfg |= (2 << 11);
		break;
	case FC_PER2PER_DMA:
		*cfg &= ~(7 << 11);
		*cfg |= (3 << 11);
		break;
	case FC_PER2PER_DPER:
		*cfg &= ~(7 << 11);
		*cfg |= (4 << 11);
		break;
	case FC_MEM2PER_PER:
		*cfg &= ~(7 << 11);
		*cfg |= (5 << 11);
		break;
	case FC_PER2MEM_PER:
		*cfg &= ~(7 << 11);
		*cfg |= (6 << 11);
		break;
	case FC_PER2PER_SPER:
		*cfg |= (7 << 11);
		break;
	default:
		err = -EINVAL;
		goto out;
	}
	*cfg &= ~(0x1f << 6);
	*cfg |= ((ch_cfg->dest_per & 0x1f) << 6);

	*cfg &= ~(0x1f << 1);
	*cfg |= ((ch_cfg->src_per & 0x1f) << 1);

out:
	return err;
}
EXPORT_SYMBOL_GPL(pnx4008_dma_pack_config);
int pnx4008_dma_parse_config(unsigned long cfg,
			     struct pnx4008_dma_ch_config *ch_cfg)
{
	int err = 0;

	if (!ch_cfg) {
		err = -EINVAL;
		goto out;
	}

	cfg >>= 1;

	ch_cfg->src_per = cfg & 0x1f;
	cfg >>= 5;

	ch_cfg->dest_per = cfg & 0x1f;
	cfg >>= 5;

	switch (cfg & 7) {
	case 0:
		ch_cfg->flow_cntrl = FC_MEM2MEM_DMA;
		break;
	case 1:
		ch_cfg->flow_cntrl = FC_MEM2PER_DMA;
		break;
	case 2:
		ch_cfg->flow_cntrl = FC_PER2MEM_DMA;
		break;
	case 3:
		ch_cfg->flow_cntrl = FC_PER2PER_DMA;
		break;
	case 4:
		ch_cfg->flow_cntrl = FC_PER2PER_DPER;
		break;
	case 5:
		ch_cfg->flow_cntrl = FC_MEM2PER_PER;
		break;
	case 6:
		ch_cfg->flow_cntrl = FC_PER2MEM_PER;
		break;
	case 7:
		ch_cfg->flow_cntrl = FC_PER2PER_SPER;
	}
	cfg >>= 3;

	ch_cfg->ie = cfg & 1;
	cfg >>= 1;

	ch_cfg->itc = cfg & 1;
	cfg >>= 1;

	ch_cfg->lock = cfg & 1;
	cfg >>= 1;

	ch_cfg->active = cfg & 1;
	cfg >>= 1;

	ch_cfg->halt = cfg & 1;

out:
	return err;
}
EXPORT_SYMBOL_GPL(pnx4008_dma_parse_config);
/*
 * Split a head transfer whose tr_size exceeds the 0x7ff hardware limit
 * into a chain of linked-list entries of (roughly) equal length.
 */
void pnx4008_dma_split_head_entry(struct pnx4008_dma_config *config,
				  struct pnx4008_dma_ch_ctrl *ctrl)
{
	int new_len = ctrl->tr_size, num_entries = 0;
	int old_len = new_len;
	int src_width, dest_width, count = 1;

	switch (ctrl->swidth) {
	case WIDTH_BYTE:
		src_width = 1;
		break;
	case WIDTH_HWORD:
		src_width = 2;
		break;
	case WIDTH_WORD:
		src_width = 4;
		break;
	default:
		return;
	}

	switch (ctrl->dwidth) {
	case WIDTH_BYTE:
		dest_width = 1;
		break;
	case WIDTH_HWORD:
		dest_width = 2;
		break;
	case WIDTH_WORD:
		dest_width = 4;
		break;
	default:
		return;
	}

	while (new_len > 0x7FF) {
		num_entries++;
		new_len = (ctrl->tr_size + num_entries) / (num_entries + 1);
	}

	if (num_entries != 0) {
		struct pnx4008_dma_ll *ll = NULL;
		config->ch_ctrl &= ~0x7ff;
		config->ch_ctrl |= new_len;
		if (!config->is_ll) {
			config->is_ll = 1;
			while (num_entries) {
				if (!ll) {
					config->ll =
					    pnx4008_alloc_ll_entry(&config->ll_dma);
					ll = config->ll;
				} else {
					ll->next =
					    pnx4008_alloc_ll_entry(&ll->next_dma);
					ll = ll->next;
				}

				if (ctrl->si)
					ll->src_addr =
					    config->src_addr +
					    src_width * new_len * count;
				else
					ll->src_addr = config->src_addr;
				if (ctrl->di)
					ll->dest_addr =
					    config->dest_addr +
					    dest_width * new_len * count;
				else
					ll->dest_addr = config->dest_addr;
				ll->ch_ctrl = config->ch_ctrl & 0x7fffffff;
				ll->next_dma = 0;
				ll->next = NULL;
				num_entries--;
				count++;
			}
		} else {
			struct pnx4008_dma_ll *ll_old = config->ll;
			unsigned long ll_dma_old = config->ll_dma;
			while (num_entries) {
				if (!ll) {
					config->ll =
					    pnx4008_alloc_ll_entry(&config->ll_dma);
					ll = config->ll;
				} else {
					ll->next =
					    pnx4008_alloc_ll_entry(&ll->next_dma);
					ll = ll->next;
				}

				if (ctrl->si)
					ll->src_addr =
					    config->src_addr +
					    src_width * new_len * count;
				else
					ll->src_addr = config->src_addr;
				if (ctrl->di)
					ll->dest_addr =
					    config->dest_addr +
					    dest_width * new_len * count;
				else
					ll->dest_addr = config->dest_addr;
				ll->ch_ctrl = config->ch_ctrl & 0x7fffffff;
				ll->next_dma = 0;
				ll->next = NULL;
				num_entries--;
				count++;
			}
			ll->next_dma = ll_dma_old;
			ll->next = ll_old;
		}
		/* adjust last length/tc */
		ll->ch_ctrl = config->ch_ctrl & (~0x7ff);
		ll->ch_ctrl |= old_len - new_len * (count - 1);
		config->ch_ctrl &= 0x7fffffff;
	}
}
EXPORT_SYMBOL_GPL(pnx4008_dma_split_head_entry);
/*
 * Same splitting as above, but applied to an existing linked-list entry
 * whose tr_size exceeds the 0x7ff hardware limit.
 */
void pnx4008_dma_split_ll_entry(struct pnx4008_dma_ll *cur_ll,
				struct pnx4008_dma_ch_ctrl *ctrl)
{
	int new_len = ctrl->tr_size, num_entries = 0;
	int old_len = new_len;
	int src_width, dest_width, count = 1;

	switch (ctrl->swidth) {
	case WIDTH_BYTE:
		src_width = 1;
		break;
	case WIDTH_HWORD:
		src_width = 2;
		break;
	case WIDTH_WORD:
		src_width = 4;
		break;
	default:
		return;
	}

	switch (ctrl->dwidth) {
	case WIDTH_BYTE:
		dest_width = 1;
		break;
	case WIDTH_HWORD:
		dest_width = 2;
		break;
	case WIDTH_WORD:
		dest_width = 4;
		break;
	default:
		return;
	}

	while (new_len > 0x7FF) {
		num_entries++;
		new_len = (ctrl->tr_size + num_entries) / (num_entries + 1);
	}

	if (num_entries != 0) {
		struct pnx4008_dma_ll *ll = NULL;
		cur_ll->ch_ctrl &= ~0x7ff;
		cur_ll->ch_ctrl |= new_len;
		if (!cur_ll->next) {
			while (num_entries) {
				if (!ll) {
					cur_ll->next =
					    pnx4008_alloc_ll_entry(&cur_ll->next_dma);
					ll = cur_ll->next;
				} else {
					ll->next =
					    pnx4008_alloc_ll_entry(&ll->next_dma);
					ll = ll->next;
				}

				if (ctrl->si)
					ll->src_addr =
					    cur_ll->src_addr +
					    src_width * new_len * count;
				else
					ll->src_addr = cur_ll->src_addr;
				if (ctrl->di)
					ll->dest_addr =
					    cur_ll->dest_addr +
					    dest_width * new_len * count;
				else
					ll->dest_addr = cur_ll->dest_addr;
				ll->ch_ctrl = cur_ll->ch_ctrl & 0x7fffffff;
				ll->next_dma = 0;
				ll->next = NULL;
				num_entries--;
				count++;
			}
		} else {
			struct pnx4008_dma_ll *ll_old = cur_ll->next;
			unsigned long ll_dma_old = cur_ll->next_dma;
			while (num_entries) {
				if (!ll) {
					cur_ll->next =
					    pnx4008_alloc_ll_entry(&cur_ll->next_dma);
					ll = cur_ll->next;
				} else {
					ll->next =
					    pnx4008_alloc_ll_entry(&ll->next_dma);
					ll = ll->next;
				}

				if (ctrl->si)
					ll->src_addr =
					    cur_ll->src_addr +
					    src_width * new_len * count;
				else
					ll->src_addr = cur_ll->src_addr;
				if (ctrl->di)
					ll->dest_addr =
					    cur_ll->dest_addr +
					    dest_width * new_len * count;
				else
					ll->dest_addr = cur_ll->dest_addr;
				ll->ch_ctrl = cur_ll->ch_ctrl & 0x7fffffff;
				ll->next_dma = 0;
				ll->next = NULL;
				num_entries--;
				count++;
			}
			ll->next_dma = ll_dma_old;
			ll->next = ll_old;
		}
		/* adjust last length/tc */
		ll->ch_ctrl = cur_ll->ch_ctrl & (~0x7ff);
		ll->ch_ctrl |= old_len - new_len * (count - 1);
		cur_ll->ch_ctrl &= 0x7fffffff;
	}
}
EXPORT_SYMBOL_GPL(pnx4008_dma_split_ll_entry);
int pnx4008_config_channel(int ch, struct pnx4008_dma_config *config)
{
	if (!VALID_CHANNEL(ch) || !dma_channels[ch].name)
		return -EINVAL;

	pnx4008_dma_lock();
	__raw_writel(config->src_addr, DMAC_Cx_SRC_ADDR(ch));
	__raw_writel(config->dest_addr, DMAC_Cx_DEST_ADDR(ch));

	if (config->is_ll)
		__raw_writel(config->ll_dma, DMAC_Cx_LLI(ch));
	else
		__raw_writel(0, DMAC_Cx_LLI(ch));

	__raw_writel(config->ch_ctrl, DMAC_Cx_CONTROL(ch));
	__raw_writel(config->ch_cfg, DMAC_Cx_CONFIG(ch));
	pnx4008_dma_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(pnx4008_config_channel);

int pnx4008_channel_get_config(int ch, struct pnx4008_dma_config *config)
{
	if (!VALID_CHANNEL(ch) || !dma_channels[ch].name || !config)
		return -EINVAL;

	pnx4008_dma_lock();
	config->ch_cfg = __raw_readl(DMAC_Cx_CONFIG(ch));
	config->ch_ctrl = __raw_readl(DMAC_Cx_CONTROL(ch));

	config->ll_dma = __raw_readl(DMAC_Cx_LLI(ch));
	config->is_ll = config->ll_dma ? 1 : 0;

	config->src_addr = __raw_readl(DMAC_Cx_SRC_ADDR(ch));
	config->dest_addr = __raw_readl(DMAC_Cx_DEST_ADDR(ch));
	pnx4008_dma_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(pnx4008_channel_get_config);
int pnx4008_dma_ch_enable(int ch)
{
	unsigned long ch_cfg;

	if (!VALID_CHANNEL(ch) || !dma_channels[ch].name)
		return -EINVAL;

	pnx4008_dma_lock();
	ch_cfg = __raw_readl(DMAC_Cx_CONFIG(ch));
	ch_cfg |= 1;
	__raw_writel(ch_cfg, DMAC_Cx_CONFIG(ch));
	pnx4008_dma_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(pnx4008_dma_ch_enable);

int pnx4008_dma_ch_disable(int ch)
{
	unsigned long ch_cfg;

	if (!VALID_CHANNEL(ch) || !dma_channels[ch].name)
		return -EINVAL;

	pnx4008_dma_lock();
	ch_cfg = __raw_readl(DMAC_Cx_CONFIG(ch));
	ch_cfg &= ~1;
	__raw_writel(ch_cfg, DMAC_Cx_CONFIG(ch));
	pnx4008_dma_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(pnx4008_dma_ch_disable);

int pnx4008_dma_ch_enabled(int ch)
{
	unsigned long ch_cfg;

	if (!VALID_CHANNEL(ch) || !dma_channels[ch].name)
		return -EINVAL;

	pnx4008_dma_lock();
	ch_cfg = __raw_readl(DMAC_Cx_CONFIG(ch));
	pnx4008_dma_unlock();

	return ch_cfg & 1;
}
EXPORT_SYMBOL_GPL(pnx4008_dma_ch_enabled);
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	int i;
	unsigned long dint = __raw_readl(DMAC_INT_STAT);
	unsigned long tcint = __raw_readl(DMAC_INT_TC_STAT);
	unsigned long eint = __raw_readl(DMAC_INT_ERR_STAT);
	unsigned long i_bit;

	for (i = MAX_DMA_CHANNELS - 1; i >= 0; i--) {
		i_bit = 1 << i;
		if (dint & i_bit) {
			struct dma_channel *channel = &dma_channels[i];

			if (channel->name && channel->irq_handler) {
				int cause = 0;

				if (eint & i_bit)
					cause |= DMA_ERR_INT;
				if (tcint & i_bit)
					cause |= DMA_TC_INT;
				channel->irq_handler(i, cause, channel->data);
			} else {
				/*
				 * IRQ for an unregistered DMA channel
				 */
				printk(KERN_WARNING
				       "spurious IRQ for DMA channel %d\n", i);
			}
			if (tcint & i_bit)
				__raw_writel(i_bit, DMAC_INT_TC_CLEAR);
			if (eint & i_bit)
				__raw_writel(i_bit, DMAC_INT_ERR_CLEAR);
		}
	}
	return IRQ_HANDLED;
}
static int __init pnx4008_dma_init(void)
{
	int ret, i;

	ret = request_irq(DMA_INT, dma_irq_handler, 0, "DMA", NULL);
	if (ret) {
		printk(KERN_CRIT "Wow! Can't register IRQ for DMA\n");
		goto out;
	}

	ll_pool.count = 0x4000 / sizeof(struct pnx4008_dma_ll);
	ll_pool.cur = ll_pool.vaddr =
	    dma_alloc_coherent(NULL,
			       ll_pool.count * sizeof(struct pnx4008_dma_ll),
			       &ll_pool.dma_addr, GFP_KERNEL);
	if (!ll_pool.vaddr) {
		ret = -ENOMEM;
		free_irq(DMA_INT, NULL);
		goto out;
	}

	/*
	 * Thread a singly linked free list through the coherent buffer;
	 * the last entry points back to the first.
	 */
	for (i = 0; i < ll_pool.count - 1; i++) {
		void **addr = ll_pool.vaddr + i * sizeof(struct pnx4008_dma_ll);
		*addr = (void *)addr + sizeof(struct pnx4008_dma_ll);
	}
	*(long *)(ll_pool.vaddr +
		  (ll_pool.count - 1) * sizeof(struct pnx4008_dma_ll)) =
	    (long)ll_pool.vaddr;

	__raw_writel(1, DMAC_CONFIG);

out:
	return ret;
}

arch_initcall(pnx4008_dma_init);
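
For reference, a minimal usage sketch of the API exported above; this is not part of dma.c. A client driver could request a channel, pack the control and configuration words, program the channel and start it roughly as follows. The function names, the memory-to-memory parameters and the completion plumbing are invented here for illustration, and the struct and function declarations are assumed to come from <mach/dma.h>, which this file also includes.

/* Hypothetical client-side sketch -- not part of arch/arm/mach-pnx4008/dma.c. */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/completion.h>
#include <mach/dma.h>

static void example_dma_handler(int ch, int cause, void *data)
{
	if (cause & DMA_ERR_INT)
		printk(KERN_ERR "example: DMA error on channel %d\n", ch);
	if (cause & DMA_TC_INT)
		complete(data);		/* transfer finished */
}

static int example_mem2mem(dma_addr_t src, dma_addr_t dst, size_t len,
			   struct completion *done)
{
	struct pnx4008_dma_config cfg;
	struct pnx4008_dma_ch_ctrl ctrl;
	struct pnx4008_dma_ch_config chcfg;
	int ch, err;

	ch = pnx4008_request_channel("example", -1, example_dma_handler, done);
	if (ch < 0)
		return ch;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.tc_mask = 1;		/* raise the terminal-count interrupt */
	ctrl.si = 1;			/* increment source address */
	ctrl.di = 1;			/* increment destination address */
	ctrl.swidth = WIDTH_WORD;
	ctrl.dwidth = WIDTH_WORD;
	ctrl.sbsize = 1;
	ctrl.dbsize = 1;
	ctrl.tr_size = len / 4;		/* word transfers; must not exceed 0x7ff */

	memset(&chcfg, 0, sizeof(chcfg));
	chcfg.ie = 1;			/* unmask the error interrupt */
	chcfg.itc = 1;			/* unmask the terminal-count interrupt */
	chcfg.flow_cntrl = FC_MEM2MEM_DMA;

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = src;
	cfg.dest_addr = dst;

	err = pnx4008_dma_pack_control(&ctrl, &cfg.ch_ctrl);
	if (!err)
		err = pnx4008_dma_pack_config(&chcfg, &cfg.ch_cfg);
	if (!err)
		err = pnx4008_config_channel(ch, &cfg);
	if (!err)
		err = pnx4008_dma_ch_enable(ch);
	if (err)
		pnx4008_free_channel(ch);
	return err;
}

On the success path the caller would wait_for_completion(done), then call pnx4008_dma_ch_disable() and pnx4008_free_channel() once the transfer has finished.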