PageRenderTime 92ms CodeModel.GetById 48ms RepoModel.GetById 1ms app.codeStats 0ms

/arch/xtensa/variants/s6000/dmac.c

https://bitbucket.org/cresqo/cm7-p500-kernel
C | 173 lines | 137 code | 23 blank | 13 comment | 23 complexity | 45a0a93a87fa876db28b33d28e79ff39 MD5 | raw file
Possible License(s): LGPL-2.0, AGPL-1.0, GPL-2.0
  1. /*
  2. * Authors: Oskar Schirmer <os@emlix.com>
  3. * Daniel Gloeckner <dg@emlix.com>
  4. * (c) 2008 emlix GmbH http://www.emlix.com
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License as published by the
  8. * Free Software Foundation; either version 2 of the License, or (at your
  9. * option) any later version.
  10. */
  11. #include <linux/kernel.h>
  12. #include <linux/io.h>
  13. #include <linux/types.h>
  14. #include <linux/errno.h>
  15. #include <linux/spinlock.h>
  16. #include <asm/cacheflush.h>
  17. #include <variant/dmac.h>
/*
 * DMA engine lookup: per-engine state (base address, lock, channel count),
 * indexed via S6_DMAC_INDEX()/_dmac_addr_index() and populated at boot by
 * dmac_init() below.
 */
struct s6dmac_ctrl s6dmac_ctrl[S6_DMAC_NB];
  20. /* DMA control, per engine */
  21. void s6dmac_put_fifo_cache(u32 dmac, int chan, u32 src, u32 dst, u32 size)
  22. {
  23. if (xtensa_need_flush_dma_source(src)) {
  24. u32 base = src;
  25. u32 span = size;
  26. u32 chunk = readl(DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
  27. if (chunk && (size > chunk)) {
  28. s32 skip =
  29. readl(DMA_CHNL(dmac, chan) + S6_DMA_SRCSKIP);
  30. u32 gaps = (size+chunk-1)/chunk - 1;
  31. if (skip >= 0) {
  32. span += gaps * skip;
  33. } else if (-skip > chunk) {
  34. s32 decr = gaps * (chunk + skip);
  35. base += decr;
  36. span = chunk - decr;
  37. } else {
  38. span = max(span + gaps * skip,
  39. (chunk + skip) * gaps - skip);
  40. }
  41. }
  42. flush_dcache_unaligned(base, span);
  43. }
  44. if (xtensa_need_invalidate_dma_destination(dst)) {
  45. u32 base = dst;
  46. u32 span = size;
  47. u32 chunk = readl(DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
  48. if (chunk && (size > chunk)) {
  49. s32 skip =
  50. readl(DMA_CHNL(dmac, chan) + S6_DMA_DSTSKIP);
  51. u32 gaps = (size+chunk-1)/chunk - 1;
  52. if (skip >= 0) {
  53. span += gaps * skip;
  54. } else if (-skip > chunk) {
  55. s32 decr = gaps * (chunk + skip);
  56. base += decr;
  57. span = chunk - decr;
  58. } else {
  59. span = max(span + gaps * skip,
  60. (chunk + skip) * gaps - skip);
  61. }
  62. }
  63. invalidate_dcache_unaligned(base, span);
  64. }
  65. s6dmac_put_fifo(dmac, chan, src, dst, size);
  66. }
  67. void s6dmac_disable_error_irqs(u32 dmac, u32 mask)
  68. {
  69. unsigned long flags;
  70. spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock;
  71. spin_lock_irqsave(spinl, flags);
  72. _s6dmac_disable_error_irqs(dmac, mask);
  73. spin_unlock_irqrestore(spinl, flags);
  74. }
/*
 * Gather all pending interrupt conditions for @channel on engine @dmac
 * and acknowledge them as a side effect: each status register is read,
 * masked down to this channel's bit(s), and those same bits are written
 * back to the matching *CLR register.
 *
 * Returned bitmask (bit set = condition was pending):
 *   bit 0    - terminal count
 *   bit 1    - pending count
 *   bit 2    - low watermark
 *   bit 3    - underflow (INT0)
 *   bit 4    - overflow (INT0)
 *   bit 5    - this channel's INT1 bit
 *   bits 6.. - INT1 master-error flags attributed to this channel
 */
u32 s6dmac_int_sources(u32 dmac, u32 channel)
{
	u32 mask, ret, tmp;
	mask = 1 << channel;

	/* terminal count: read status, isolate our bit, acknowledge */
	tmp = readl(dmac + S6_DMA_TERMCNTIRQSTAT);
	tmp &= mask;
	writel(tmp, dmac + S6_DMA_TERMCNTIRQCLR);
	ret = tmp >> channel;

	/* pending count */
	tmp = readl(dmac + S6_DMA_PENDCNTIRQSTAT);
	tmp &= mask;
	writel(tmp, dmac + S6_DMA_PENDCNTIRQCLR);
	ret |= (tmp >> channel) << 1;

	/* low watermark */
	tmp = readl(dmac + S6_DMA_LOWWMRKIRQSTAT);
	tmp &= mask;
	writel(tmp, dmac + S6_DMA_LOWWMRKIRQCLR);
	ret |= (tmp >> channel) << 2;

	/* raw INT0: over/underflow bits for this channel, then clear both */
	tmp = readl(dmac + S6_DMA_INTRAW0);
	tmp &= (mask << S6_DMA_INT0_OVER) | (mask << S6_DMA_INT0_UNDER);
	writel(tmp, dmac + S6_DMA_INTCLEAR0);
	if (tmp & (mask << S6_DMA_INT0_UNDER))
		ret |= 1 << 3;
	if (tmp & (mask << S6_DMA_INT0_OVER))
		ret |= 1 << 4;

	/*
	 * Master errors: MASTERERRINFO records which channel each of the
	 * three master-error slots belongs to.  Widen @mask to also cover
	 * the INT1 master-error bits that name this channel, before the
	 * single INT1 read-and-clear below.
	 */
	tmp = readl(dmac + S6_DMA_MASTERERRINFO);
	mask <<= S6_DMA_INT1_CHANNEL;
	if (((tmp >> S6_DMA_MASTERERR_CHAN(0)) & S6_DMA_MASTERERR_CHAN_MASK)
			== channel)
		mask |= 1 << S6_DMA_INT1_MASTER;
	if (((tmp >> S6_DMA_MASTERERR_CHAN(1)) & S6_DMA_MASTERERR_CHAN_MASK)
			== channel)
		mask |= 1 << (S6_DMA_INT1_MASTER + 1);
	if (((tmp >> S6_DMA_MASTERERR_CHAN(2)) & S6_DMA_MASTERERR_CHAN_MASK)
			== channel)
		mask |= 1 << (S6_DMA_INT1_MASTER + 2);

	/* raw INT1: channel bit plus attributed master errors, acknowledge */
	tmp = readl(dmac + S6_DMA_INTRAW1) & mask;
	writel(tmp, dmac + S6_DMA_INTCLEAR1);
	ret |= ((tmp >> channel) & 1) << 5;
	ret |= ((tmp >> S6_DMA_INT1_MASTER) & S6_DMA_INT1_MASTER_MASK) << 6;
	return ret;
}
  115. void s6dmac_release_chan(u32 dmac, int chan)
  116. {
  117. if (chan >= 0)
  118. s6dmac_disable_chan(dmac, chan);
  119. }
  120. /* global init */
  121. static inline void __init dmac_init(u32 dmac, u8 chan_nb)
  122. {
  123. s6dmac_ctrl[S6_DMAC_INDEX(dmac)].dmac = dmac;
  124. spin_lock_init(&s6dmac_ctrl[S6_DMAC_INDEX(dmac)].lock);
  125. s6dmac_ctrl[S6_DMAC_INDEX(dmac)].chan_nb = chan_nb;
  126. writel(S6_DMA_INT1_MASTER_MASK << S6_DMA_INT1_MASTER,
  127. dmac + S6_DMA_INTCLEAR1);
  128. }
/*
 * Program the address windows for the two bus masters of a DMA engine.
 * The END registers are written as @end - 1, i.e. they appear to hold
 * the last valid address rather than the one-past-the-end address --
 * NOTE(review): inferred from the "- 1"; confirm against the s6000
 * register documentation.
 */
static inline void __init dmac_master(u32 dmac,
	u32 m0start, u32 m0end, u32 m1start, u32 m1end)
{
	writel(m0start, dmac + S6_DMA_MASTER0START);
	writel(m0end - 1, dmac + S6_DMA_MASTER0END);
	writel(m1start, dmac + S6_DMA_MASTER1START);
	writel(m1end - 1, dmac + S6_DMA_MASTER1END);
}
/*
 * Boot-time initialization of all four s6000 DMA engines: register each
 * one in the lookup table and, where applicable, program its master
 * address windows.  NIDMA gets no master window setup here.
 */
static void __init s6_dmac_init(void)
{
	/* local memory subsystem DMA */
	dmac_init(S6_REG_LMSDMA, S6_LMSDMA_NB);
	dmac_master(S6_REG_LMSDMA,
		S6_MEM_DDR, S6_MEM_PCIE_APER, S6_MEM_EFI, S6_MEM_GMAC);
	/* network interface DMA */
	dmac_init(S6_REG_NIDMA, S6_NIDMA_NB);
	/* data path DMA */
	dmac_init(S6_REG_DPDMA, S6_DPDMA_NB);
	dmac_master(S6_REG_DPDMA,
		S6_MEM_DDR, S6_MEM_PCIE_APER, S6_REG_DP, S6_REG_DPDMA);
	/* host interface DMA */
	dmac_init(S6_REG_HIFDMA, S6_HIFDMA_NB);
	dmac_master(S6_REG_HIFDMA,
		S6_MEM_GMAC, S6_MEM_PCIE_CFG, S6_MEM_PCIE_APER, S6_MEM_AUX);
}
/* run during kernel boot, after core/arch setup initcall levels */
arch_initcall(s6_dmac_init);