
/arch/xtensa/variants/s6000/dmac.c

http://github.com/mirrors/linux
/*
 * Authors:	Oskar Schirmer <oskar@scara.com>
 *		Daniel Gloeckner <dg@emlix.com>
 * (c) 2008 emlix GmbH http://www.emlix.com
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <asm/cacheflush.h>
#include <variant/dmac.h>

/* DMA engine lookup */

struct s6dmac_ctrl s6dmac_ctrl[S6_DMAC_NB];


/* DMA control, per engine */

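/*
 * Queue a transfer on a channel's FIFO, doing the cache maintenance it
 * needs first: flush the data cache over the source region and
 * invalidate it over the destination region, widening or shifting each
 * region to account for chunked transfers with a skip between chunks.
 */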
void s6dmac_put_fifo_cache(u32 dmac, int chan, u32 src, u32 dst, u32 size)
{
	if (xtensa_need_flush_dma_source(src)) {
		u32 base = src;
		u32 span = size;
		u32 chunk = readl(DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
		if (chunk && (size > chunk)) {
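			/*
			 * The transfer moves size bytes in chunks, with a
			 * signed skip applied between consecutive chunks;
			 * derive the contiguous region actually touched.
			 */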
			s32 skip =
				readl(DMA_CHNL(dmac, chan) + S6_DMA_SRCSKIP);
			u32 gaps = (size + chunk - 1) / chunk - 1;
			if (skip >= 0) {
				/* gaps between chunks widen the span */
				span += gaps * skip;
			} else if (-skip > chunk) {
				/*
				 * chunks walk backwards through memory,
				 * so the region begins at the last chunk
				 */
				s32 decr = gaps * (chunk + skip);
				base += decr;
				span = chunk - decr;
			} else {
				/*
				 * chunks overlap; cover whichever reaches
				 * further, the partial last chunk or the
				 * full chunk before it
				 */
				span = max(span + gaps * skip,
					(chunk + skip) * gaps - skip);
			}
		}
		flush_dcache_unaligned(base, span);
	}
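	/*
	 * Mirror image of the above: compute the span the transfer will
	 * write at the destination, and invalidate it so stale cache
	 * lines do not mask the DMA'd data.
	 */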
	if (xtensa_need_invalidate_dma_destination(dst)) {
		u32 base = dst;
		u32 span = size;
		u32 chunk = readl(DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
		if (chunk && (size > chunk)) {
			s32 skip =
				readl(DMA_CHNL(dmac, chan) + S6_DMA_DSTSKIP);
			u32 gaps = (size + chunk - 1) / chunk - 1;
			if (skip >= 0) {
				span += gaps * skip;
			} else if (-skip > chunk) {
				s32 decr = gaps * (chunk + skip);
				base += decr;
				span = chunk - decr;
			} else {
				span = max(span + gaps * skip,
					(chunk + skip) * gaps - skip);
			}
		}
		invalidate_dcache_unaligned(base, span);
	}
	s6dmac_put_fifo(dmac, chan, src, dst, size);
}

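/*
 * Disable the error interrupts selected by mask, serialized by the
 * controller's spinlock against concurrent users of the same engine.
 */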
void s6dmac_disable_error_irqs(u32 dmac, u32 mask)
{
	unsigned long flags;
	spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock;
	spin_lock_irqsave(spinl, flags);
	_s6dmac_disable_error_irqs(dmac, mask);
	spin_unlock_irqrestore(spinl, flags);
}

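/*
 * Gather and acknowledge the pending interrupt status for one channel,
 * compacted into a bitmask: bit 0 terminal count, bit 1 pending count,
 * bit 2 low watermark, bit 3 underrun, bit 4 overrun, bit 5 the
 * channel's INT1 bit, bits 6 and up the master error flags (counted
 * only when the master error info names this channel).
 */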
u32 s6dmac_int_sources(u32 dmac, u32 channel)
{
	u32 mask, ret, tmp;
	mask = 1 << channel;

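	/*
	 * Each status register is read, masked down to this channel, and
	 * the bits seen are written back to the matching clear register,
	 * apparently write-one-to-clear.
	 */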
	tmp = readl(dmac + S6_DMA_TERMCNTIRQSTAT);
	tmp &= mask;
	writel(tmp, dmac + S6_DMA_TERMCNTIRQCLR);
	ret = tmp >> channel;			/* bit 0: terminal count */

	tmp = readl(dmac + S6_DMA_PENDCNTIRQSTAT);
	tmp &= mask;
	writel(tmp, dmac + S6_DMA_PENDCNTIRQCLR);
	ret |= (tmp >> channel) << 1;		/* bit 1: pending count */

	tmp = readl(dmac + S6_DMA_LOWWMRKIRQSTAT);
	tmp &= mask;
	writel(tmp, dmac + S6_DMA_LOWWMRKIRQCLR);
	ret |= (tmp >> channel) << 2;		/* bit 2: low watermark */

	tmp = readl(dmac + S6_DMA_INTRAW0);
	tmp &= (mask << S6_DMA_INT0_OVER) | (mask << S6_DMA_INT0_UNDER);
	writel(tmp, dmac + S6_DMA_INTCLEAR0);

	if (tmp & (mask << S6_DMA_INT0_UNDER))
		ret |= 1 << 3;			/* bit 3: underrun */
	if (tmp & (mask << S6_DMA_INT0_OVER))
		ret |= 1 << 4;			/* bit 4: overrun */

	/* include a master error bit only if its info names this channel */
	tmp = readl(dmac + S6_DMA_MASTERERRINFO);
	mask <<= S6_DMA_INT1_CHANNEL;
	if (((tmp >> S6_DMA_MASTERERR_CHAN(0)) & S6_DMA_MASTERERR_CHAN_MASK)
			== channel)
		mask |= 1 << S6_DMA_INT1_MASTER;
	if (((tmp >> S6_DMA_MASTERERR_CHAN(1)) & S6_DMA_MASTERERR_CHAN_MASK)
			== channel)
		mask |= 1 << (S6_DMA_INT1_MASTER + 1);
	if (((tmp >> S6_DMA_MASTERERR_CHAN(2)) & S6_DMA_MASTERERR_CHAN_MASK)
			== channel)
		mask |= 1 << (S6_DMA_INT1_MASTER + 2);

	tmp = readl(dmac + S6_DMA_INTRAW1) & mask;
	writel(tmp, dmac + S6_DMA_INTCLEAR1);
	ret |= ((tmp >> channel) & 1) << 5;	/* bit 5: channel INT1 bit */
	ret |= ((tmp >> S6_DMA_INT1_MASTER)	/* bits 6+: master errors */
		& S6_DMA_INT1_MASTER_MASK) << 6;

	return ret;
}

void s6dmac_release_chan(u32 dmac, int chan)
{
	if (chan >= 0)
		s6dmac_disable_chan(dmac, chan);
}


/* global init */

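/*
 * Record one controller in the lookup table, initialize its lock, and
 * clear the master error interrupt bits via INTCLEAR1.
 */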
static inline void __init dmac_init(u32 dmac, u8 chan_nb)
{
	s6dmac_ctrl[S6_DMAC_INDEX(dmac)].dmac = dmac;
	spin_lock_init(&s6dmac_ctrl[S6_DMAC_INDEX(dmac)].lock);
	s6dmac_ctrl[S6_DMAC_INDEX(dmac)].chan_nb = chan_nb;
	writel(S6_DMA_INT1_MASTER_MASK << S6_DMA_INT1_MASTER,
		dmac + S6_DMA_INTCLEAR1);
}

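/*
 * Program the address windows for the controller's two bus masters;
 * the *END registers appear to take an inclusive bound, hence the - 1.
 */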
static inline void __init dmac_master(u32 dmac,
	u32 m0start, u32 m0end, u32 m1start, u32 m1end)
{
	writel(m0start, dmac + S6_DMA_MASTER0START);
	writel(m0end - 1, dmac + S6_DMA_MASTER0END);
	writel(m1start, dmac + S6_DMA_MASTER1START);
	writel(m1end - 1, dmac + S6_DMA_MASTER1END);
}

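/*
 * Register all four controllers (LMS, NI, DP, HIF) at arch init time;
 * all but NIDMA also get their master address windows programmed.
 */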
static void __init s6_dmac_init(void)
{
	dmac_init(S6_REG_LMSDMA, S6_LMSDMA_NB);
	dmac_master(S6_REG_LMSDMA,
		S6_MEM_DDR, S6_MEM_PCIE_APER, S6_MEM_EFI, S6_MEM_GMAC);
	dmac_init(S6_REG_NIDMA, S6_NIDMA_NB);
	dmac_init(S6_REG_DPDMA, S6_DPDMA_NB);
	dmac_master(S6_REG_DPDMA,
		S6_MEM_DDR, S6_MEM_PCIE_APER, S6_REG_DP, S6_REG_DPDMA);
	dmac_init(S6_REG_HIFDMA, S6_HIFDMA_NB);
	dmac_master(S6_REG_HIFDMA,
		S6_MEM_GMAC, S6_MEM_PCIE_CFG, S6_MEM_PCIE_APER, S6_MEM_AUX);
}

arch_initcall(s6_dmac_init);