
/drivers/dma/at_hdmac.c

https://bitbucket.org/ndreys/linux-sunxi
   1/*
   2 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
   3 *
   4 * Copyright (C) 2008 Atmel Corporation
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 *
  11 *
   12 * This supports the Atmel AHB DMA Controller.
  13 *
  14 * The driver has currently been tested with the Atmel AT91SAM9RL
  15 * and AT91SAM9G45 series.
  16 */
  17
  18#include <linux/clk.h>
  19#include <linux/dmaengine.h>
  20#include <linux/dma-mapping.h>
  21#include <linux/dmapool.h>
  22#include <linux/interrupt.h>
  23#include <linux/module.h>
  24#include <linux/platform_device.h>
  25#include <linux/slab.h>
  26
  27#include "at_hdmac_regs.h"
  28
  29/*
  30 * Glossary
  31 * --------
  32 *
   33 * at_hdmac		: Name of the Atmel AHB DMA Controller
   34 * at_dma_ / atdma	: Atmel DMA controller entity related
   35 * atc_	/ atchan	: Atmel DMA Channel entity related
  36 */
  37
  38#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
  39#define	ATC_DEFAULT_CTRLA	(0)
  40#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
  41				|ATC_DIF(AT_DMA_MEM_IF))
  42
  43/*
  44 * Initial number of descriptors to allocate for each channel. This could
  45 * be increased during dma usage.
  46 */
  47static unsigned int init_nr_desc_per_channel = 64;
  48module_param(init_nr_desc_per_channel, uint, 0644);
  49MODULE_PARM_DESC(init_nr_desc_per_channel,
  50		 "initial descriptors per channel (default: 64)");
  51
  52
  53/* prototypes */
  54static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
  55
  56
  57/*----------------------------------------------------------------------*/
  58
  59static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
  60{
  61	return list_first_entry(&atchan->active_list,
  62				struct at_desc, desc_node);
  63}
  64
  65static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
  66{
  67	return list_first_entry(&atchan->queue,
  68				struct at_desc, desc_node);
  69}
  70
  71/**
  72 * atc_alloc_descriptor - allocate and return an initialized descriptor
  73 * @chan: the channel to allocate descriptors for
  74 * @gfp_flags: GFP allocation flags
  75 *
   76 * Note: The ack-bit is set in the descriptor flags at creation time to
   77 *       make the initial allocation more convenient. This bit will be
   78 *       cleared and control handed over to the client at usage time
   79 *       (during the preparation functions).
  80 */
  81static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
  82					    gfp_t gfp_flags)
  83{
  84	struct at_desc	*desc = NULL;
  85	struct at_dma	*atdma = to_at_dma(chan->device);
  86	dma_addr_t phys;
  87
  88	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
  89	if (desc) {
  90		memset(desc, 0, sizeof(struct at_desc));
  91		INIT_LIST_HEAD(&desc->tx_list);
  92		dma_async_tx_descriptor_init(&desc->txd, chan);
  93		/* txd.flags will be overwritten in prep functions */
  94		desc->txd.flags = DMA_CTRL_ACK;
  95		desc->txd.tx_submit = atc_tx_submit;
  96		desc->txd.phys = phys;
  97	}
  98
  99	return desc;
 100}
 101
 102/**
 103 * atc_desc_get - get an unused descriptor from free_list
 104 * @atchan: channel we want a new descriptor for
 105 */
 106static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
 107{
 108	struct at_desc *desc, *_desc;
 109	struct at_desc *ret = NULL;
 110	unsigned int i = 0;
 111	LIST_HEAD(tmp_list);
 112
 113	spin_lock_bh(&atchan->lock);
 114	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
 115		i++;
 116		if (async_tx_test_ack(&desc->txd)) {
 117			list_del(&desc->desc_node);
 118			ret = desc;
 119			break;
 120		}
 121		dev_dbg(chan2dev(&atchan->chan_common),
 122				"desc %p not ACKed\n", desc);
 123	}
 124	spin_unlock_bh(&atchan->lock);
 125	dev_vdbg(chan2dev(&atchan->chan_common),
 126		"scanned %u descriptors on freelist\n", i);
 127
  128	/* no more descriptors available in the initial pool: allocate one more */
 129	if (!ret) {
 130		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
 131		if (ret) {
 132			spin_lock_bh(&atchan->lock);
 133			atchan->descs_allocated++;
 134			spin_unlock_bh(&atchan->lock);
 135		} else {
 136			dev_err(chan2dev(&atchan->chan_common),
 137					"not enough descriptors available\n");
 138		}
 139	}
 140
 141	return ret;
 142}
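/*
 * Descriptor recycling: a descriptor is only taken off free_list once the
 * client has ACKed it (async_tx_test_ack()).  When none is available the
 * pool grows with a GFP_ATOMIC allocation, since the prep_* paths calling
 * this must not sleep.
 */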
 143
 144/**
 145 * atc_desc_put - move a descriptor, including any children, to the free list
 146 * @atchan: channel we work on
 147 * @desc: descriptor, at the head of a chain, to move to free list
 148 */
 149static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
 150{
 151	if (desc) {
 152		struct at_desc *child;
 153
 154		spin_lock_bh(&atchan->lock);
 155		list_for_each_entry(child, &desc->tx_list, desc_node)
 156			dev_vdbg(chan2dev(&atchan->chan_common),
 157					"moving child desc %p to freelist\n",
 158					child);
 159		list_splice_init(&desc->tx_list, &atchan->free_list);
 160		dev_vdbg(chan2dev(&atchan->chan_common),
 161			 "moving desc %p to freelist\n", desc);
 162		list_add(&desc->desc_node, &atchan->free_list);
 163		spin_unlock_bh(&atchan->lock);
 164	}
 165}
 166
 167/**
  168 * atc_desc_chain - build chain by adding a descriptor
  169 * @first: address of first descriptor of the chain
  170 * @prev: address of previous descriptor of the chain
 171 * @desc: descriptor to queue
 172 *
 173 * Called from prep_* functions
 174 */
 175static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
 176			   struct at_desc *desc)
 177{
 178	if (!(*first)) {
 179		*first = desc;
 180	} else {
 181		/* inform the HW lli about chaining */
 182		(*prev)->lli.dscr = desc->txd.phys;
 183		/* insert the link descriptor to the LD ring */
 184		list_add_tail(&desc->desc_node,
 185				&(*first)->tx_list);
 186	}
 187	*prev = desc;
 188}
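/*
 * Example of the resulting chain for three descriptors d1, d2, d3:
 *
 *   software view:  d1->tx_list contains d2 and d3
 *   hardware view:  d1->lli.dscr = d2 bus address, d2->lli.dscr = d3 bus
 *                   address; the prep_* caller terminates d3 (set_desc_eol()
 *                   or, for cyclic transfers, a link back to d1)
 *
 * The controller follows the physical lli.dscr pointers on its own, while
 * the driver walks tx_list to complete and recycle the descriptors.
 */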
 189
 190/**
 191 * atc_assign_cookie - compute and assign new cookie
 192 * @atchan: channel we work on
 193 * @desc: descriptor to assign cookie for
 194 *
 195 * Called with atchan->lock held and bh disabled
 196 */
 197static dma_cookie_t
 198atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
 199{
 200	dma_cookie_t cookie = atchan->chan_common.cookie;
 201
 202	if (++cookie < 0)
 203		cookie = 1;
 204
 205	atchan->chan_common.cookie = cookie;
 206	desc->txd.cookie = cookie;
 207
 208	return cookie;
 209}
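/*
 * dma_cookie_t is a signed int, so the increment above wraps from INT_MAX
 * back to 1: cookie 0 is never handed out and negative values stay reserved
 * for "not submitted yet" markers such as the -EBUSY set by the prep_*
 * functions.
 */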
 210
 211/**
 212 * atc_dostart - starts the DMA engine for real
 213 * @atchan: the channel we want to start
 214 * @first: first descriptor in the list we want to begin with
 215 *
 216 * Called with atchan->lock held and bh disabled
 217 */
 218static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
 219{
 220	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);
 221
 222	/* ASSERT:  channel is idle */
 223	if (atc_chan_is_enabled(atchan)) {
 224		dev_err(chan2dev(&atchan->chan_common),
 225			"BUG: Attempted to start non-idle channel\n");
 226		dev_err(chan2dev(&atchan->chan_common),
 227			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
 228			channel_readl(atchan, SADDR),
 229			channel_readl(atchan, DADDR),
 230			channel_readl(atchan, CTRLA),
 231			channel_readl(atchan, CTRLB),
 232			channel_readl(atchan, DSCR));
 233
 234		/* The tasklet will hopefully advance the queue... */
 235		return;
 236	}
 237
 238	vdbg_dump_regs(atchan);
 239
 240	channel_writel(atchan, SADDR, 0);
 241	channel_writel(atchan, DADDR, 0);
 242	channel_writel(atchan, CTRLA, 0);
 243	channel_writel(atchan, CTRLB, 0);
 244	channel_writel(atchan, DSCR, first->txd.phys);
 245	dma_writel(atdma, CHER, atchan->mask);
 246
 247	vdbg_dump_regs(atchan);
 248}
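/*
 * Note that SADDR/DADDR/CTRLA/CTRLB are simply cleared above: only DSCR is
 * loaded, with the bus address of the first LLI, so the channel fetches its
 * actual transfer parameters from the descriptor chain in memory as soon as
 * it is enabled through the CHER register.
 */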
 249
 250/**
 251 * atc_chain_complete - finish work for one transaction chain
 252 * @atchan: channel we work on
 253 * @desc: descriptor at the head of the chain we want do complete
 254 *
 255 * Called with atchan->lock held and bh disabled */
 256static void
 257atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 258{
 259	struct dma_async_tx_descriptor	*txd = &desc->txd;
 260
 261	dev_vdbg(chan2dev(&atchan->chan_common),
 262		"descriptor %u complete\n", txd->cookie);
 263
 264	atchan->completed_cookie = txd->cookie;
 265
 266	/* move children to free_list */
 267	list_splice_init(&desc->tx_list, &atchan->free_list);
 268	/* move myself to free_list */
 269	list_move(&desc->desc_node, &atchan->free_list);
 270
 271	/* unmap dma addresses (not on slave channels) */
 272	if (!atchan->chan_common.private) {
 273		struct device *parent = chan2parent(&atchan->chan_common);
 274		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
 275			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
 276				dma_unmap_single(parent,
 277						desc->lli.daddr,
 278						desc->len, DMA_FROM_DEVICE);
 279			else
 280				dma_unmap_page(parent,
 281						desc->lli.daddr,
 282						desc->len, DMA_FROM_DEVICE);
 283		}
 284		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
 285			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
 286				dma_unmap_single(parent,
 287						desc->lli.saddr,
 288						desc->len, DMA_TO_DEVICE);
 289			else
 290				dma_unmap_page(parent,
 291						desc->lli.saddr,
 292						desc->len, DMA_TO_DEVICE);
 293		}
 294	}
 295
 296	/* for cyclic transfers,
  297	 * there is no need to invoke the callback function while stopping */
 298	if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
 299		dma_async_tx_callback	callback = txd->callback;
 300		void			*param = txd->callback_param;
 301
 302		/*
 303		 * The API requires that no submissions are done from a
 304		 * callback, so we don't need to drop the lock here
 305		 */
 306		if (callback)
 307			callback(param);
 308	}
 309
 310	dma_run_dependencies(txd);
 311}
 312
 313/**
 314 * atc_complete_all - finish work for all transactions
 315 * @atchan: channel to complete transactions for
 316 *
 317 * Eventually submit queued descriptors if any
 318 *
 319 * Assume channel is idle while calling this function
 320 * Called with atchan->lock held and bh disabled
 321 */
 322static void atc_complete_all(struct at_dma_chan *atchan)
 323{
 324	struct at_desc *desc, *_desc;
 325	LIST_HEAD(list);
 326
 327	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
 328
 329	BUG_ON(atc_chan_is_enabled(atchan));
 330
 331	/*
 332	 * Submit queued descriptors ASAP, i.e. before we go through
 333	 * the completed ones.
 334	 */
 335	if (!list_empty(&atchan->queue))
 336		atc_dostart(atchan, atc_first_queued(atchan));
  337	/* empty active_list now that it is completed */
 338	list_splice_init(&atchan->active_list, &list);
 339	/* empty queue list by moving descriptors (if any) to active_list */
 340	list_splice_init(&atchan->queue, &atchan->active_list);
 341
 342	list_for_each_entry_safe(desc, _desc, &list, desc_node)
 343		atc_chain_complete(atchan, desc);
 344}
 345
 346/**
  347 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 348 * @atchan: channel to be cleaned up
 349 *
 350 * Called with atchan->lock held and bh disabled
 351 */
 352static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
 353{
 354	struct at_desc	*desc, *_desc;
 355	struct at_desc	*child;
 356
 357	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");
 358
 359	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
 360		if (!(desc->lli.ctrla & ATC_DONE))
 361			/* This one is currently in progress */
 362			return;
 363
 364		list_for_each_entry(child, &desc->tx_list, desc_node)
 365			if (!(child->lli.ctrla & ATC_DONE))
 366				/* Currently in progress */
 367				return;
 368
 369		/*
 370		 * No descriptors so far seem to be in progress, i.e.
 371		 * this chain must be done.
 372		 */
 373		atc_chain_complete(atchan, desc);
 374	}
 375}
 376
 377/**
 378 * atc_advance_work - at the end of a transaction, move forward
 379 * @atchan: channel where the transaction ended
 380 *
 381 * Called with atchan->lock held and bh disabled
 382 */
 383static void atc_advance_work(struct at_dma_chan *atchan)
 384{
 385	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
 386
 387	if (list_empty(&atchan->active_list) ||
 388	    list_is_singular(&atchan->active_list)) {
 389		atc_complete_all(atchan);
 390	} else {
 391		atc_chain_complete(atchan, atc_first_active(atchan));
 392		/* advance work */
 393		atc_dostart(atchan, atc_first_active(atchan));
 394	}
 395}
 396
 397
 398/**
 399 * atc_handle_error - handle errors reported by DMA controller
 400 * @atchan: channel where error occurs
 401 *
 402 * Called with atchan->lock held and bh disabled
 403 */
 404static void atc_handle_error(struct at_dma_chan *atchan)
 405{
 406	struct at_desc *bad_desc;
 407	struct at_desc *child;
 408
 409	/*
 410	 * The descriptor currently at the head of the active list is
  411	 * broken. Since we don't have any way to report errors, we'll
 412	 * just have to scream loudly and try to carry on.
 413	 */
 414	bad_desc = atc_first_active(atchan);
 415	list_del_init(&bad_desc->desc_node);
 416
  417	/* As we are stopped, take the opportunity to push queued descriptors
  418	 * onto the active_list */
 419	list_splice_init(&atchan->queue, atchan->active_list.prev);
 420
 421	/* Try to restart the controller */
 422	if (!list_empty(&atchan->active_list))
 423		atc_dostart(atchan, atc_first_active(atchan));
 424
 425	/*
  426	 * KERN_CRIT may seem harsh, but since this only happens
 427	 * when someone submits a bad physical address in a
 428	 * descriptor, we should consider ourselves lucky that the
 429	 * controller flagged an error instead of scribbling over
 430	 * random memory locations.
 431	 */
 432	dev_crit(chan2dev(&atchan->chan_common),
 433			"Bad descriptor submitted for DMA!\n");
 434	dev_crit(chan2dev(&atchan->chan_common),
 435			"  cookie: %d\n", bad_desc->txd.cookie);
 436	atc_dump_lli(atchan, &bad_desc->lli);
 437	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
 438		atc_dump_lli(atchan, &child->lli);
 439
 440	/* Pretend the descriptor completed successfully */
 441	atc_chain_complete(atchan, bad_desc);
 442}
 443
 444/**
 445 * atc_handle_cyclic - at the end of a period, run callback function
 446 * @atchan: channel used for cyclic operations
 447 *
 448 * Called with atchan->lock held and bh disabled
 449 */
 450static void atc_handle_cyclic(struct at_dma_chan *atchan)
 451{
 452	struct at_desc			*first = atc_first_active(atchan);
 453	struct dma_async_tx_descriptor	*txd = &first->txd;
 454	dma_async_tx_callback		callback = txd->callback;
 455	void				*param = txd->callback_param;
 456
 457	dev_vdbg(chan2dev(&atchan->chan_common),
 458			"new cyclic period llp 0x%08x\n",
 459			channel_readl(atchan, DSCR));
 460
 461	if (callback)
 462		callback(param);
 463}
 464
 465/*--  IRQ & Tasklet  ---------------------------------------------------*/
 466
 467static void atc_tasklet(unsigned long data)
 468{
 469	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
 470
 471	spin_lock(&atchan->lock);
 472	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
 473		atc_handle_error(atchan);
 474	else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
 475		atc_handle_cyclic(atchan);
 476	else
 477		atc_advance_work(atchan);
 478
 479	spin_unlock(&atchan->lock);
 480}
 481
 482static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
 483{
 484	struct at_dma		*atdma = (struct at_dma *)dev_id;
 485	struct at_dma_chan	*atchan;
 486	int			i;
 487	u32			status, pending, imr;
 488	int			ret = IRQ_NONE;
 489
 490	do {
 491		imr = dma_readl(atdma, EBCIMR);
 492		status = dma_readl(atdma, EBCISR);
 493		pending = status & imr;
 494
 495		if (!pending)
 496			break;
 497
 498		dev_vdbg(atdma->dma_common.dev,
 499			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
 500			 status, imr, pending);
 501
 502		for (i = 0; i < atdma->dma_common.chancnt; i++) {
 503			atchan = &atdma->chan[i];
 504			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
 505				if (pending & AT_DMA_ERR(i)) {
 506					/* Disable channel on AHB error */
 507					dma_writel(atdma, CHDR,
 508						AT_DMA_RES(i) | atchan->mask);
 509					/* Give information to tasklet */
 510					set_bit(ATC_IS_ERROR, &atchan->status);
 511				}
 512				tasklet_schedule(&atchan->tasklet);
 513				ret = IRQ_HANDLED;
 514			}
 515		}
 516
 517	} while (pending);
 518
 519	return ret;
 520}
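/*
 * The hard IRQ handler only acknowledges the controller (reading EBCISR
 * clears the status bits, which is also why the probe routine polls it to
 * flush stale interrupts) and schedules the per-channel tasklet; completion
 * and error handling happen later in atc_tasklet().
 */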
 521
 522
 523/*--  DMA Engine API  --------------------------------------------------*/
 524
 525/**
 526 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 527 * @desc: descriptor at the head of the transaction chain
 528 *
  529 * Queue the chain if the DMA engine is already working
 530 *
 531 * Cookie increment and adding to active_list or queue must be atomic
 532 */
 533static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
 534{
 535	struct at_desc		*desc = txd_to_at_desc(tx);
 536	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
 537	dma_cookie_t		cookie;
 538
 539	spin_lock_bh(&atchan->lock);
 540	cookie = atc_assign_cookie(atchan, desc);
 541
 542	if (list_empty(&atchan->active_list)) {
 543		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
 544				desc->txd.cookie);
 545		atc_dostart(atchan, desc);
 546		list_add_tail(&desc->desc_node, &atchan->active_list);
 547	} else {
 548		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
 549				desc->txd.cookie);
 550		list_add_tail(&desc->desc_node, &atchan->queue);
 551	}
 552
 553	spin_unlock_bh(&atchan->lock);
 554
 555	return cookie;
 556}
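/*
 * Typical client flow around this entry point, e.g. for a memcpy:
 *
 *	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
 *	cookie = txd->tx_submit(txd);		-> atc_tx_submit()
 *	dma_async_issue_pending(chan);
 *
 * Because tx_submit() already starts an idle channel, atc_issue_pending(),
 * defined later in this file, mostly has descriptor clean-up left to do.
 */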
 557
 558/**
 559 * atc_prep_dma_memcpy - prepare a memcpy operation
 560 * @chan: the channel to prepare operation on
 561 * @dest: operation virtual destination address
 562 * @src: operation virtual source address
 563 * @len: operation length
 564 * @flags: tx descriptor status flags
 565 */
 566static struct dma_async_tx_descriptor *
 567atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 568		size_t len, unsigned long flags)
 569{
 570	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 571	struct at_desc		*desc = NULL;
 572	struct at_desc		*first = NULL;
 573	struct at_desc		*prev = NULL;
 574	size_t			xfer_count;
 575	size_t			offset;
 576	unsigned int		src_width;
 577	unsigned int		dst_width;
 578	u32			ctrla;
 579	u32			ctrlb;
 580
 581	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
 582			dest, src, len, flags);
 583
 584	if (unlikely(!len)) {
 585		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
 586		return NULL;
 587	}
 588
 589	ctrla =   ATC_DEFAULT_CTRLA;
 590	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
 591		| ATC_SRC_ADDR_MODE_INCR
 592		| ATC_DST_ADDR_MODE_INCR
 593		| ATC_FC_MEM2MEM;
 594
 595	/*
 596	 * We can be a lot more clever here, but this should take care
 597	 * of the most common optimization.
 598	 */
 599	if (!((src | dest  | len) & 3)) {
 600		ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
 601		src_width = dst_width = 2;
 602	} else if (!((src | dest | len) & 1)) {
 603		ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
 604		src_width = dst_width = 1;
 605	} else {
 606		ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
 607		src_width = dst_width = 0;
 608	}
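	/*
	 * The width encoding is 0 = byte, 1 = halfword, 2 = word, so the
	 * xfer_count computed below is a number of src_width-sized transfers,
	 * not bytes; e.g. a word-aligned 64 KiB copy is split into chunks of
	 * at most ATC_BTSIZE_MAX words per link descriptor.
	 */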
 609
 610	for (offset = 0; offset < len; offset += xfer_count << src_width) {
 611		xfer_count = min_t(size_t, (len - offset) >> src_width,
 612				ATC_BTSIZE_MAX);
 613
 614		desc = atc_desc_get(atchan);
 615		if (!desc)
 616			goto err_desc_get;
 617
 618		desc->lli.saddr = src + offset;
 619		desc->lli.daddr = dest + offset;
 620		desc->lli.ctrla = ctrla | xfer_count;
 621		desc->lli.ctrlb = ctrlb;
 622
 623		desc->txd.cookie = 0;
 624
 625		atc_desc_chain(&first, &prev, desc);
 626	}
 627
  628	/* First descriptor of the chain embeds additional information */
 629	first->txd.cookie = -EBUSY;
 630	first->len = len;
 631
  632	/* set end-of-link on the last link descriptor of the list */
 633	set_desc_eol(desc);
 634
 635	first->txd.flags = flags; /* client is in control of this ack */
 636
 637	return &first->txd;
 638
 639err_desc_get:
 640	atc_desc_put(atchan, first);
 641	return NULL;
 642}
 643
 644
 645/**
 646 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 647 * @chan: DMA channel
 648 * @sgl: scatterlist to transfer to/from
  649 * @sg_len: number of entries in @sgl
 650 * @direction: DMA direction
 651 * @flags: tx descriptor status flags
 652 */
 653static struct dma_async_tx_descriptor *
 654atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 655		unsigned int sg_len, enum dma_data_direction direction,
 656		unsigned long flags)
 657{
 658	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 659	struct at_dma_slave	*atslave = chan->private;
 660	struct at_desc		*first = NULL;
 661	struct at_desc		*prev = NULL;
 662	u32			ctrla;
 663	u32			ctrlb;
 664	dma_addr_t		reg;
 665	unsigned int		reg_width;
 666	unsigned int		mem_width;
 667	unsigned int		i;
 668	struct scatterlist	*sg;
 669	size_t			total_len = 0;
 670
 671	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
 672			sg_len,
 673			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
 674			flags);
 675
 676	if (unlikely(!atslave || !sg_len)) {
 677		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
 678		return NULL;
 679	}
 680
 681	reg_width = atslave->reg_width;
 682
 683	ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
 684	ctrlb = ATC_IEN;
 685
 686	switch (direction) {
 687	case DMA_TO_DEVICE:
 688		ctrla |=  ATC_DST_WIDTH(reg_width);
 689		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
 690			| ATC_SRC_ADDR_MODE_INCR
 691			| ATC_FC_MEM2PER
 692			| ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
 693		reg = atslave->tx_reg;
 694		for_each_sg(sgl, sg, sg_len, i) {
 695			struct at_desc	*desc;
 696			u32		len;
 697			u32		mem;
 698
 699			desc = atc_desc_get(atchan);
 700			if (!desc)
 701				goto err_desc_get;
 702
 703			mem = sg_dma_address(sg);
 704			len = sg_dma_len(sg);
 705			if (unlikely(!len)) {
 706				dev_dbg(chan2dev(chan),
 707					"prep_slave_sg: sg(%d) data length is zero\n", i);
 708				goto err;
 709			}
 710			mem_width = 2;
 711			if (unlikely(mem & 3 || len & 3))
 712				mem_width = 0;
 713
 714			desc->lli.saddr = mem;
 715			desc->lli.daddr = reg;
 716			desc->lli.ctrla = ctrla
 717					| ATC_SRC_WIDTH(mem_width)
 718					| len >> mem_width;
 719			desc->lli.ctrlb = ctrlb;
 720
 721			atc_desc_chain(&first, &prev, desc);
 722			total_len += len;
 723		}
 724		break;
 725	case DMA_FROM_DEVICE:
 726		ctrla |=  ATC_SRC_WIDTH(reg_width);
 727		ctrlb |=  ATC_DST_ADDR_MODE_INCR
 728			| ATC_SRC_ADDR_MODE_FIXED
 729			| ATC_FC_PER2MEM
 730			| ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);
 731
 732		reg = atslave->rx_reg;
 733		for_each_sg(sgl, sg, sg_len, i) {
 734			struct at_desc	*desc;
 735			u32		len;
 736			u32		mem;
 737
 738			desc = atc_desc_get(atchan);
 739			if (!desc)
 740				goto err_desc_get;
 741
 742			mem = sg_dma_address(sg);
 743			len = sg_dma_len(sg);
 744			if (unlikely(!len)) {
 745				dev_dbg(chan2dev(chan),
 746					"prep_slave_sg: sg(%d) data length is zero\n", i);
 747				goto err;
 748			}
 749			mem_width = 2;
 750			if (unlikely(mem & 3 || len & 3))
 751				mem_width = 0;
 752
 753			desc->lli.saddr = reg;
 754			desc->lli.daddr = mem;
 755			desc->lli.ctrla = ctrla
 756					| ATC_DST_WIDTH(mem_width)
 757					| len >> reg_width;
 758			desc->lli.ctrlb = ctrlb;
 759
 760			atc_desc_chain(&first, &prev, desc);
 761			total_len += len;
 762		}
 763		break;
 764	default:
 765		return NULL;
 766	}
 767
  768	/* set end-of-link on the last link descriptor of the list */
 769	set_desc_eol(prev);
 770
  771	/* First descriptor of the chain embeds additional information */
 772	first->txd.cookie = -EBUSY;
 773	first->len = total_len;
 774
  775	/* the first link descriptor of the list is responsible for the flags */
 776	first->txd.flags = flags; /* client is in control of this ack */
 777
 778	return &first->txd;
 779
 780err_desc_get:
 781	dev_err(chan2dev(chan), "not enough descriptors available\n");
 782err:
 783	atc_desc_put(atchan, first);
 784	return NULL;
 785}
 786
 787/**
 788 * atc_dma_cyclic_check_values
 789 * Check for too big/unaligned periods and unaligned DMA buffer
 790 */
 791static int
 792atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
 793		size_t period_len, enum dma_data_direction direction)
 794{
 795	if (period_len > (ATC_BTSIZE_MAX << reg_width))
 796		goto err_out;
 797	if (unlikely(period_len & ((1 << reg_width) - 1)))
 798		goto err_out;
 799	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
 800		goto err_out;
 801	if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
 802		goto err_out;
 803
 804	return 0;
 805
 806err_out:
 807	return -EINVAL;
 808}
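/*
 * Example: with reg_width = 2 (32-bit peripheral register) the checks above
 * require buf_addr and period_len to be multiples of 4 bytes and cap each
 * period at ATC_BTSIZE_MAX 32-bit transfers.
 */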
 809
 810/**
  811 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 812 */
 813static int
 814atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
 815		unsigned int period_index, dma_addr_t buf_addr,
 816		size_t period_len, enum dma_data_direction direction)
 817{
 818	u32		ctrla;
 819	unsigned int	reg_width = atslave->reg_width;
 820
  821	/* prepare common CTRLA value */
 822	ctrla =   ATC_DEFAULT_CTRLA | atslave->ctrla
 823		| ATC_DST_WIDTH(reg_width)
 824		| ATC_SRC_WIDTH(reg_width)
 825		| period_len >> reg_width;
 826
 827	switch (direction) {
 828	case DMA_TO_DEVICE:
 829		desc->lli.saddr = buf_addr + (period_len * period_index);
 830		desc->lli.daddr = atslave->tx_reg;
 831		desc->lli.ctrla = ctrla;
 832		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
 833				| ATC_SRC_ADDR_MODE_INCR
 834				| ATC_FC_MEM2PER
 835				| ATC_SIF(AT_DMA_MEM_IF)
 836				| ATC_DIF(AT_DMA_PER_IF);
 837		break;
 838
 839	case DMA_FROM_DEVICE:
 840		desc->lli.saddr = atslave->rx_reg;
 841		desc->lli.daddr = buf_addr + (period_len * period_index);
 842		desc->lli.ctrla = ctrla;
 843		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
 844				| ATC_SRC_ADDR_MODE_FIXED
 845				| ATC_FC_PER2MEM
 846				| ATC_SIF(AT_DMA_PER_IF)
 847				| ATC_DIF(AT_DMA_MEM_IF);
 848		break;
 849
 850	default:
 851		return -EINVAL;
 852	}
 853
 854	return 0;
 855}
 856
 857/**
 858 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 859 * @chan: the DMA channel to prepare
 860 * @buf_addr: physical DMA address where the buffer starts
 861 * @buf_len: total number of bytes for the entire buffer
 862 * @period_len: number of bytes for each period
 863 * @direction: transfer direction, to or from device
 864 */
 865static struct dma_async_tx_descriptor *
 866atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 867		size_t period_len, enum dma_data_direction direction)
 868{
 869	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 870	struct at_dma_slave	*atslave = chan->private;
 871	struct at_desc		*first = NULL;
 872	struct at_desc		*prev = NULL;
 873	unsigned long		was_cyclic;
 874	unsigned int		periods = buf_len / period_len;
 875	unsigned int		i;
 876
  877	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%zu/%zu)\n",
 878			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
 879			buf_addr,
 880			periods, buf_len, period_len);
 881
 882	if (unlikely(!atslave || !buf_len || !period_len)) {
 883		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
 884		return NULL;
 885	}
 886
 887	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
 888	if (was_cyclic) {
 889		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
 890		return NULL;
 891	}
 892
 893	/* Check for too big/unaligned periods and unaligned DMA buffer */
 894	if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr,
 895					period_len, direction))
 896		goto err_out;
 897
 898	/* build cyclic linked list */
 899	for (i = 0; i < periods; i++) {
 900		struct at_desc	*desc;
 901
 902		desc = atc_desc_get(atchan);
 903		if (!desc)
 904			goto err_desc_get;
 905
 906		if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr,
 907						period_len, direction))
 908			goto err_desc_get;
 909
 910		atc_desc_chain(&first, &prev, desc);
 911	}
 912
  913	/* let's make the list cyclic: link the last descriptor back to the first */
 914	prev->lli.dscr = first->txd.phys;
 915
  916	/* First descriptor of the chain embeds additional information */
 917	first->txd.cookie = -EBUSY;
 918	first->len = buf_len;
 919
 920	return &first->txd;
 921
 922err_desc_get:
 923	dev_err(chan2dev(chan), "not enough descriptors available\n");
 924	atc_desc_put(atchan, first);
 925err_out:
 926	clear_bit(ATC_IS_CYCLIC, &atchan->status);
 927	return NULL;
 928}
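/*
 * A cyclic transfer (typically an audio ring buffer) never completes by
 * itself: the last descriptor links back to the first, atc_handle_cyclic()
 * runs the client callback once per period from the tasklet, and the channel
 * keeps ATC_IS_CYCLIC set until it is torn down with DMA_TERMINATE_ALL in
 * atc_control().
 */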
 929
 930
 931static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 932		       unsigned long arg)
 933{
 934	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 935	struct at_dma		*atdma = to_at_dma(chan->device);
 936	int			chan_id = atchan->chan_common.chan_id;
 937
 938	LIST_HEAD(list);
 939
 940	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
 941
 942	if (cmd == DMA_PAUSE) {
 943		spin_lock_bh(&atchan->lock);
 944
 945		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
 946		set_bit(ATC_IS_PAUSED, &atchan->status);
 947
 948		spin_unlock_bh(&atchan->lock);
 949	} else if (cmd == DMA_RESUME) {
 950		if (!test_bit(ATC_IS_PAUSED, &atchan->status))
 951			return 0;
 952
 953		spin_lock_bh(&atchan->lock);
 954
 955		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
 956		clear_bit(ATC_IS_PAUSED, &atchan->status);
 957
 958		spin_unlock_bh(&atchan->lock);
 959	} else if (cmd == DMA_TERMINATE_ALL) {
 960		struct at_desc	*desc, *_desc;
 961		/*
 962		 * This is only called when something went wrong elsewhere, so
 963		 * we don't really care about the data. Just disable the
 964		 * channel. We still have to poll the channel enable bit due
 965		 * to AHB/HSB limitations.
 966		 */
 967		spin_lock_bh(&atchan->lock);
 968
 969		/* disabling channel: must also remove suspend state */
 970		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
 971
 972		/* confirm that this channel is disabled */
 973		while (dma_readl(atdma, CHSR) & atchan->mask)
 974			cpu_relax();
 975
 976		/* active_list entries will end up before queued entries */
 977		list_splice_init(&atchan->queue, &list);
 978		list_splice_init(&atchan->active_list, &list);
 979
 980		/* Flush all pending and queued descriptors */
 981		list_for_each_entry_safe(desc, _desc, &list, desc_node)
 982			atc_chain_complete(atchan, desc);
 983
 984		clear_bit(ATC_IS_PAUSED, &atchan->status);
 985		/* if channel dedicated to cyclic operations, free it */
 986		clear_bit(ATC_IS_CYCLIC, &atchan->status);
 987
 988		spin_unlock_bh(&atchan->lock);
 989	} else {
 990		return -ENXIO;
 991	}
 992
 993	return 0;
 994}
 995
 996/**
 997 * atc_tx_status - poll for transaction completion
 998 * @chan: DMA channel
 999 * @cookie: transaction identifier to check status of
1000 * @txstate: if not %NULL updated with transaction state
1001 *
1002 * If @txstate is passed in, upon return it reflects the driver
1003 * internal state and can be used with dma_async_is_complete() to check
1004 * the status of multiple cookies without re-checking hardware state.
1005 */
1006static enum dma_status
1007atc_tx_status(struct dma_chan *chan,
1008		dma_cookie_t cookie,
1009		struct dma_tx_state *txstate)
1010{
1011	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1012	dma_cookie_t		last_used;
1013	dma_cookie_t		last_complete;
1014	enum dma_status		ret;
1015
1016	spin_lock_bh(&atchan->lock);
1017
1018	last_complete = atchan->completed_cookie;
1019	last_used = chan->cookie;
1020
1021	ret = dma_async_is_complete(cookie, last_complete, last_used);
1022	if (ret != DMA_SUCCESS) {
1023		atc_cleanup_descriptors(atchan);
1024
1025		last_complete = atchan->completed_cookie;
1026		last_used = chan->cookie;
1027
1028		ret = dma_async_is_complete(cookie, last_complete, last_used);
1029	}
1030
1031	spin_unlock_bh(&atchan->lock);
1032
1033	if (ret != DMA_SUCCESS)
1034		dma_set_tx_state(txstate, last_complete, last_used,
1035			atc_first_active(atchan)->len);
1036	else
1037		dma_set_tx_state(txstate, last_complete, last_used, 0);
1038
1039	if (test_bit(ATC_IS_PAUSED, &atchan->status))
1040		ret = DMA_PAUSED;
1041
1042	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
1043		 ret, cookie, last_complete ? last_complete : 0,
1044		 last_used ? last_used : 0);
1045
1046	return ret;
1047}
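/*
 * The last argument passed to dma_set_tx_state() above is the residue.  For
 * a transfer still in flight this driver reports the total length of the
 * first active descriptor chain, so the residue is only a coarse upper
 * bound, not a byte-accurate progress indicator.
 */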
1048
1049/**
1050 * atc_issue_pending - try to finish work
1051 * @chan: target DMA channel
1052 */
1053static void atc_issue_pending(struct dma_chan *chan)
1054{
1055	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1056
1057	dev_vdbg(chan2dev(chan), "issue_pending\n");
1058
1059	/* Not needed for cyclic transfers */
1060	if (test_bit(ATC_IS_CYCLIC, &atchan->status))
1061		return;
1062
1063	spin_lock_bh(&atchan->lock);
1064	if (!atc_chan_is_enabled(atchan)) {
1065		atc_advance_work(atchan);
1066	}
1067	spin_unlock_bh(&atchan->lock);
1068}
1069
1070/**
1071 * atc_alloc_chan_resources - allocate resources for DMA channel
1072 * @chan: allocate descriptor resources for this channel
1074 *
1075 * return - the number of allocated descriptors
1076 */
1077static int atc_alloc_chan_resources(struct dma_chan *chan)
1078{
1079	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1080	struct at_dma		*atdma = to_at_dma(chan->device);
1081	struct at_desc		*desc;
1082	struct at_dma_slave	*atslave;
1083	int			i;
1084	u32			cfg;
1085	LIST_HEAD(tmp_list);
1086
1087	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1088
1089	/* ASSERT:  channel is idle */
1090	if (atc_chan_is_enabled(atchan)) {
1091		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1092		return -EIO;
1093	}
1094
1095	cfg = ATC_DEFAULT_CFG;
1096
1097	atslave = chan->private;
1098	if (atslave) {
1099		/*
1100		 * We need controller-specific data to set up slave
1101		 * transfers.
1102		 */
1103		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
1104
1105		/* if a cfg configuration is specified, take it instead of the default */
1106		if (atslave->cfg)
1107			cfg = atslave->cfg;
1108	}
1109
1110	/* have we already been set up?
1111	 * reconfigure the channel, but there is no need to reallocate descriptors */
1112	if (!list_empty(&atchan->free_list))
1113		return atchan->descs_allocated;
1114
1115	/* Allocate initial pool of descriptors */
1116	for (i = 0; i < init_nr_desc_per_channel; i++) {
1117		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
1118		if (!desc) {
1119			dev_err(atdma->dma_common.dev,
1120				"Only %d initial descriptors\n", i);
1121			break;
1122		}
1123		list_add_tail(&desc->desc_node, &tmp_list);
1124	}
1125
1126	spin_lock_bh(&atchan->lock);
1127	atchan->descs_allocated = i;
1128	list_splice(&tmp_list, &atchan->free_list);
1129	atchan->completed_cookie = chan->cookie = 1;
1130	spin_unlock_bh(&atchan->lock);
1131
1132	/* channel parameters */
1133	channel_writel(atchan, CFG, cfg);
1134
1135	dev_dbg(chan2dev(chan),
1136		"alloc_chan_resources: allocated %d descriptors\n",
1137		atchan->descs_allocated);
1138
1139	return atchan->descs_allocated;
1140}
1141
1142/**
1143 * atc_free_chan_resources - free all channel resources
1144 * @chan: DMA channel
1145 */
1146static void atc_free_chan_resources(struct dma_chan *chan)
1147{
1148	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1149	struct at_dma		*atdma = to_at_dma(chan->device);
1150	struct at_desc		*desc, *_desc;
1151	LIST_HEAD(list);
1152
1153	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
1154		atchan->descs_allocated);
1155
1156	/* ASSERT:  channel is idle */
1157	BUG_ON(!list_empty(&atchan->active_list));
1158	BUG_ON(!list_empty(&atchan->queue));
1159	BUG_ON(atc_chan_is_enabled(atchan));
1160
1161	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
1162		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
1163		list_del(&desc->desc_node);
1164		/* free link descriptor */
1165		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
1166	}
1167	list_splice_init(&atchan->free_list, &list);
1168	atchan->descs_allocated = 0;
1169	atchan->status = 0;
1170
1171	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1172}
1173
1174
1175/*--  Module Management  -----------------------------------------------*/
1176
1177/**
1178 * at_dma_off - disable DMA controller
1179 * @atdma: the Atmel HDMAC device
1180 */
1181static void at_dma_off(struct at_dma *atdma)
1182{
1183	dma_writel(atdma, EN, 0);
1184
1185	/* disable all interrupts */
1186	dma_writel(atdma, EBCIDR, -1L);
1187
1188	/* confirm that all channels are disabled */
1189	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
1190		cpu_relax();
1191}
1192
1193static int __init at_dma_probe(struct platform_device *pdev)
1194{
1195	struct at_dma_platform_data *pdata;
1196	struct resource		*io;
1197	struct at_dma		*atdma;
1198	size_t			size;
1199	int			irq;
1200	int			err;
1201	int			i;
1202
1203	/* get DMA Controller parameters from platform */
1204	pdata = pdev->dev.platform_data;
1205	if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
1206		return -EINVAL;
1207
1208	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1209	if (!io)
1210		return -EINVAL;
1211
1212	irq = platform_get_irq(pdev, 0);
1213	if (irq < 0)
1214		return irq;
1215
1216	size = sizeof(struct at_dma);
1217	size += pdata->nr_channels * sizeof(struct at_dma_chan);
1218	atdma = kzalloc(size, GFP_KERNEL);
1219	if (!atdma)
1220		return -ENOMEM;
1221
1222	/* discover transaction capabilities from the platform data */
1223	atdma->dma_common.cap_mask = pdata->cap_mask;
1224	atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
1225
1226	size = io->end - io->start + 1;
1227	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
1228		err = -EBUSY;
1229		goto err_kfree;
1230	}
1231
1232	atdma->regs = ioremap(io->start, size);
1233	if (!atdma->regs) {
1234		err = -ENOMEM;
1235		goto err_release_r;
1236	}
1237
1238	atdma->clk = clk_get(&pdev->dev, "dma_clk");
1239	if (IS_ERR(atdma->clk)) {
1240		err = PTR_ERR(atdma->clk);
1241		goto err_clk;
1242	}
1243	clk_enable(atdma->clk);
1244
1245	/* force dma off, just in case */
1246	at_dma_off(atdma);
1247
1248	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
1249	if (err)
1250		goto err_irq;
1251
1252	platform_set_drvdata(pdev, atdma);
1253
1254	/* create a pool of consistent memory blocks for hardware descriptors */
1255	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
1256			&pdev->dev, sizeof(struct at_desc),
1257			4 /* word alignment */, 0);
1258	if (!atdma->dma_desc_pool) {
1259		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
1260		err = -ENOMEM;
1261		goto err_pool_create;
1262	}
1263
1264	/* clear any pending interrupt */
1265	while (dma_readl(atdma, EBCISR))
1266		cpu_relax();
1267
1268	/* initialize channels related values */
1269	INIT_LIST_HEAD(&atdma->dma_common.channels);
1270	for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) {
1271		struct at_dma_chan	*atchan = &atdma->chan[i];
1272
1273		atchan->chan_common.device = &atdma->dma_common;
1274		atchan->chan_common.cookie = atchan->completed_cookie = 1;
1275		atchan->chan_common.chan_id = i;
1276		list_add_tail(&atchan->chan_common.device_node,
1277				&atdma->dma_common.channels);
1278
1279		atchan->ch_regs = atdma->regs + ch_regs(i);
1280		spin_lock_init(&atchan->lock);
1281		atchan->mask = 1 << i;
1282
1283		INIT_LIST_HEAD(&atchan->active_list);
1284		INIT_LIST_HEAD(&atchan->queue);
1285		INIT_LIST_HEAD(&atchan->free_list);
1286
1287		tasklet_init(&atchan->tasklet, atc_tasklet,
1288				(unsigned long)atchan);
1289		atc_enable_chan_irq(atdma, i);
1290	}
1291
1292	/* set base routines */
1293	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
1294	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
1295	atdma->dma_common.device_tx_status = atc_tx_status;
1296	atdma->dma_common.device_issue_pending = atc_issue_pending;
1297	atdma->dma_common.dev = &pdev->dev;
1298
1299	/* set prep routines based on capability */
1300	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
1301		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
1302
1303	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask))
1304		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1305
1306	if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
1307		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
1308
1309	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
1310	    dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
1311		atdma->dma_common.device_control = atc_control;
1312
1313	dma_writel(atdma, EN, AT_DMA_ENABLE);
1314
1315	dev_info(&pdev->dev, "Atmel AHB DMA Controller (%s%s), %d channels\n",
1316	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
1317	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
1318	  atdma->dma_common.chancnt);
1319
1320	dma_async_device_register(&atdma->dma_common);
1321
1322	return 0;
1323
1324err_pool_create:
1325	platform_set_drvdata(pdev, NULL);
1326	free_irq(platform_get_irq(pdev, 0), atdma);
1327err_irq:
1328	clk_disable(atdma->clk);
1329	clk_put(atdma->clk);
1330err_clk:
1331	iounmap(atdma->regs);
1332	atdma->regs = NULL;
1333err_release_r:
1334	release_mem_region(io->start, size);
1335err_kfree:
1336	kfree(atdma);
1337	return err;
1338}
1339
1340static int __exit at_dma_remove(struct platform_device *pdev)
1341{
1342	struct at_dma		*atdma = platform_get_drvdata(pdev);
1343	struct dma_chan		*chan, *_chan;
1344	struct resource		*io;
1345
1346	at_dma_off(atdma);
1347	dma_async_device_unregister(&atdma->dma_common);
1348
1349	dma_pool_destroy(atdma->dma_desc_pool);
1350	platform_set_drvdata(pdev, NULL);
1351	free_irq(platform_get_irq(pdev, 0), atdma);
1352
1353	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1354			device_node) {
1355		struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1356
1357		/* Disable interrupts */
1358		atc_disable_chan_irq(atdma, chan->chan_id);
1359		tasklet_disable(&atchan->tasklet);
1360
1361		tasklet_kill(&atchan->tasklet);
1362		list_del(&chan->device_node);
1363	}
1364
1365	clk_disable(atdma->clk);
1366	clk_put(atdma->clk);
1367
1368	iounmap(atdma->regs);
1369	atdma->regs = NULL;
1370
1371	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1372	release_mem_region(io->start, io->end - io->start + 1);
1373
1374	kfree(atdma);
1375
1376	return 0;
1377}
1378
1379static void at_dma_shutdown(struct platform_device *pdev)
1380{
1381	struct at_dma	*atdma = platform_get_drvdata(pdev);
1382
1383	at_dma_off(platform_get_drvdata(pdev));
1384	clk_disable(atdma->clk);
1385}
1386
1387static int at_dma_suspend_noirq(struct device *dev)
1388{
1389	struct platform_device *pdev = to_platform_device(dev);
1390	struct at_dma *atdma = platform_get_drvdata(pdev);
1391
1392	at_dma_off(platform_get_drvdata(pdev));
1393	clk_disable(atdma->clk);
1394	return 0;
1395}
1396
1397static int at_dma_resume_noirq(struct device *dev)
1398{
1399	struct platform_device *pdev = to_platform_device(dev);
1400	struct at_dma *atdma = platform_get_drvdata(pdev);
1401
1402	clk_enable(atdma->clk);
1403	dma_writel(atdma, EN, AT_DMA_ENABLE);
1404	return 0;
1405}
1406
1407static const struct dev_pm_ops at_dma_dev_pm_ops = {
1408	.suspend_noirq = at_dma_suspend_noirq,
1409	.resume_noirq = at_dma_resume_noirq,
1410};
1411
1412static struct platform_driver at_dma_driver = {
1413	.remove		= __exit_p(at_dma_remove),
1414	.shutdown	= at_dma_shutdown,
1415	.driver = {
1416		.name	= "at_hdmac",
1417		.pm	= &at_dma_dev_pm_ops,
1418	},
1419};
1420
1421static int __init at_dma_init(void)
1422{
1423	return platform_driver_probe(&at_dma_driver, at_dma_probe);
1424}
1425subsys_initcall(at_dma_init);
1426
1427static void __exit at_dma_exit(void)
1428{
1429	platform_driver_unregister(&at_dma_driver);
1430}
1431module_exit(at_dma_exit);
1432
1433MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
1434MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
1435MODULE_LICENSE("GPL");
1436MODULE_ALIAS("platform:at_hdmac");