
/arch/ppc64/kernel/iommu.c

https://bitbucket.org/evzijst/gittest
/*
 * arch/ppc64/kernel/iommu.c
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */


#include <linux/config.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);

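/* Usage note: the __setup() hook above registers "iommu=" as a kernel
 * command line option, so booting with e.g.
 *
 *	iommu=novmerge
 *
 * forces virtual merging off, and "iommu=vmerge" forces it on, overriding
 * the CONFIG_IOMMU_VMERGE default.
 */
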
static unsigned long iommu_range_alloc(struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned int align_order)
{
	unsigned long n, end, i, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Small allocs (15 pages or less) may only use the part of the
	 * table below it_halfpoint.
	 */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large one, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	n = find_next_zero_bit(tbl->it_map, limit, start);

	/* Align allocation */
	n = (n + align_mask) & ~align_mask;

	end = n + npages;

	if (unlikely(end >= limit)) {
		if (likely(pass < 2)) {
			/* First failure, just rescan this half of the table.
			 * Second failure, rescan the other half.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	for (i = n; i < end; i++)
		if (test_bit(i, tbl->it_map)) {
			start = i+1;
			goto again;
		}

	for (i = n; i < end; i++)
		__set_bit(i, tbl->it_map);

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
		                ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}
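
/* A minimal sketch (not part of the original allocator) of the alignment
 * step used above: a candidate bit index n is rounded up to a 2^align_order
 * boundary with (n + align_mask) & ~align_mask.  The helper name is
 * hypothetical and only illustrative; for align_order < 64 the mask below is
 * equivalent to the one computed in iommu_range_alloc().  For example, with
 * align_order = 4 (align_mask = 0xf), n = 37 rounds up to 48.
 */
static inline unsigned long iommu_round_up_example(unsigned long n,
						   unsigned int align_order)
{
	unsigned long align_mask = (1ul << align_order) - 1;

	return (n + align_mask) & ~align_mask;
}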

static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
		       unsigned int npages, enum dma_data_direction direction,
		       unsigned int align_order)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(tbl, npages, NULL, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & PAGE_MASK,
			 direction);

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}
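
/* Worked example for the address arithmetic above, assuming 4 KB IOMMU
 * pages (PAGE_SHIFT == 12): an allocation that lands at bitmap index 0x20
 * in a table with it_offset = 0x80 programs TCE entry 0xa0 and returns the
 * bus address 0xa0 << 12 = 0xa0000.
 */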

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long i;

	entry = dma_addr >> PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr  = 0x%lx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable     = 0x%lx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#      = 0x%lx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize      = 0x%lx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff  = 0x%lx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex     = 0x%lx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);

	for (i = 0; i < npages; i++)
		__clear_bit(free_entry+i, tbl->it_map);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction direction)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount;
	unsigned long handle;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	for (s = outs; nelems; nelems--, s++) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long)page_address(s->page) + s->offset;
		npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
		npages >>= PAGE_SHIFT;
		entry = iommu_range_alloc(tbl, npages, &handle, 0);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << PAGE_SHIFT;
		dma_addr |= s->offset;

		DBG("  - %lx pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		ppc_md.tce_build(tbl, entry, npages, vaddr & PAGE_MASK, direction);

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++; outs++;
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %lx\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs++;
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}
	return outcount;

 failure:
	for (s = &sglist[0]; s <= outs; s++) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & PAGE_MASK;
			npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr)
				>> PAGE_SHIFT;
			__iommu_free(tbl, vaddr, npages);
		}
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}
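
/* Sketch of the virtual-merging test used in iommu_map_sg() above.  This is
 * a hypothetical helper, not part of the original file: two scatterlist
 * segments can only be folded into one when merging is enabled and the new
 * segment's DMA address starts exactly where the previous segment ended.
 */
static inline int iommu_can_merge_example(dma_addr_t dma_addr,
					  dma_addr_t dma_next)
{
	return !novmerge && (dma_addr == dma_next);
}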

void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction)
{
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sglist->dma_address;

		if (sglist->dma_length == 0)
			break;
		npages = (PAGE_ALIGN(dma_handle + sglist->dma_length)
			  - (dma_handle & PAGE_MASK)) >> PAGE_SHIFT;
		__iommu_free(tbl, dma_handle, npages);
		sglist++;
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl)
{
	unsigned long sz;
	static int welcomed = 0;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	tbl->it_map = (unsigned long *)__get_free_pages(GFP_ATOMIC, get_order(sz));
	if (!tbl->it_map)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);

	memset(tbl->it_map, 0, sz);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}
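
/* Worked example for the bitmap sizing above, assuming 4 KB pages: a table
 * with it_size = 65536 TCE entries needs (65536 + 7) >> 3 = 8192 bytes of
 * bitmap, and get_order(8192) = 1, i.e. two pages from __get_free_pages().
 */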

void iommu_free_table(struct device_node *dn)
{
	struct iommu_table *tbl = dn->iommu_table;
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
				dn->full_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
				__FUNCTION__, dn->full_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address of the buffer
 * passed here is the kernel (virtual) address of the buffer.  The buffer
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
		size_t size, enum dma_data_direction direction)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	unsigned long uaddr;
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	uaddr = (unsigned long)vaddr;
	npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK);
	npages >>= PAGE_SHIFT;

	if (tbl) {
		dma_handle = iommu_alloc(tbl, vaddr, npages, direction, 0);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				printk(KERN_INFO "iommu_alloc failed, "
						"tbl %p vaddr %p npages %d\n",
						tbl, vaddr, npages);
			}
		} else
			dma_handle |= (uaddr & ~PAGE_MASK);
	}

	return dma_handle;
}

void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (tbl)
		iommu_free(tbl, dma_handle, (PAGE_ALIGN(dma_handle + size) -
					(dma_handle & PAGE_MASK)) >> PAGE_SHIFT);
}
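
/* Usage sketch (hypothetical caller, not part of the original file): map a
 * kmalloc'ed buffer for device DMA with iommu_map_single() and unmap it
 * again, assuming a valid iommu_table.  The returned handle keeps the same
 * offset within the page as buf.
 */
static void iommu_single_example(struct iommu_table *tbl, void *buf,
				 size_t len)
{
	dma_addr_t handle;

	handle = iommu_map_single(tbl, buf, len, DMA_TO_DEVICE);
	if (handle == DMA_ERROR_CODE)
		return;		/* mapping failed, nothing to undo */

	/* ... program the device with 'handle' and wait for the DMA ... */

	iommu_unmap_single(tbl, handle, len, DMA_TO_DEVICE);
}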

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
		dma_addr_t *dma_handle, unsigned int __nocast flag)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int npages, order;

	size = PAGE_ALIGN(size);
	npages = size >> PAGE_SHIFT;
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		printk(KERN_ERR "iommu_alloc_coherent: size too large: 0x%lx\n", size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	ret = (void *)__get_free_pages(flag, order);
	if (!ret)
		return NULL;
	memset(ret, 0, size);

	/* Set up TCEs to cover the allocated range */
	mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL, order);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		ret = NULL;
	} else
		*dma_handle = mapping;
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	unsigned int npages;

	if (tbl) {
		size = PAGE_ALIGN(size);
		npages = size >> PAGE_SHIFT;
		iommu_free(tbl, dma_handle, npages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}
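
/* Usage sketch (hypothetical caller, not part of the original file):
 * allocate a zeroed, DMA-mapped buffer for a shared descriptor ring and
 * release it again.  GFP_KERNEL is assumed to be an acceptable allocation
 * context here.
 */
static void iommu_coherent_example(struct iommu_table *tbl)
{
	dma_addr_t ring_dma;
	void *ring;

	ring = iommu_alloc_coherent(tbl, 4096, &ring_dma, GFP_KERNEL);
	if (!ring)
		return;

	/* ... hand ring_dma to the device, access 'ring' from the CPU ... */

	iommu_free_coherent(tbl, 4096, ring, ring_dma);
}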