/*
 * arch/parisc/mm/ioremap.c
 * (source: https://github.com/aicjofs/android_kernel_lge_v500_20d)
 */
/*
 * arch/parisc/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001-2006 Helge Deller <deller@gmx.de>
 * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
 */
 8
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
14
15/*
16 * Generic mapping function (not visible outside):
17 */
18
19/*
20 * Remap an arbitrary physical address space into the kernel virtual
21 * address space.
22 *
23 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
24 * have to convert them into an offset in a page-aligned mapping, but the
25 * caller shouldn't need to know that small detail.
26 */
27void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
28{
29	void __iomem *addr;
30	struct vm_struct *area;
31	unsigned long offset, last_addr;
32	pgprot_t pgprot;
33
34#ifdef CONFIG_EISA
35	unsigned long end = phys_addr + size - 1;
36	/* Support EISA addresses */
37	if ((phys_addr >= 0x00080000 && end < 0x000fffff) ||
38	    (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
39		phys_addr |= F_EXTEND(0xfc000000);
40		flags |= _PAGE_NO_CACHE;
41	}
42#endif
43
44	/* Don't allow wraparound or zero size */
45	last_addr = phys_addr + size - 1;
46	if (!size || last_addr < phys_addr)
47		return NULL;
48
49	/*
50	 * Don't allow anybody to remap normal RAM that we're using..
51	 */
52	if (phys_addr < virt_to_phys(high_memory)) {
53		char *t_addr, *t_end;
54		struct page *page;
55
56		t_addr = __va(phys_addr);
57		t_end = t_addr + (size - 1);
58	   
59		for (page = virt_to_page(t_addr); 
60		     page <= virt_to_page(t_end); page++) {
61			if(!PageReserved(page))
62				return NULL;
63		}
64	}
65
66	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
67			  _PAGE_ACCESSED | flags);
68
69	/*
70	 * Mappings have to be page-aligned
71	 */
72	offset = phys_addr & ~PAGE_MASK;
73	phys_addr &= PAGE_MASK;
74	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
75
76	/*
77	 * Ok, go for it..
78	 */
79	area = get_vm_area(size, VM_IOREMAP);
80	if (!area)
81		return NULL;
82
83	addr = (void __iomem *) area->addr;
84	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
85			       phys_addr, pgprot)) {
86		vfree(addr);
87		return NULL;
88	}
89
90	return (void __iomem *) (offset + (char __iomem *)addr);
91}
92EXPORT_SYMBOL(__ioremap);
93
94void iounmap(const volatile void __iomem *addr)
95{
96	if (addr > high_memory)
97		return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
98}
99EXPORT_SYMBOL(iounmap);