
/arch/sparc/mm/generic.c

https://bitbucket.org/evzijst/gittest
/* $Id: generic.c,v 1.14 2001/12/21 04:56:15 davem Exp $
 * generic.c: Generic Sparc mm routines that are not dependent upon
 *            MMU type but are Sparc specific.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

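/* Sanity check used by the remap loops below: by the time we install an
 * IO pte the slot must already be empty, so any surviving mapping is a
 * bug in the caller.
 */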
static inline void forget_pte(pte_t page)
{
#if 0 /* old 2.4 code */
	if (pte_none(page))
		return;
	if (pte_present(page)) {
		unsigned long pfn = pte_pfn(page);
		struct page *ptpage;
		if (!pfn_valid(pfn))
			return;
		ptpage = pfn_to_page(pfn);
		if (PageReserved(ptpage))
			return;
		page_cache_release(ptpage);
		return;
	}
	swap_free(pte_to_swp_entry(page));
#else
	if (!pte_none(page)) {
		printk("forget_pte: old mapping existed!\n");
		BUG();
	}
#endif
}

/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * These routines use a pgprot that sets PAGE_IO and they do not
 * check the mem_map table, since IO space is independent of normal
 * memory.  (A hedged usage sketch follows the end of this listing.)
 */
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	/* Walk only within this pmd: keep the in-pmd offset and clamp
	 * the end to the pmd boundary. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t oldpage = *pte;
		pte_clear(mm, address, pte);
		set_pte(pte, mk_pte_io(offset, prot, space));
		forget_pte(oldpage);
		address += PAGE_SIZE;
		offset += PAGE_SIZE;
		pte++;
	} while (address < end);
}

static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	/* Bias the offset by -address so that (address + offset) below
	 * yields the IO offset of each pmd-sized chunk. */
	offset -= address;
	do {
		pte_t * pte = pte_alloc_map(mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}

int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;
	struct mm_struct *mm = vma->vm_mm;

	prot = __pgprot(pg_iobits);
	offset -= from;
	dir = pgd_offset(mm, from);
	flush_cache_range(vma, beg, end);

	spin_lock(&mm->page_table_lock);
	while (from < end) {
		/* Allocate against the vma's mm, not current->mm, so the
		 * allocation stays consistent with the page_table_lock
		 * taken above even for a foreign mm. */
		pmd_t *pmd = pmd_alloc(mm, dir, from);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	spin_unlock(&mm->page_table_lock);

	flush_tlb_range(vma, beg, end);
	return error;
}

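/* Same as io_remap_page_range(), but the IO space and page frame are
 * packed into the single pfn argument: GET_IOSPACE() extracts the space
 * number from the uppermost bits, GET_PFN() the physical page frame. */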
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
			unsigned long pfn, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;
	struct mm_struct *mm = vma->vm_mm;
	int space = GET_IOSPACE(pfn);
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;

	prot = __pgprot(pg_iobits);
	offset -= from;
	dir = pgd_offset(mm, from);
	flush_cache_range(vma, beg, end);

	spin_lock(&mm->page_table_lock);
	while (from < end) {
		/* As above: use the vma's mm rather than current->mm. */
		pmd_t *pmd = pmd_alloc(mm, dir, from);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	spin_unlock(&mm->page_table_lock);

	flush_tlb_range(vma, beg, end);
	return error;
}
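
A minimal usage sketch, assuming a hypothetical sparc driver that exposes
one page of device registers through its mmap handler. Everything named
mydev_* or MYDEV_* is illustrative and not part of the file above; only
io_remap_pfn_range() comes from this code, and the pfn packing mirrors
what GET_IOSPACE()/GET_PFN() decode. Note that prot is recomputed from
pg_iobits inside io_remap_pfn_range(), so the value passed in is
effectively ignored.

#include <linux/fs.h>
#include <linux/mm.h>

#define MYDEV_IOSPACE	1UL		/* hypothetical obio space number */
#define MYDEV_PHYS	0x71000000UL	/* hypothetical register base */

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Pack the IO space into the top bits of the pfn, matching the
	 * GET_IOSPACE()/GET_PFN() split used by io_remap_pfn_range(). */
	unsigned long pfn = (MYDEV_IOSPACE << (BITS_PER_LONG - 4)) |
			    (MYDEV_PHYS >> PAGE_SHIFT);

	/* Only a single register page is exposed in this sketch. */
	if (vma->vm_end - vma->vm_start > PAGE_SIZE)
		return -EINVAL;

	return io_remap_pfn_range(vma, vma->vm_start, pfn,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}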