
/arch/unicore32/include/asm/pgtable.h

http://github.com/mirrors/linux
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/unicore32/include/asm/pgtable.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 */
#ifndef __UNICORE_PGTABLE_H__
#define __UNICORE_PGTABLE_H__

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/cpu-single.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,
 * which may not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET		SZ_8M
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) \
					& ~(VMALLOC_OFFSET-1))
#define VMALLOC_END		(0xff000000UL)
#endif
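
/*
 * Worked example (illustrative, with a hypothetical high_memory value):
 * if high_memory is 0xc8000000, VMALLOC_START rounds up past the 8MB
 * guard hole to the next 8MB boundary:
 *
 *	(0xc8000000 + SZ_8M) & ~(SZ_8M - 1)
 *		= 0xc8800000 & 0xff800000
 *		= 0xc8800000
 *
 * leaving exactly 8MB between the end of physical memory and the start
 * of the vmalloc area.
 */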

#define PTRS_PER_PTE		1024
#define PTRS_PER_PGD		1024

/*
 * PGDIR_SHIFT determines the size of the area a top-level page table
 * entry can map.
 */
#define PGDIR_SHIFT		22

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */

#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
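
/*
 * Illustrative arithmetic: with PGDIR_SHIFT of 22, each of the 1024 pgd
 * entries covers PGDIR_SIZE = 1UL << 22 = 4MB, so the pgd as a whole
 * spans 1024 * 4MB = 4GB, the full 32-bit address space.
 */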

/*
 * This is the lowest virtual address at which we permit any user space
 * mapping.  This is particularly important for non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	PAGE_SIZE

#define FIRST_USER_PGD_NR	1
#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)

/*
 * Section address mask and size definitions.
 */
#define SECTION_SHIFT		22
#define SECTION_SIZE		(1UL << SECTION_SHIFT)
#define SECTION_MASK		(~(SECTION_SIZE-1))

#ifndef __ASSEMBLY__

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable bits based on memory policy, as well as any
 * architecture dependent bits.
 */
#define _PTE_DEFAULT		(PTE_PRESENT | PTE_YOUNG | PTE_CACHEABLE)

extern pgprot_t pgprot_user;
extern pgprot_t pgprot_kernel;

#define PAGE_NONE		pgprot_user
#define PAGE_SHARED		__pgprot(pgprot_val(pgprot_user | PTE_READ \
								| PTE_WRITE))
#define PAGE_SHARED_EXEC	__pgprot(pgprot_val(pgprot_user | PTE_READ \
								| PTE_WRITE \
								| PTE_EXEC))
#define PAGE_COPY		__pgprot(pgprot_val(pgprot_user | PTE_READ))
#define PAGE_COPY_EXEC		__pgprot(pgprot_val(pgprot_user | PTE_READ \
								| PTE_EXEC))
#define PAGE_READONLY		__pgprot(pgprot_val(pgprot_user | PTE_READ))
#define PAGE_READONLY_EXEC	__pgprot(pgprot_val(pgprot_user | PTE_READ \
								| PTE_EXEC))
#define PAGE_KERNEL		pgprot_kernel
#define PAGE_KERNEL_EXEC	__pgprot(pgprot_val(pgprot_kernel | PTE_EXEC))

#define __PAGE_NONE		__pgprot(_PTE_DEFAULT)
#define __PAGE_SHARED		__pgprot(_PTE_DEFAULT | PTE_READ \
							| PTE_WRITE)
#define __PAGE_SHARED_EXEC	__pgprot(_PTE_DEFAULT | PTE_READ \
							| PTE_WRITE \
							| PTE_EXEC)
#define __PAGE_COPY		__pgprot(_PTE_DEFAULT | PTE_READ)
#define __PAGE_COPY_EXEC	__pgprot(_PTE_DEFAULT | PTE_READ \
							| PTE_EXEC)
#define __PAGE_READONLY		__pgprot(_PTE_DEFAULT | PTE_READ)
#define __PAGE_READONLY_EXEC	__pgprot(_PTE_DEFAULT | PTE_READ \
							| PTE_EXEC)

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on UniCore hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
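
/*
 * Example (illustrative): a private PROT_READ|PROT_WRITE mapping indexes
 * __P011, i.e. __PAGE_COPY, so its pages are mapped without PTE_WRITE
 * and the first write faults for copy-on-write.  The same protections on
 * a MAP_SHARED mapping index __S011, i.e. __PAGE_SHARED, which carries
 * PTE_WRITE directly.
 */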

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)		(empty_zero_page)

#define pte_pfn(pte)			(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)		(__pte(((pfn) << PAGE_SHIFT) \
						| pgprot_val(prot)))
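
/*
 * Sketch (hypothetical values): pfn_pte() and pte_pfn() are inverses on
 * the pfn field.  With 4kB pages (PAGE_SHIFT of 12):
 *
 *	pte_t pte = pfn_pte(0x12345, PAGE_KERNEL);
 *
 * then pte_pfn(pte) == 0x12345, and the low 12 bits of pte_val(pte)
 * hold pgprot_val(PAGE_KERNEL).
 */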

#define pte_none(pte)			(!pte_val(pte))
#define pte_clear(mm, addr, ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)			(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir, addr)	(pmd_page_vaddr(*(dir)) \
						+ __pte_index(addr))

#define pte_offset_map(dir, addr)	(pmd_page_vaddr(*(dir)) \
						+ __pte_index(addr))
#define pte_unmap(pte)			do { } while (0)

#define set_pte(ptep, pte)	cpu_set_pte(ptep, pte)

#define set_pte_at(mm, addr, ptep, pteval)	\
	do {					\
		set_pte(ptep, pteval);          \
	} while (0)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_present(pte)	(pte_val(pte) & PTE_PRESENT)
#define pte_write(pte)		(pte_val(pte) & PTE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & PTE_YOUNG)
#define pte_exec(pte)		(pte_val(pte) & PTE_EXEC)

#define PTE_BIT_FUNC(fn, op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, &= ~PTE_WRITE);
PTE_BIT_FUNC(mkwrite,   |= PTE_WRITE);
PTE_BIT_FUNC(mkclean,   &= ~PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= PTE_YOUNG);
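
/*
 * For reference, PTE_BIT_FUNC(wrprotect, &= ~PTE_WRITE) above expands to:
 *
 *	static inline pte_t pte_wrprotect(pte_t pte)
 *	{
 *		pte_val(pte) &= ~PTE_WRITE;
 *		return pte;
 *	}
 *
 * Each helper modifies a copy of the pte and returns it; the caller is
 * expected to store the result back, e.g. with set_pte_at().
 */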

/*
 * Mark the prot value as uncacheable.
 */
#define pgprot_noncached(prot)		\
	__pgprot(pgprot_val(prot) & ~PTE_CACHEABLE)
#define pgprot_writecombine(prot)	\
	__pgprot(pgprot_val(prot) & ~PTE_CACHEABLE)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd) & PMD_PRESENT)
#define pmd_bad(pmd)		(((pmd_val(pmd) &		\
				(PMD_PRESENT | PMD_TYPE_MASK))	\
				!= (PMD_PRESENT | PMD_TYPE_TABLE)))

#define set_pmd(pmdpd, pmdval)		\
	do {				\
		*(pmdpd) = pmdval;	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		set_pmd(pmdp, __pmd(0));\
		clean_pmd_entry(pmdp);	\
	} while (0)

#define pmd_page_vaddr(pmd) ((pte_t *)__va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd)))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Find an entry in the third-level page table.. */
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
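
/*
 * Usage sketch (illustrative; the pud and pmd levels are folded away by
 * <asm-generic/pgtable-nopmd.h>, so each step below lands on the same
 * pgd entry).  Looking up the pte for a kernel virtual address:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd))
 *		pte = pte_offset_kernel(pmd, addr);
 */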

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = PTE_EXEC | PTE_WRITE | PTE_READ;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
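
/*
 * Example (illustrative): pte_modify() replaces only the access bits
 * (PTE_EXEC | PTE_WRITE | PTE_READ), so state such as PTE_PRESENT,
 * PTE_YOUNG and PTE_DIRTY survives a protection change:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 *
 * drops PTE_WRITE and PTE_EXEC, keeps PTE_READ, and leaves the
 * remaining bits of the old pte untouched.
 */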

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset --------------> <--- type --> 0 0 0 0 0
 *
 * This gives us up to 127 swap files and 4GB per swap file (20 offset
 * bits of 4kB pages).  Note that the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	5
#define __SWP_TYPE_BITS		7
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT)		\
				& __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) {			\
				((type) << __SWP_TYPE_SHIFT) |		\
				((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
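
/*
 * Worked example (hypothetical values): __swp_entry(3, 0x1234) packs
 * type 3 and page offset 0x1234 as
 *
 *	(3 << 5) | (0x1234 << 12) = 0x00000060 | 0x01234000 = 0x01234060
 *
 * and the accessors recover both fields:
 *
 *	__swp_type()   = (0x01234060 >> 5) & 0x7f = 3
 *	__swp_offset() =  0x01234060 >> 12        = 0x1234
 *
 * The five low bits stay zero, so a swap entry does not look like a
 * present pte.
 */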

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */

#endif /* __UNICORE_PGTABLE_H__ */