
/arch/sh/mm/init.c

https://bitbucket.org/evzijst/gittest
/* $Id: init.c,v 1.19 2004/02/21 04:42:16 kkojima Exp $
 *
 *  linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002, 2004  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];
/*
 * Cache of MMU context last used.
 */
unsigned long mmu_context_cache = NO_CONTEXT;

#ifdef CONFIG_MMU
/* It'd be good if these lines were in the standard header file. */
#define START_PFN	(NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
#define MAX_LOW_PFN	(NODE_DATA(0)->bdata->node_low_pfn)
#endif

#ifdef CONFIG_DISCONTIGMEM
pg_data_t discontig_page_data[MAX_NUMNODES];
bootmem_data_t discontig_node_bdata[MAX_NUMNODES];
#endif

void (*copy_page)(void *from, void *to);
void (*clear_page)(void *to);

void show_mem(void)
{
	int i, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (page_count(mem_map+i))
			shared += page_count(mem_map+i) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd)) {
		/* No PTE page yet; allocate one and hook it into the PMD. */
		pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			pmd_ERROR(*pmd);
			return;
		}
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		/* Refuse to overwrite an existing mapping. */
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));

	/* Kill any stale TLB entry for this address. */
	__flush_tlb_page(get_asid(), addr);
}
/*
 * As a performance optimization, other platforms preserve the fixmap mapping
 * across a context switch. We don't presently do this, but it could be done
 * in a similar fashion to the wired TLB interface that sh64 uses (by way
 * of the memory mapped UTLB configuration) -- this unfortunately forces us to
 * give up a TLB entry for each mapping we want to preserve. While this may be
 * viable for a small number of fixmaps, it's not particularly useful for
 * everything and needs to be carefully evaluated. (ie, we may want this for
 * the vsyscall page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
 * in at __set_fixmap() time to determine the appropriate behavior to follow.
 *
 *					 -- PFM.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}
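
/*
 * Editorial sketch (not part of the original file): one shape the
 * _PAGE_WIRED idea from the comment above could take. Both _PAGE_WIRED
 * and tlb_wire_entry() are assumed names here, so the block is compiled
 * out.
 */
#if 0
	/* A caller asking for a preserved (wired) fixmap: */
	__set_fixmap(idx, phys, __pgprot(pgprot_val(prot) | _PAGE_WIRED));

	/* ...and set_pte_phys() reserving a UTLB slot when it sees the flag: */
	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
#endif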

/* References to section boundaries */

extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;

/*
 * paging_init() sets up the page tables
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };

	/*
	 * Setup some defaults for the zone sizes.. these should be safe
	 * regardless of discontiguous memory or MMU settings.
	 */
	zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT;
	zones_size[ZONE_NORMAL] = __MEMORY_SIZE >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = 0 >> PAGE_SHIFT;
#endif

#ifdef CONFIG_MMU
	/*
	 * If we have an MMU, and want to be using it .. we need to adjust
	 * the zone sizes accordingly, in addition to turning it on.
	 */
	{
		unsigned long max_dma, low, start_pfn;
		pgd_t *pg_dir;
		int i;

		/* We don't need a kernel mapping, as the hardware supports that. */
		pg_dir = swapper_pg_dir;

		for (i = 0; i < PTRS_PER_PGD; i++)
			pgd_val(pg_dir[i]) = 0;

		/* Turn on the MMU */
		enable_mmu();

		/* Fixup the zone sizes */
		start_pfn = START_PFN;
		max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
		low = MAX_LOW_PFN;

		if (low < max_dma) {
			zones_size[ZONE_DMA] = low - start_pfn;
			zones_size[ZONE_NORMAL] = 0;
		} else {
			zones_size[ZONE_DMA] = max_dma - start_pfn;
			zones_size[ZONE_NORMAL] = low - max_dma;
		}
	}

#elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
	/*
	 * If we don't have CONFIG_MMU set and the processor in question
	 * still has an MMU, care needs to be taken to make sure it doesn't
	 * stay on.. Since the boot loader could have potentially already
	 * turned it on, and we clearly don't want it, we simply turn it off.
	 *
	 * We don't need to do anything special for the zone sizes, since the
	 * default values that were already configured up above should be
	 * satisfactory.
	 */
	disable_mmu();
#endif
	NODE_DATA(0)->node_mem_map = NULL;
	free_area_init_node(0, NODE_DATA(0), zones_size, __MEMORY_START >> PAGE_SHIFT, 0);

#ifdef CONFIG_DISCONTIGMEM
	/*
	 * And for discontig, do some more fixups on the zone sizes..
	 */
	zones_size[ZONE_DMA] = __MEMORY_SIZE_2ND >> PAGE_SHIFT;
	zones_size[ZONE_NORMAL] = 0;
	free_area_init_node(1, NODE_DATA(1), zones_size, __MEMORY_START_2ND >> PAGE_SHIFT, 0);
#endif
}

void __init mem_init(void)
{
	extern unsigned long empty_zero_page[1024];
	int codesize, reservedpages, datasize, initsize;
	int tmp;
	extern unsigned long memory_start;

#ifdef CONFIG_MMU
	high_memory = (void *)__va(MAX_LOW_PFN * PAGE_SIZE);
#else
	extern unsigned long memory_end;

	high_memory = (void *)(memory_end & PAGE_MASK);
#endif

	max_mapnr = num_physpages = MAP_NR(high_memory) - MAP_NR(memory_start);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	/*
	 * Setup wrappers for copy/clear_page(), these will get overridden
	 * later in the boot process if a better method is available.
	 */
	copy_page = copy_page_slow;
	clear_page = clear_page_slow;
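
	/*
	 * Editorial sketch (not in the original file): a CPU-specific
	 * override installed later in boot might look like this, with the
	 * _fast names assumed purely for illustration:
	 */
#if 0
	copy_page = copy_page_fast;	/* e.g. a cache-aware variant */
	clear_page = clear_page_fast;
#endif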

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem_node(NODE_DATA(0));
#ifdef CONFIG_DISCONTIGMEM
	totalram_pages += free_all_bootmem_node(NODE_DATA(1));
#endif
	reservedpages = 0;
	for (tmp = 0; tmp < num_physpages; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (PageReserved(mem_map+tmp))
			reservedpages++;

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		max_mapnr << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

	p3_cache_init();
}


void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
}
299
300#ifdef CONFIG_BLK_DEV_INITRD
301void free_initrd_mem(unsigned long start, unsigned long end)
302{
303	unsigned long p;
304	for (p = start; p < end; p += PAGE_SIZE) {
305		ClearPageReserved(virt_to_page(p));
306		set_page_count(virt_to_page(p), 1);
307		free_page(p);
308		totalram_pages++;
309	}
310	printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
311}
312#endif
313