
/Ethereal-msm8939-beta9/arch/metag/mm/init.c

https://bitbucket.org/MilosStamenkovic95/etherealos
/*
 *  Copyright (C) 2005,2006,2007,2008,2009,2010 Imagination Technologies
 *
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/user_gateway.h>
#include <asm/mmzone.h>
#include <asm/fixmap.h>

unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

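/*
 * Reference page directory. The static kernel mappings created below,
 * such as the user gateway page, are installed here.
 */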
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data;

unsigned long empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

extern char __user_gateway_start;
extern char __user_gateway_end;

void *gateway_page;

/*
 * Insert the gateway page into a set of page tables, creating the
 * page tables if necessary.
 */
static void insert_gateway_page(pgd_t *pgd, unsigned long address)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	BUG_ON(!pgd_present(*pgd));

	pud = pud_offset(pgd, address);
	BUG_ON(!pud_present(*pud));

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd)) {
		pte = alloc_bootmem_pages(PAGE_SIZE);
		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
	}

	pte = pte_offset_kernel(pmd, address);
	set_pte(pte, pfn_pte(__pa(gateway_page) >> PAGE_SHIFT, PAGE_READONLY));
}

/* Alloc and map a page in a known location accessible to userspace. */
static void __init user_gateway_init(void)
{
	unsigned long address = USER_GATEWAY_PAGE;
	int offset = pgd_index(address);
	pgd_t *pgd;

	gateway_page = alloc_bootmem_pages(PAGE_SIZE);

	pgd = swapper_pg_dir + offset;
	insert_gateway_page(pgd, address);

#ifdef CONFIG_METAG_META12
	/*
	 * Insert the gateway page into our current page tables even
	 * though we've already inserted it into our reference page
	 * table (swapper_pg_dir). This is because with a META1 mmu we
	 * copy just the user address range and not the gateway page
	 * entry on context switch, see switch_mmu().
	 */
	pgd = (pgd_t *)mmu_get_base() + offset;
	insert_gateway_page(pgd, address);
#endif /* CONFIG_METAG_META12 */

	BUG_ON((&__user_gateway_end - &__user_gateway_start) > PAGE_SIZE);

	gateway_page += (address & ~PAGE_MASK);

	memcpy(gateway_page, &__user_gateway_start,
	       &__user_gateway_end - &__user_gateway_start);

	/*
	 * We don't need to flush the TLB here, there should be no mapping
	 * present at boot for this address and only valid mappings are in
	 * the TLB (apart from on Meta 1.x, but those cached invalid
	 * mappings should be impossible to hit here).
	 *
	 * We don't flush the code cache here even though we have written
	 * code through the data cache and they may not be coherent. At
	 * this point we assume there is no stale data in the code cache
	 * for this address so there is no need to flush.
	 */
}

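/*
 * Record the PFN span of node @nid in its pglist_data. For NUMA
 * (CONFIG_NEED_MULTIPLE_NODES) the pglist_data itself is first
 * allocated from memblock, preferably from the node's own memory.
 */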
static void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long phys;
#endif

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	phys = __memblock_alloc_base(sizeof(struct pglist_data),
				SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
	/* Retry with all of system memory */
	if (!phys)
		phys = __memblock_alloc_base(sizeof(struct pglist_data),
					     SMP_CACHE_BYTES,
					     memblock_end_of_DRAM());
	if (!phys)
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid) = __va(phys);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

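/*
 * Set up the bootmem allocator for one node: allocate its bootmap,
 * register the node's active regions as free, and (for node 0 only,
 * for now) re-reserve the lowmem regions memblock already holds.
 */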
static void __init bootmem_init_one_node(unsigned int nid)
{
	unsigned long total_pages, paddr;
	unsigned long end_pfn;
	struct pglist_data *p;

	p = NODE_DATA(nid);

	/* Nothing to do.. */
	if (!p->node_spanned_pages)
		return;

	end_pfn = p->node_start_pfn + p->node_spanned_pages;
#ifdef CONFIG_HIGHMEM
	if (end_pfn > max_low_pfn)
		end_pfn = max_low_pfn;
#endif

	total_pages = bootmem_bootmap_pages(end_pfn - p->node_start_pfn);

	paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
	if (!paddr)
		panic("Can't allocate bootmap for nid[%d]\n", nid);

	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/*
	 * XXX Handle initial reservations for the system memory node
	 * only for the moment, we'll refactor this later for handling
	 * reservations in other nodes.
	 */
	if (nid == 0) {
		struct memblock_region *reg;

		/* Reserve the sections we're already using. */
		for_each_memblock(reserved, reg) {
			unsigned long size = reg->size;

#ifdef CONFIG_HIGHMEM
			/* ...but not highmem */
			if (PFN_DOWN(reg->base) >= highstart_pfn)
				continue;

			if (PFN_UP(reg->base + size) > highstart_pfn)
				size = (highstart_pfn - PFN_DOWN(reg->base))
				       << PAGE_SHIFT;
#endif

			reserve_bootmem(reg->base, size, BOOTMEM_DEFAULT);
		}
	}

	sparse_memory_present_with_active_regions(nid);
}

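/*
 * Assign all memblock memory to node 0, bring it online, then
 * initialise bootmem on every online node (soc_mem_setup() may
 * bring further nodes online).
 */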
static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;
	int i;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn), 0);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	soc_mem_setup();

	for_each_online_node(i)
		bootmem_init_one_node(i);

	sparse_init();
}

extern char _heap_start[];

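/*
 * Register lowmem with memblock and reserve everything from the base
 * of memory up to the kernel heap, i.e. the kernel image itself.
 * Highmem, if present, is added but kept reserved so that its page
 * structures are still initialised.
 */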
static void __init init_and_reserve_mem(void)
{
	unsigned long start_pfn, heap_start;
	u64 base = min_low_pfn << PAGE_SHIFT;
	u64 size = (max_low_pfn << PAGE_SHIFT) - base;

	heap_start = (unsigned long) &_heap_start;

	memblock_add(base, size);

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(heap_start));
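	/*
	 * e.g. with 4 KiB pages, a heap starting at physical 0x40301234
	 * yields start_pfn == 0x40302, the first frame wholly past the
	 * kernel image.
	 */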

	/*
	 * Reserve the kernel text.
	 */
	memblock_reserve(base, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - base);

#ifdef CONFIG_HIGHMEM
	/*
	 * Add & reserve highmem, so page structures are initialised.
	 */
	base = highstart_pfn << PAGE_SHIFT;
	size = (highend_pfn << PAGE_SHIFT) - base;
	if (size) {
		memblock_add(base, size);
		memblock_reserve(base, size);
	}
#endif
}

#ifdef CONFIG_HIGHMEM
/*
 * Ensure we have allocated page tables in swapper_pg_dir for the
 * fixed mappings range from 'start' to 'end'.
 */
static void __init allocate_pgtables(unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = swapper_pg_dir + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
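		/*
		 * The pud and pmd levels are folded into the pgd on this
		 * port, so a pgd entry can be walked directly as a pmd.
		 */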
		pmd = (pmd_t *)pgd;
		for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
			vaddr += PMD_SIZE;

			if (!pmd_none(*pmd))
				continue;

			pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pmd, pte);
		}
		j = 0;
	}
}

static void __init fixedrange_init(void)
{
	unsigned long vaddr, end;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Fixed mappings:
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	allocate_pgtables(vaddr, end);

	/*
	 * Permanent kmaps:
	 */
	vaddr = PKMAP_BASE;
	allocate_pgtables(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
#endif /* CONFIG_HIGHMEM */

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/metag/kernel/setup.c.
 */
void __init paging_init(unsigned long mem_end)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	int nid;

	init_and_reserve_mem();

	memblock_allow_resize();

	memblock_dump_all();

	nodes_clear(node_online_map);

	init_new_context(&init_task, &init_mm);

	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	do_init_bootmem();
	mmu_init(mem_end);

#ifdef CONFIG_HIGHMEM
	fixedrange_init();
	kmap_init();
#endif

	/* Initialize the zero page to a bootmem page, already zeroed. */
	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);

	user_gateway_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

#ifdef CONFIG_HIGHMEM
		max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif
		pr_info("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
			nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}

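/*
 * Hand the remaining bootmem (and all highmem) pages over to the
 * buddy allocator and report the final memory figures.
 */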
void __init mem_init(void)
{
	int nid;

#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	/*
	 * Explicitly reset zone->managed_pages because highmem pages are
	 * freed before calling free_all_bootmem_node();
	 */
	reset_all_zones_managed_pages();
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++)
		free_highmem_page(pfn_to_page(tmp));
	num_physpages += totalhigh_pages;
#endif /* CONFIG_HIGHMEM */

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			free_all_bootmem_node(pgdat);
	}

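	/* Shifting a page count by (PAGE_SHIFT - 10) converts pages to KiB. */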
	pr_info("Memory: %luk/%luk available\n",
		(unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
		num_physpages << (PAGE_SHIFT - 10));

	show_mem(0);

	return;
}

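/* Free the __init sections, poisoning them to catch stale references. */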
void free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
}
#endif

#ifdef CONFIG_OF_FLATTREE
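/*
 * Called when the flattened device tree specifies an initrd; this port
 * only logs the range, presumably because DT-supplied initrds are not
 * supported here.
 */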
void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
	pr_err("%s(%llx, %llx)\n",
	       __func__, start, end);
}
#endif /* CONFIG_OF_FLATTREE */