
/arch/powerpc/mm/tlb_nohash.c

http://github.com/mirrors/linux
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (i.e., SW loaded TLBs or Book3E compliant processors;
 * this does -not- include the 603, however, which shares its implementation
 * with hash based processors)
 *
 *  -- BenH
 *
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                     IBM Corp.
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>
#include <asm/cputhreads.h>
#include <asm/hugetlb.h>
#include <asm/paca.h>

#include "mmu_decl.h"

/*
 * This struct lists the sw-supported page sizes.  The hardware MMU may support
 * other sizes not listed here.  The .ind field is only used on MMUs that have
 * indirect page table entries.
 */
#ifdef CONFIG_PPC_BOOK3E_MMU
#ifdef CONFIG_PPC_FSL_BOOK3E
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_2M] = {
		.shift	= 21,
		.enc	= BOOK3E_PAGESZ_2M,
	},
	[MMU_PAGE_4M] = {
		.shift	= 22,
		.enc	= BOOK3E_PAGESZ_4M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_64M] = {
		.shift	= 26,
		.enc	= BOOK3E_PAGESZ_64M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#else
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.ind	= 20,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
		.enc	= BOOK3E_PAGESZ_16K,
	},
	[MMU_PAGE_64K] = {
		.shift	= 16,
		.ind	= 28,
		.enc	= BOOK3E_PAGESZ_64K,
	},
	[MMU_PAGE_1M] = {
		.shift	= 20,
		.enc	= BOOK3E_PAGESZ_1M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.ind	= 36,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#endif /* CONFIG_PPC_FSL_BOOK3E */

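/*
 * Translate a Linux page size index (MMU_PAGE_*) into the hardware TSIZE
 * encoding expected by the low-level _tlbil_* primitives.
 */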
static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
#else
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E for now */
	return 0;
}
#endif /* CONFIG_PPC_BOOK3E_MMU */

/* The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_linear_psize;		/* Page size used for the linear mapping */
int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
unsigned long linear_map_top;	/* Top of linear mapping */


/*
 * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
 * exceptions.  This is used for bolted and e6500 TLB miss handlers which
 * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
 * this is set to zero.
 */
int extlb_level_exc;

#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */

/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

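/*
 * Flush a single page from the local TLB only, taking an explicit
 * translation size and indirect-entry flag so that callers dealing with
 * non-default page sizes can pass the right encoding.
 */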
void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);

/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_RAW_SPINLOCK(tlbivax_lock);

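/* Argument block passed to the IPI handlers below. */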
struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};

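/*
 * IPI callbacks: these run on the remote CPUs and perform the actual
 * local invalidation (a whole PID, or a single virtual address).
 */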
static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}

static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}


/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

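/*
 * Flush one page from the TLBs of every CPU that may have used this mm.
 * Uses a broadcast tlbivax when the MMU supports it (optionally
 * serialized by tlbivax_lock), otherwise falls back to IPIs, and always
 * finishes with a local invalidation.
 */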
void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	/*
	 * This function as well as __local_flush_tlb_page() must only be called
	 * for user contexts.
	 */
	if (unlikely(WARN_ON(!mm)))
		return;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		flush_hugetlb_page(vma, vmaddr);
#endif

	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_47x
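/*
 * On 47x, a "cooperative-partition" property in the device tree means
 * tlbivax broadcast cannot be used; clear the feature so that flushes
 * fall back to IPIs.
 */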
void __init early_init_mmu_47x(void)
{
#ifdef CONFIG_SMP
	unsigned long root = of_get_flat_dt_root();
	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_PPC_47x */

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
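	/*
	 * The range is effectively ignored: kernel translations are tagged
	 * with PID 0, so just flush all PID 0 entries on every CPU.
	 */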
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax before a tlbsync, but
 * for now, we keep it that way.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

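/*
 * Called when an mmu_gather batch is flushed; ranges are not tracked
 * here, so simply flush the whole mm.
 */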
void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}

/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * may change in the future
 */

#ifdef CONFIG_PPC64

/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_mode != PPC_HTW_NONE) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/* This isn't optimal; ideally we would factor out the whole
		 * preempt & CPU mask mucking around, or even the IPI, but
		 * it will do for now
		 */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
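		/*
		 * No hardware tablewalk: the TLB miss handlers use a virtual
		 * linear page table instead.  Compute the address, within
		 * that virtual table, of the PTE page covering 'address'
		 * (keep the top region bits and scale the offset by the PTE
		 * size assumed by the miss handlers), then flush that single
		 * page.
		 */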
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

#ifdef CONFIG_PPC_64K_PAGES
		vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
#else
		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
#endif
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}

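/*
 * Probe the TLB configuration registers (MMUCFG/TLBnCFG/TLBnPS/EPTCFG,
 * depending on the MMU flavour) and mark each entry of mmu_psize_defs
 * as supported directly, indirectly, or not at all.
 */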
static void setup_page_sizes(void)
{
	unsigned int tlb0cfg;
	unsigned int tlb0ps;
	unsigned int eptcfg;
	int i, psize;

#ifdef CONFIG_PPC_FSL_BOOK3E
	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
	int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
		unsigned int min_pg, max_pg;

		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def;
			unsigned int shift;

			def = &mmu_psize_defs[psize];
			shift = def->shift;

			if (shift == 0 || shift & 1)
				continue;

			/* adjust to be in terms of 4^shift Kb */
			shift = (shift - 10) >> 1;

			if ((shift >= min_pg) && (shift <= max_pg))
				def->flags |= MMU_PAGE_SIZE_DIRECT;
		}

		goto out;
	}

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
		u32 tlb1cfg, tlb1ps;

		tlb0cfg = mfspr(SPRN_TLB0CFG);
		tlb1cfg = mfspr(SPRN_TLB1CFG);
		tlb1ps = mfspr(SPRN_TLB1PS);
		eptcfg = mfspr(SPRN_EPTCFG);

		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
			book3e_htw_mode = PPC_HTW_E6500;

		/*
		 * We expect 4K subpage size and unrestricted indirect size.
		 * The lack of a restriction on indirect size is a Freescale
		 * extension, indicated by PSn = 0 but SPSn != 0.
		 */
		if (eptcfg != 2)
			book3e_htw_mode = PPC_HTW_NONE;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (tlb1ps & (1U << (def->shift - 10))) {
				def->flags |= MMU_PAGE_SIZE_DIRECT;

				if (book3e_htw_mode && psize == MMU_PAGE_2M)
					def->flags |= MMU_PAGE_SIZE_INDIRECT;
			}
		}

		goto out;
	}
#endif

	tlb0cfg = mfspr(SPRN_TLB0CFG);
	tlb0ps = mfspr(SPRN_TLB0PS);
	eptcfg = mfspr(SPRN_EPTCFG);

	/* Look for supported direct sizes */
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];

		if (tlb0ps & (1U << (def->shift - 10)))
			def->flags |= MMU_PAGE_SIZE_DIRECT;
	}

	/* Indirect page sizes supported? */
	if ((tlb0cfg & TLBnCFG_IND) == 0 ||
	    (tlb0cfg & TLBnCFG_PT) == 0)
		goto out;

	book3e_htw_mode = PPC_HTW_IBM;

	/* Now, we only deal with one IND page size for each
	 * direct size. Hopefully all implementations today are
	 * unambiguous, but we might want to be careful in the
	 * future.
	 */
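	/*
	 * EPTCFG packs up to three (PS, SPS) pairs, 10 bits per pair with
	 * SPS in the low 5 bits: PS is an indirect page size and SPS the
	 * sub-page size it maps, both encoded as log2(size) - 10.
	 */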
	for (i = 0; i < 3; i++) {
		unsigned int ps, sps;

		sps = eptcfg & 0x1f;
		eptcfg >>= 5;
		ps = eptcfg & 0x1f;
		eptcfg >>= 5;
		if (!ps || !sps)
			continue;
		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
				def->ind = ps + 10;
		}
	}

out:
	/* Cleanup array and print summary */
	pr_info("MMU: Supported page sizes\n");
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];
		const char *__page_type_names[] = {
			"unsupported",
			"direct",
			"indirect",
			"direct & indirect"
		};
		if (def->flags == 0) {
			def->shift = 0;
			continue;
		}
		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	}
}

static void setup_mmu_htw(void)
{
	/*
	 * If we want to use HW tablewalk, enable it by patching the TLB miss
	 * handlers to branch to the one dedicated to it.
	 */

	switch (book3e_htw_mode) {
	case PPC_HTW_IBM:
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		break;
#ifdef CONFIG_PPC_FSL_BOOK3E
	case PPC_HTW_E6500:
		extlb_level_exc = EX_TLB_SIZE;
		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
		break;
#endif
	}
	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
}

/*
 * Early initialization of the MMU TLB code
 */
static void early_init_this_mmu(void)
{
	unsigned int mas4;

	/* Set MAS4 based on page table setting */

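	/*
	 * MAS4 holds the defaults (WIMGE, TSIZE, TLBSEL, IND) loaded into
	 * the MAS registers on a TLB miss; pick them to match the selected
	 * tablewalk mode.
	 */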
	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
	switch (book3e_htw_mode) {
	case PPC_HTW_E6500:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
		mas4 |= MAS4_TLBSELD(1);
		mmu_pte_psize = MMU_PAGE_2M;
		break;

	case PPC_HTW_IBM:
		mas4 |= MAS4_INDD;
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |=	BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_256M;
#else
		mas4 |=	BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_1M;
#endif
		break;

	case PPC_HTW_NONE:
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |=	BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
#else
		mas4 |=	BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
#endif
		mmu_pte_psize = mmu_virtual_psize;
		break;
	}
	mtspr(SPRN_MAS4, mas4);

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;
		int __maybe_unused cpu = smp_processor_id();
		bool map = true;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		/*
		 * Only do the mapping once per core, or else the
		 * transient mapping would cause problems.
		 */
#ifdef CONFIG_SMP
		if (hweight32(get_tensr()) > 1)
			map = false;
#endif

		if (map)
			linear_map_top = map_mem_in_cams(linear_map_top,
							 num_cams, false);
	}
#endif

	/* A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();
}

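/*
 * One-time global setup, run on the boot CPU only: pick the linear and
 * vmemmap page sizes, probe the supported page sizes and hardware
 * tablewalk support, and record the top of the linear mapping for use
 * by the TLB miss code.
 */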
static void __init early_init_mmu_global(void)
{
	/* XXX This will have to be decided at runtime, but right
	 * now our boot and TLB miss code hard-wires it. Ideally
	 * we should find a suitable page size and patch the
	 * TLB miss code (either that or use the PACA to store
	 * the value we want)
	 */
	mmu_linear_psize = MMU_PAGE_1G;

	/* XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 *
	 * Freescale booke only supports 4K pages in TLB0, so use that.
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		mmu_vmemmap_psize = MMU_PAGE_4K;
	else
		mmu_vmemmap_psize = MMU_PAGE_16M;

	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 *     check what page size combos are supported by the HW. It
	 *     also doesn't handle the case where a separate array holds
	 *     the IND entries from the array loaded by the PT.
	 */
	/* Look for supported page sizes */
	setup_page_sizes();

	/* Look for HW tablewalk support */
	setup_mmu_htw();

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		if (book3e_htw_mode == PPC_HTW_NONE) {
			extlb_level_exc = EX_TLB_SIZE;
			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
			patch_exception(0x1e0,
				exc_instruction_tlb_miss_bolted_book3e);
		}
	}
#endif

	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();
}

static void __init early_mmu_set_memory_limit(void)
{
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		/*
		 * Limit memory so we don't have linear faults.
		 * Unlike memblock_set_current_limit, which limits
		 * memory available during early boot, this permanently
		 * reduces the memory available to Linux.  We need to
		 * do this because highmem is not supported on 64-bit.
		 */
		memblock_enforce_memory_limit(linear_map_top);
	}
#endif

	memblock_set_current_limit(linear_map_top);
}

/* boot cpu only */
void __init early_init_mmu(void)
{
	early_init_mmu_global();
	early_init_this_mmu();
	early_mmu_set_memory_limit();
}

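/*
 * Secondary CPUs only need the per-CPU MMU setup; the global state was
 * already established by the boot CPU in early_init_mmu().
 */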
void early_init_mmu_secondary(void)
{
	early_init_this_mmu();
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry. We know for now that only 1G
	 * entries are supported, though that may eventually
	 * change.
	 *
	 * On FSL Embedded 64-bit, usually all RAM is bolted, but with
	 * unusual memory sizes it's possible for some RAM to not be mapped
	 * (such RAM is not used at all by Linux, since we don't support
	 * highmem on 64-bit).  We limit ppc64_rma_size to what would be
	 * mappable if this memblock is the only one.  Additional memblocks
	 * can only increase, not decrease, the amount that ends up getting
	 * mapped.  We still limit max to 1G even if we'll eventually map
	 * more.  This is due to what the early init code is set up to do.
	 *
	 * We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned long linear_sz;
		unsigned int num_cams;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
					    true);

		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
	} else
#endif
		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
#else /* ! CONFIG_PPC64 */
void __init early_init_mmu(void)
{
#ifdef CONFIG_PPC_47x
	early_init_mmu_47x();
#endif
}
#endif /* CONFIG_PPC64 */