
/arch/sparc64/mm/tlb.c

https://bitbucket.org/evzijst/gittest
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) =
	{ NULL, 0, 0, 0, 0, 0, { 0 }, { NULL }, };

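/* Flush out any virtual addresses batched on this CPU.  If the mm
 * still owns a valid hardware context, the pending addresses are
 * flushed from the TLB (cross-calling the other CPUs on SMP); either
 * way the batch is emptied.
 */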
void flush_tlb_pending(void)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);

	if (mp->tlb_nr) {
		if (CTX_VALID(mp->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
					      &mp->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
					    mp->tlb_nr, &mp->vaddrs[0]);
#endif
		}
		mp->tlb_nr = 0;
	}
}

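/* Queue a single user virtual address for a later TLB flush, doing a
 * D-cache flush first if the old translation could have left dirty,
 * aliased cache lines behind.
 */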
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr;

	vaddr &= PAGE_MASK;
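	/* Bit zero of the batched address records whether the old
	 * translation was executable; the low-level flush code uses
	 * it to decide whether the I-TLB needs demapping as well.
	 */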
	if (pte_exec(orig))
		vaddr |= 0x1UL;

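	/* If the old translation was dirty, the page may have stale
	 * lines in the virtually-indexed D-cache under the user
	 * mapping; work out whether those need flushing before the
	 * translation disappears.
	 */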
	if (pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

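		/* The D-cache is virtually indexed and spans more
		 * than one page, so the kernel and user mappings
		 * alias when they differ in bit 13 (the cache color
		 * bit); only then is a flush needed.
		 */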
		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:

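	/* When the batch is frozen (a full-mm flush is pending)
	 * individual addresses need not be queued.
	 */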
	if (mp->tlb_frozen)
		return;

	nr = mp->tlb_nr;

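	/* The batch holds addresses for a single mm; if a different
	 * mm shows up, flush what has been collected so far.
	 */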
	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

	mp->vaddrs[nr] = vaddr;
	mp->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();
}

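/* Batch flushes of the linear ("VPTE") page table entries covering
 * the user range start..end, so that stale translations of the page
 * tables themselves do not survive once those page tables are freed.
 */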
void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr = mp->tlb_nr;
	long s = start, e = end, vpte_base;

	if (mp->tlb_frozen)
		return;

	/* If start is greater than end, that is a real problem.  */
	BUG_ON(start > end);

	/* However, straddling the VA space hole is quite normal. */
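	/* Each page of the linear page table covers PMD_SIZE of user
	 * address space, so round the range out to PMD boundaries.
	 */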
	s &= PMD_MASK;
	e = (e + PMD_SIZE - 1) & PMD_MASK;

	vpte_base = (tlb_type == spitfire ?
		     VPTE_BASE_SPITFIRE :
		     VPTE_BASE_CHEETAH);

	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

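	/* Each PTE is 8 bytes, so a user virtual address corresponds
	 * to the VPTE address vpte_base + (va >> PAGE_SHIFT) * 8,
	 * i.e. vpte_base + (va >> (PAGE_SHIFT - 3)).
	 */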
	start = vpte_base + (s >> (PAGE_SHIFT - 3));
	end = vpte_base + (e >> (PAGE_SHIFT - 3));

	/* If the request straddles the VA space hole, we
	 * need to swap start and end.  The reason this
	 * occurs is that "vpte_base" is the center of
	 * the linear page table mapping area.  Thus,
	 * high addresses with the sign bit set map to
	 * addresses below vpte_base and non-sign bit
	 * addresses map to addresses above vpte_base.
	 */
	if (end < start) {
		unsigned long tmp = start;

		start = end;
		end = tmp;
	}

	while (start < end) {
		mp->vaddrs[nr] = start;
		mp->tlb_nr = ++nr;
		if (nr >= TLB_BATCH_NR) {
			flush_tlb_pending();
			nr = 0;
		}
		start += PAGE_SIZE;
	}
	if (nr)
		flush_tlb_pending();
}