
/arch/alpha/include/asm/tlbflush.h

http://github.com/mirrors/linux
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_TLBFLUSH_H
#define _ALPHA_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/compiler.h>
#include <asm/pgalloc.h>

#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif
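
/* Note: with the GNU "extern inline" idiom used below, includers normally
   get only inlined copies of these helpers.  A single arch/alpha translation
   unit is expected to override __EXTERN_INLINE before including this header
   so that real, addressable definitions are emitted (the generic machine
   vectors take their addresses).  */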

extern void __load_new_mm_context(struct mm_struct *);


/* Use a few helper functions to hide the ugly broken ASN
   numbers on early Alphas (ev4 and ev45).  */
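/* __load_new_mm_context() installs a fresh address-space number (ASN)
   for this mm on the current CPU.  On EV5 and later that is sufficient:
   stale TLB entries carry the old ASN and simply stop matching.  EV4/EV45
   cannot depend on ASNs, so the per-process entries are also flushed
   explicitly with tbiap().  */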

__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
	tbiap();
}

__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
}

/* Flush just one page in the current TLB set.  We need to be very
   careful about the icache here, there is no way to invalidate a
   specific icache page.  */

__EXTERN_INLINE void
ev4_flush_tlb_current_page(struct mm_struct * mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
	int tbi_flag = 2;
	if (vma->vm_flags & VM_EXEC) {
		__load_new_mm_context(mm);
		tbi_flag = 3;
	}
	tbi(tbi_flag, addr);
}

__EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct * mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
	if (vma->vm_flags & VM_EXEC)
		__load_new_mm_context(mm);
	else
		tbi(2, addr);
}
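
/* tbi(2, addr) is TBISD (drop a single data-stream translation), while
   tbi(3, addr) is TBIS (drop both the I- and D-stream entries).  As noted
   above, a specific icache page cannot be invalidated, so executable
   mappings additionally force a context reload.  */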


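/* Generic kernels dispatch through the machine vector at run time;
   kernels built for a specific CPU family bind the ev4/ev5 variants at
   compile time so the calls above can be inlined.  */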
#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current		alpha_mv.mv_flush_tlb_current
# define flush_tlb_current_page		alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
#  define flush_tlb_current		ev4_flush_tlb_current
#  define flush_tlb_current_page	ev4_flush_tlb_current_page
# else
#  define flush_tlb_current		ev5_flush_tlb_current
#  define flush_tlb_current_page	ev5_flush_tlb_current_page
# endif
#endif

#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

/* Flush current user mapping.  */
static inline void
flush_tlb(void)
{
	flush_tlb_current(current->active_mm);
}

/* Flush someone else's user mapping.  */
static inline void
flush_tlb_other(struct mm_struct *mm)
{
	unsigned long *mmc = &mm->context[smp_processor_id()];
	/* Check it's not zero first to avoid cacheline ping pong
	   when possible.  */
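	/* Zeroing the per-CPU context forces a fresh ASN to be
	   allocated the next time this mm runs on that CPU, so any
	   stale entries it still holds stop matching.  */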
	if (*mmc) *mmc = 0;
}

#ifndef CONFIG_SMP
/* Flush everything (kernel mapping may also have changed
   due to vmalloc/vfree).  */
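/* tbia() drops every entry, including global (ASM) ones, so this also
   covers kernel/vmalloc translations; tbiap() used above leaves the
   global entries alone.  */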
static inline void flush_tlb_all(void)
{
	tbia();
}

/* Flush a specified user mapping.  */
static inline void
flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

/* Page-granular tlb flush.  */
static inline void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm == current->active_mm)
		flush_tlb_current_page(mm, vma, addr);
	else
		flush_tlb_other(mm);
}

/* Flush a specified range of user mapping.  On the Alpha we flush
   the whole user tlb.  */
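/* Getting a new ASN (see above) is cheap, so a whole-mm flush usually
   beats issuing one tbi per page in the range.  */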
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}

#else /* CONFIG_SMP */

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
			    unsigned long);

#endif /* CONFIG_SMP */

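/* Kernel translations are global (ASM), so the straightforward way to
   drop a stale kernel range is a full flush.  */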
static inline void flush_tlb_kernel_range(unsigned long start,
					unsigned long end)
{
	flush_tlb_all();
}

#endif /* _ALPHA_TLBFLUSH_H */