
/arch/parisc/include/asm/tlbflush.h

https://github.com/aicjofs/android_kernel_lge_v500_20d_f2fs
#ifndef _PARISC_TLBFLUSH_H
#define _PARISC_TLBFLUSH_H

/* TLB flushing routines.... */

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/mmu_context.h>


/* This is for the serialisation of PxTLB broadcasts.  At least on the
 * N class systems, only one PxTLB inter processor broadcast can be
 * active at any one time on the Merced bus.  This tlb purge
 * synchronisation is fairly lightweight and harmless so we activate
 * it on all systems not just the N class.
 */
extern spinlock_t pa_tlb_lock;

#define purge_tlb_start(flags)	spin_lock_irqsave(&pa_tlb_lock, flags)
#define purge_tlb_end(flags)	spin_unlock_irqrestore(&pa_tlb_lock, flags)
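/*
 * Illustrative sketch, not part of the original header: the pattern the
 * purge_tlb_start()/purge_tlb_end() pair is meant to protect.  A ranged
 * purge broadcasts one pdtlb/pitlb per page while holding the lock,
 * roughly as below.  example_purge_user_range() is a hypothetical name
 * used only for illustration; the in-tree version of this logic is
 * __flush_tlb_range(), declared further down.
 */
static inline void example_purge_user_range(unsigned long sid,
	unsigned long start, unsigned long end)
{
	unsigned long flags, addr;

	mb();				/* order PTE updates before the purge */
	mtsp(sid, 1);			/* purges below target this space id */
	purge_tlb_start(flags);		/* serialise PxTLB broadcasts */
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		pdtlb(addr);		/* purge the data TLB entry */
		pitlb(addr);		/* purge the instruction TLB entry */
	}
	purge_tlb_end(flags);
}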

extern void flush_tlb_all(void);
extern void flush_tlb_all_local(void *);

/*
 * flush_tlb_mm()
 *
 * XXX This code is NOT valid for HP-UX compatibility processes,
 * (although it will probably work 99% of the time). HP-UX
 * processes are free to play with the space id's and save them
 * over long periods of time, etc. so we have to preserve the
 * space and just flush the entire tlb. We need to check the
 * personality in order to do that, but the personality is not
 * currently being set correctly.
 *
 * Of course, Linux processes could do the same thing, but
 * we don't support that (and the compilers, dynamic linker,
 * etc. do not do that).
 */

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm); /* Should never happen */

#if 1 || defined(CONFIG_SMP)
	flush_tlb_all();
#else
	/* FIXME: currently broken, causing space id and protection ids
	 *  to go out of sync, resulting in faults on userspace accesses.
	 */
	if (mm) {
		if (mm->context != 0)
			free_sid(mm->context);
		mm->context = alloc_sid();
		if (mm == current->active_mm)
			load_context(mm->context);
	}
#endif
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	unsigned long flags;

	/* For one page, it's not worth testing the split_tlb variable */

	mb();
	mtsp(vma->vm_mm->context,1);
	purge_tlb_start(flags);
	pdtlb(addr);
	pitlb(addr);
	purge_tlb_end(flags);
}

void __flush_tlb_range(unsigned long sid,
	unsigned long start, unsigned long end);

#define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end)

#define flush_tlb_kernel_range(start, end) __flush_tlb_range(0,start,end)

#endif
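
/*
 * Usage sketch, not part of tlbflush.h itself: roughly how a caller that
 * has just updated the PTEs of a vma would pick between the flushes
 * declared above.  example_flush_after_update() and its parameters are
 * hypothetical names used only for illustration.
 */
static inline void example_flush_after_update(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (end - start == PAGE_SIZE)
		/* a single page: purge just that entry in the vma's space */
		flush_tlb_page(vma, start);
	else
		/* several pages: hand the whole span to the ranged flush,
		 * which resolves to __flush_tlb_range() with the mm's
		 * space id */
		flush_tlb_range(vma, start, end);
}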