PageRenderTime 28ms CodeModel.GetById 10ms app.highlight 12ms RepoModel.GetById 2ms app.codeStats 0ms

/arch/arm/mm/tlb-v6.S

https://bitbucket.org/evzijst/gittest
Assembly | 92 lines | 87 code | 5 blank | 0 comment | 0 complexity | 6e71efe417fe52f695b1af82b8029e18 MD5 | raw file
 1/*
 2 *  linux/arch/arm/mm/tlb-v6.S
 3 *
 4 *  Copyright (C) 1997-2002 Russell King
 5 *
 6 * This program is free software; you can redistribute it and/or modify
 7 * it under the terms of the GNU General Public License version 2 as
 8 * published by the Free Software Foundation.
 9 *
10 *  ARM architecture version 6 TLB handling functions.
11 *  These assume a split I/D TLB.
12 */
13#include <linux/linkage.h>
14#include <asm/constants.h>
15#include <asm/page.h>
16#include <asm/tlbflush.h>
17#include "proc-macros.S"
18
19#define HARVARD_TLB
20
/*
 *	v6wbi_flush_user_tlb_range(start, end, vma)
 *
 *	Invalidate a range of user-space TLB entries in the address space
 *	described by vma->vm_mm.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- vma   - vma_struct describing address range
 *
 *	It is assumed that:
 *	- the "Invalidate single entry" instruction will invalidate
 *	  both the I and the D TLBs on Harvard-style TLBs
 *
 *	Register usage: r0 = current MVA|ASID, r1 = end address,
 *	r2 = vma->vm_flags, r3 = ASID, ip = scratch zero.
 */
ENTRY(v6wbi_flush_user_tlb_range)
	vma_vm_mm r3, r2			@ r3 = vma->vm_mm
	mov	ip, #0				@ zero for the write-buffer drain op
	mmid	r3, r3				@ r3 = vm_mm->context.id
	mcr	p15, 0, ip, c7, c10, 4		@ drain write buffer before invalidating
	mov	r0, r0, lsr #PAGE_SHIFT		@ align start address down to page
	mov	r1, r1, lsr #PAGE_SHIFT		@ align end address down to page
	asid	r3, r3				@ mask off all but the ASID bits
	orr	r0, r3, r0, lsl #PAGE_SHIFT	@ r0 = initial MVA | ASID, as the
						@ invalidate-by-MVA op expects
	mov	r1, r1, lsl #PAGE_SHIFT		@ restore page-aligned end address
	vma_vm_flags r2, r2			@ r2 = vma->vm_flags
1:						@ loop: one page per iteration
#ifdef HARVARD_TLB
	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D-TLB entry by MVA
	tst	r2, #VM_EXEC			@ executable mapping?
	mcrne	p15, 0, r0, c8, c5, 1		@ ...then also invalidate I-TLB entry
#else
	mcr	p15, 0, r0, c8, c7, 1		@ invalidate unified TLB entry by MVA
#endif
	add	r0, r0, #PAGE_SZ		@ advance to next page (ASID bits kept)
	cmp	r0, r1
	blo	1b				@ until r0 reaches end (exclusive)
	mov	pc, lr				@ return (pre-bx idiom)
57
/*
 *	v6wbi_flush_kern_tlb_range(start, end)
 *
 *	Invalidate a range of kernel TLB entries.  Kernel mappings are
 *	global, so no ASID is folded into the MVA here (unlike the
 *	user-range variant above).
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *
 *	Register usage: r0 = current MVA, r1 = end, r2 = scratch zero.
 */
ENTRY(v6wbi_flush_kern_tlb_range)
	mov	r2, #0				@ zero for the write-buffer drain op
	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer before invalidating
	mov	r0, r0, lsr #PAGE_SHIFT		@ align start address down to page
	mov	r1, r1, lsr #PAGE_SHIFT		@ align end address down to page
	mov	r0, r0, lsl #PAGE_SHIFT
	mov	r1, r1, lsl #PAGE_SHIFT
1:						@ loop: one page per iteration
#ifdef HARVARD_TLB
	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D-TLB entry by MVA
	mcr	p15, 0, r0, c8, c5, 1		@ invalidate I-TLB entry by MVA
						@ (kernel range: always do both)
#else
	mcr	p15, 0, r0, c8, c7, 1		@ invalidate unified TLB entry by MVA
#endif
	add	r0, r0, #PAGE_SZ		@ advance to next page
	cmp	r0, r1
	blo	1b				@ until r0 reaches end (exclusive)
	mov	pc, lr				@ return (pre-bx idiom)
84
	.section ".text.init", #alloc, #execinstr	@ init-time text, discardable

	/*
	 * v6wbi_tlb_fns: table of TLB maintenance entry points consumed by
	 * the C code.  NOTE(review): entry order presumably mirrors the
	 * cpu_tlb_fns structure declared in <asm/tlbflush.h> — confirm
	 * before reordering anything here.
	 */
	.type	v6wbi_tlb_fns, #object
ENTRY(v6wbi_tlb_fns)
	.long	v6wbi_flush_user_tlb_range	@ flush_user_range
	.long	v6wbi_flush_kern_tlb_range	@ flush_kern_range
	.long	v6wbi_tlb_flags			@ tlb_flags (defined elsewhere)
	.size	v6wbi_tlb_fns, . - v6wbi_tlb_fns