
/arch/arm/mm/cache-v4wt.S

https://bitbucket.org/evzijst/gittest
/*
 *  linux/arch/arm/mm/cache-v4wt.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  ARMv4 write through cache operations support.
 *
 *  We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT	16384

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wt_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wt_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
	mov	pc, lr
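
/*
 * Hypothetical caller, for illustration only: a plain ATPCS "bl" is
 * enough, since the routine takes no arguments, clobbers only r2 and
 * ip, and returns with "mov pc, lr".
 *
 *	bl	v4wt_flush_kern_cache_all	@ invalidate I and D caches
 */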

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4wt_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	pc, lr
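
/*
 * Hypothetical invocation, for illustration only (the addresses are
 * made up): flush a single executable user page.
 *
 *	mov	r0, #0x8000			@ start (page aligned)
 *	add	r1, r0, #4096			@ end (exclusive)
 *	mov	r2, #VM_EXEC			@ vma is executable
 *	bl	v4wt_flush_user_cache_range
 */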

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start and end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start and end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	pc, lr
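
/*
 * Hypothetical invocation, for illustration only (r4 and r5 are made-up
 * registers): after writing instructions to memory, make the range
 * visible to the I cache before jumping to it.
 *
 *	mov	r0, r4				@ start of newly written code
 *	mov	r1, r5				@ end (exclusive)
 *	bl	v4wt_coherent_user_range
 */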

/*
 *	flush_kern_dcache_page(void *page)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- page aligned address
 */
ENTRY(v4wt_flush_kern_dcache_page)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
	add	r1, r0, #PAGE_SZ
	/* fallthrough */
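
/*
 * Note the fallthrough: with r0 still holding the page address and r1
 * set to r0 + PAGE_SZ, execution drops straight into v4wt_dma_inv_range
 * below, which invalidates the page one D cache line at a time.
 */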

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_dma_inv_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	/* FALLTHROUGH */

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_dma_clean_range)
	mov	pc, lr
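
/*
 * Nothing to do above: a write-through D cache never holds dirty lines,
 * and this file assumes the write buffer is not enabled, so a clean has
 * no work to perform.  dma_flush_range below likewise reduces to a pure
 * invalidate.
 */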

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
	.globl	v4wt_dma_flush_range
	.equ	v4wt_dma_flush_range, v4wt_dma_inv_range

	__INITDATA

	.type	v4wt_cache_fns, #object
ENTRY(v4wt_cache_fns)
	.long	v4wt_flush_kern_cache_all
	.long	v4wt_flush_user_cache_all
	.long	v4wt_flush_user_cache_range
	.long	v4wt_coherent_kern_range
	.long	v4wt_coherent_user_range
	.long	v4wt_flush_kern_dcache_page
	.long	v4wt_dma_inv_range
	.long	v4wt_dma_clean_range
	.long	v4wt_dma_flush_range
	.size	v4wt_cache_fns, . - v4wt_cache_fns
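
/*
 * Illustrative sketch only, assuming r3 and ip are free: the kernel
 * reaches these routines indirectly through the v4wt_cache_fns table
 * above.  An ARMv4 indirect call (this architecture has no blx) could
 * look like:
 *
 *	ldr	r3, =v4wt_cache_fns
 *	ldr	ip, [r3, #0]		@ slot 0: flush_kern_cache_all
 *	mov	lr, pc			@ lr := address after "mov pc, ip"
 *	mov	pc, ip			@ branch to the routine
 */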