/*
 * arch/arm/include/asm/tlbflush.h
 * Source mirror: https://bitbucket.org/sammyz/iscream_thunderc-2.6.35-rebase
 * License: GPL-2.0
 */
  1/*
  2 *  arch/arm/include/asm/tlbflush.h
  3 *
  4 *  Copyright (C) 1999-2003 Russell King
  5 *
  6 * This program is free software; you can redistribute it and/or modify
  7 * it under the terms of the GNU General Public License version 2 as
  8 * published by the Free Software Foundation.
  9 */
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H


#ifndef CONFIG_MMU

/* No MMU means no TLB: flushing is a no-op (the cast silences "unused"). */
#define tlb_flush(tlb)	((void) tlb)

#else /* CONFIG_MMU */

#include <asm/glue.h>
 21
/*
 * TLB capability bits.  Each TLB model below advertises which
 * maintenance operations it implements; the local_flush_tlb_*()
 * helpers test these bits to decide which cp15 instructions to emit.
 *
 * Bits 0-6: invalidate single entry (U = unified, D = data, I = instr).
 */
#define TLB_V3_PAGE	(1 << 0)
#define TLB_V4_U_PAGE	(1 << 1)
#define TLB_V4_D_PAGE	(1 << 2)
#define TLB_V4_I_PAGE	(1 << 3)
#define TLB_V6_U_PAGE	(1 << 4)
#define TLB_V6_D_PAGE	(1 << 5)
#define TLB_V6_I_PAGE	(1 << 6)

/* Bits 8-14: invalidate entire TLB. */
#define TLB_V3_FULL	(1 << 8)
#define TLB_V4_U_FULL	(1 << 9)
#define TLB_V4_D_FULL	(1 << 10)
#define TLB_V4_I_FULL	(1 << 11)
#define TLB_V6_U_FULL	(1 << 12)
#define TLB_V6_D_FULL	(1 << 13)
#define TLB_V6_I_FULL	(1 << 14)

/* Bits 16-18: invalidate all entries tagged with a given ASID. */
#define TLB_V6_U_ASID	(1 << 16)
#define TLB_V6_D_ASID	(1 << 17)
#define TLB_V6_I_ASID	(1 << 18)

/* Branch target buffer must be flushed after TLB maintenance. */
#define TLB_BTB		(1 << 28)

/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
#define TLB_V7_UIS_PAGE	(1 << 19)
#define TLB_V7_UIS_FULL (1 << 20)
#define TLB_V7_UIS_ASID (1 << 21)

/* Inner Shareable BTB operation (ARMv7 MP extensions) */
#define TLB_V7_IS_BTB	(1 << 22)

#define TLB_L2CLEAN_FR	(1 << 29)		/* Feroceon */
#define TLB_DCLEAN	(1 << 30)	/* page table entries need D-cache cleaning */
#define TLB_WB		(1 << 31)	/* write buffer must be drained (dsb) first */
 55
/*
 *	MMU TLB Model
 *	=============
 *
 *	We have the following to choose from:
 *	  v3    - ARMv3
 *	  v4    - ARMv4 without write buffer
 *	  v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
 *	  v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
 *	  fr    - Feroceon (v4wbi with non-outer-cacheable page table walks)
 *	  fa    - Faraday (v4 with write buffer with UTLB and branch target buffer (BTB))
 *	  v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
 *	  v7wbi - identical to v6wbi
 */
#undef _TLB
#undef MULTI_TLB

/*
 * Each configured model contributes its flag mask to both its
 * "possible" and "always" sets, and claims _TLB; if _TLB was already
 * claimed by another model, MULTI_TLB is set and flushes dispatch
 * through the cpu_tlb vector instead.  An unconfigured model has no
 * possible flags (0) and, vacuously, all always flags (-1UL), so it
 * drops out of the AND/OR masks built further down.
 */
#define v3_tlb_flags	(TLB_V3_FULL | TLB_V3_PAGE)

#ifdef CONFIG_CPU_TLB_V3
# define v3_possible_flags	v3_tlb_flags
# define v3_always_flags	v3_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v3
# endif
#else
# define v3_possible_flags	0
# define v3_always_flags	(-1UL)
#endif

#define v4_tlb_flags	(TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_V4WT
# define v4_possible_flags	v4_tlb_flags
# define v4_always_flags	v4_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4
# endif
#else
# define v4_possible_flags	0
# define v4_always_flags	(-1UL)
#endif

#define fa_tlb_flags	(TLB_WB | TLB_BTB | TLB_DCLEAN | \
			 TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_FA
# define fa_possible_flags	fa_tlb_flags
# define fa_always_flags	fa_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB fa
# endif
#else
# define fa_possible_flags	0
# define fa_always_flags	(-1UL)
#endif

#define v4wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WBI
# define v4wbi_possible_flags	v4wbi_tlb_flags
# define v4wbi_always_flags	v4wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi
# endif
#else
# define v4wbi_possible_flags	0
# define v4wbi_always_flags	(-1UL)
#endif

#define fr_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_L2CLEAN_FR | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_FEROCEON
# define fr_possible_flags	fr_tlb_flags
# define fr_always_flags	fr_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi	/* intentional: Feroceon reuses the v4wbi flush routines */
# endif
#else
# define fr_possible_flags	0
# define fr_always_flags	(-1UL)
#endif

#define v4wb_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WB
# define v4wb_possible_flags	v4wb_tlb_flags
# define v4wb_always_flags	v4wb_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wb
# endif
#else
# define v4wb_possible_flags	0
# define v4wb_always_flags	(-1UL)
#endif

#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
			 TLB_V6_I_FULL | TLB_V6_D_FULL | \
			 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
			 TLB_V6_I_ASID | TLB_V6_D_ASID)

#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags	v6wbi_tlb_flags
# define v6wbi_always_flags	v6wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v6wbi
# endif
#else
# define v6wbi_possible_flags	0
# define v6wbi_always_flags	(-1UL)
#endif

#ifdef CONFIG_SMP
/* SMP v7: use the inner-shareable (broadcast) operations. */
#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
			 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
#else
/* UP v7: local unified operations suffice. */
#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
			 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
#endif

#ifdef CONFIG_CPU_TLB_V7
# define v7wbi_possible_flags	v7wbi_tlb_flags
# define v7wbi_always_flags	v7wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v7wbi
# endif
#else
# define v7wbi_possible_flags	0
# define v7wbi_always_flags	(-1UL)
#endif

#ifndef _TLB
#error Unknown TLB model
#endif
212
213#ifndef __ASSEMBLY__
214
215#include <linux/sched.h>
216
/*
 * Per-CPU-type TLB method vector used when several TLB models are
 * compiled in (MULTI_TLB); filled in by the processor support code.
 * Layout is ABI for the assembly implementations - do not reorder.
 */
struct cpu_tlb_fns {
	/* Flush user-space range [start, end) for the given vma. */
	void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
	/* Flush kernel-space range [start, end). */
	void (*flush_kern_range)(unsigned long, unsigned long);
	/* TLB_* capability bits for this CPU's TLB model. */
	unsigned long tlb_flags;
};
222
/*
 * Select the calling method
 */
#ifdef MULTI_TLB

/* Several TLB models built in: dispatch indirectly through cpu_tlb. */
#define __cpu_flush_user_tlb_range	cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range	cpu_tlb.flush_kern_range

#else

/* Single model: glue directly to e.g. v7wbi_flush_user_tlb_range. */
#define __cpu_flush_user_tlb_range	__glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range	__glue(_TLB,_flush_kern_tlb_range)

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

#endif

extern struct cpu_tlb_fns cpu_tlb;

/* Capability bits of the TLB model on the running CPU. */
#define __cpu_tlb_flags			cpu_tlb.tlb_flags
244
/*
 *	TLB Management
 *	==============
 *
 *	The arch/arm/mm/tlb-*.S files implement these methods.
 *
 *	The TLB specific code is expected to perform whatever tests it
 *	needs to determine if it should invalidate the TLB for each
 *	call.  Start addresses are inclusive and end addresses are
 *	exclusive; it is safe to round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address
 *		space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified
 *		address space.
 *		- vma	- vm_area_struct describing address range
 *		- start - start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vaddr,vma)
 *
 *		Invalidate the specified page in the specified address range.
 *		- vaddr - virtual address (may not be aligned)
 *		- vma	- vma_struct describing address range
 *
 *	flush_tlb_kernel_page(kaddr)
 *
 *		Invalidate the TLB entry for the specified page.  The address
 *		will be in the kernel's virtual memory space.  Current uses
 *		only require the D-TLB to be invalidated.
 *		- kaddr - Kernel virtual memory address
 */

/*
 * We optimise the code below by:
 *  - building a set of TLB flags that might be set in __cpu_tlb_flags
 *  - building a set of TLB flags that will always be set in __cpu_tlb_flags
 *  - if we're going to need __cpu_tlb_flags, access it once and only once
 *
 * This allows us to build optimal assembly for the single-CPU type case,
 * and as close to optimal given the compiler constraints for multi-CPU
 * case.  We could do better for the multi-CPU case if the compiler
 * implemented the "%?" method, but this has been discontinued due to too
 * many people getting it wrong.
 */
/* Union of flags any configured TLB model might set. */
#define possible_tlb_flags	(v3_possible_flags | \
				 v4_possible_flags | \
				 v4wbi_possible_flags | \
				 fr_possible_flags | \
				 v4wb_possible_flags | \
				 fa_possible_flags | \
				 v6wbi_possible_flags | \
				 v7wbi_possible_flags)

/* Intersection of flags every configured TLB model sets. */
#define always_tlb_flags	(v3_always_flags & \
				 v4_always_flags & \
				 v4wbi_always_flags & \
				 fr_always_flags & \
				 v4wb_always_flags & \
				 fa_always_flags & \
				 v6wbi_always_flags & \
				 v7wbi_always_flags)

/*
 * True iff some flag in (f) is always set for this build, or is both
 * possible and present in __tlb_flag - a local copy of __cpu_tlb_flags
 * that each caller below takes exactly once.
 */
#define tlb_flag(f)	((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
319
/*
 * local_flush_tlb_all - invalidate the entire TLB on this CPU.
 *
 * Drains the write buffer first (TLB_WB) so outstanding page table
 * writes are visible, issues whichever full-invalidate cp15 ops the
 * TLB model supports, then flushes the branch target buffer where
 * the model requires it.
 */
static inline void local_flush_tlb_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;	/* read once, see tlb_flag() */

	if (tlb_flag(TLB_WB))
		dsb();

	if (tlb_flag(TLB_V3_FULL))
		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL))
		asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");	/* unified TLB */
	if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL))
		asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");	/* D-TLB */
	if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");	/* I-TLB */
	if (tlb_flag(TLB_V7_UIS_FULL))
		asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");	/* inner shareable */

	if (tlb_flag(TLB_BTB)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
	if (tlb_flag(TLB_V7_IS_BTB)) {
		/* flush the branch target cache (inner shareable) */
		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
}
352
/*
 * local_flush_tlb_mm - invalidate TLB entries for an address space.
 * @mm: mm_struct describing the address space
 *
 * ASID-capable TLBs (v6/v7) invalidate only entries tagged with this
 * mm's ASID.  Older models can only flush everything, which is done
 * only if this CPU appears in the mm's cpumask.
 */
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	const int zero = 0;
	const int asid = ASID(mm);
	const unsigned int __tlb_flag = __cpu_tlb_flags;	/* read once, see tlb_flag() */

	if (tlb_flag(TLB_WB))
		dsb();

	/* get_cpu() disables preemption around the cpumask test. */
	if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
		if (tlb_flag(TLB_V3_FULL))
			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_U_FULL))
			asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_D_FULL))
			asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_I_FULL))
			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
	}
	put_cpu();

	/* ASID-tagged invalidates: only this address space is touched. */
	if (tlb_flag(TLB_V6_U_ASID))
		asm("mcr p15, 0, %0, c8, c7, 2" : : "r" (asid) : "cc");
	if (tlb_flag(TLB_V6_D_ASID))
		asm("mcr p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc");
	if (tlb_flag(TLB_V6_I_ASID))
		asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc");
	if (tlb_flag(TLB_V7_UIS_ASID))
#ifdef CONFIG_ARM_ERRATA_720789
		/* erratum 720789 workaround: flush all ASIDs instead */
		asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");
#else
		asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc");
#endif

	if (tlb_flag(TLB_BTB)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
	}
	if (tlb_flag(TLB_V7_IS_BTB)) {
		/* flush the branch target cache (inner shareable) */
		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
}
399
/*
 * local_flush_tlb_page - invalidate TLB entries for one user page.
 * @vma:   vma the page belongs to (supplies the mm, hence the ASID)
 * @uaddr: user virtual address inside the page (may be unaligned)
 */
static inline void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;	/* read once, see tlb_flag() */

	/* MVA-plus-ASID format: page-aligned address, ASID in low bits. */
	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

	if (tlb_flag(TLB_WB))
		dsb();

	/* Non-ASID ops: only worthwhile if this mm may be live on this CPU. */
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		if (tlb_flag(TLB_V3_PAGE))
			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_U_PAGE))
			asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_D_PAGE))
			asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_I_PAGE))
			asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
		/* No I-TLB invalidate-entry op: fall back to a full I-TLB flush. */
		if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
	}

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc");
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
	if (tlb_flag(TLB_V7_UIS_PAGE))
#ifdef CONFIG_ARM_ERRATA_720789
		/* erratum 720789 workaround: invalidate by MVA, all ASIDs */
		asm("mcr p15, 0, %0, c8, c3, 3" : : "r" (uaddr & PAGE_MASK) : "cc");
#else
		asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc");
#endif

	if (tlb_flag(TLB_BTB)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
	}
	if (tlb_flag(TLB_V7_IS_BTB)) {
		/* flush the branch target cache (inner shareable) */
		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
}
449
/*
 * local_flush_tlb_kernel_page - invalidate TLB entries for one kernel page.
 * @kaddr: kernel virtual address inside the page (may be unaligned)
 *
 * Kernel mappings are global, so no ASID is encoded and no cpumask
 * test is needed - every TLB model's page ops are issued directly.
 */
static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;	/* read once, see tlb_flag() */

	kaddr &= PAGE_MASK;

	if (tlb_flag(TLB_WB))
		dsb();

	if (tlb_flag(TLB_V3_PAGE))
		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
	/* No I-TLB invalidate-entry op: fall back to a full I-TLB flush. */
	if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V7_UIS_PAGE))
		asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc");

	if (tlb_flag(TLB_BTB)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
	if (tlb_flag(TLB_V7_IS_BTB)) {
		/* flush the branch target cache (inner shareable) */
		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
}
493
494/*
495 *	flush_pmd_entry
496 *
497 *	Flush a PMD entry (word aligned, or double-word aligned) to
498 *	RAM if the TLB for the CPU we are running on requires this.
499 *	This is typically used when we are creating PMD entries.
500 *
501 *	clean_pmd_entry
502 *
503 *	Clean (but don't drain the write buffer) if the CPU requires
504 *	these operations.  This is typically used when we are removing
505 *	PMD entries.
506 */
/* See the comment block above: clean the PMD entry out of the D-cache
 * (and Feroceon's L2 where needed), then drain the write buffer so the
 * hardware table walker sees the update. */
static inline void flush_pmd_entry(pmd_t *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;	/* read once, see tlb_flag() */

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr	p15, 0, %0, c7, c10, 1	@ flush_pmd"
			: : "r" (pmd) : "cc");

	if (tlb_flag(TLB_L2CLEAN_FR))
		asm("mcr	p15, 1, %0, c15, c9, 1  @ L2 flush_pmd"
			: : "r" (pmd) : "cc");

	if (tlb_flag(TLB_WB))
		dsb();
}
522
/* Same cache cleaning as flush_pmd_entry(), but deliberately without
 * draining the write buffer - used when removing PMD entries. */
static inline void clean_pmd_entry(pmd_t *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;	/* read once, see tlb_flag() */

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr	p15, 0, %0, c7, c10, 1	@ flush_pmd"
			: : "r" (pmd) : "cc");

	if (tlb_flag(TLB_L2CLEAN_FR))
		asm("mcr	p15, 1, %0, c15, c9, 1  @ L2 flush_pmd"
			: : "r" (pmd) : "cc");
}
535
/* The helper macros above are private to this header. */
#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags

/*
 * Convert calls to our calling convention.
 */
#define local_flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
#define local_flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)

#ifndef CONFIG_SMP
/* UP: the local (this-CPU) operations are the global ones. */
#define flush_tlb_all		local_flush_tlb_all
#define flush_tlb_mm		local_flush_tlb_mm
#define flush_tlb_page		local_flush_tlb_page
#define flush_tlb_kernel_page	local_flush_tlb_kernel_page
#define flush_tlb_range		local_flush_tlb_range
#define flush_tlb_kernel_range	local_flush_tlb_kernel_range
#else
/* SMP: out-of-line versions - presumably broadcast to other CPUs;
 * see the SMP TLB support code for the implementations. */
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#endif
561
/*
 * if PG_dcache_dirty is set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
 * back to the page.
 */
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep);

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_TLBFLUSH_H */