
/arch/um/kernel/tlb.c

https://bitbucket.org/evzijst/gittest
/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgalloc.h"
#include "asm/tlbflush.h"
#include "choose-mode.h"
#include "mode_kern.h"
#include "user_util.h"
#include "tlb.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"

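/*
 * Round n up to the next 'inc'-aligned boundary (inc must be a power
 * of two); e.g. ADD_ROUND(0x1234, 0x1000) == 0x2000.  Used to find the
 * end of the page-table region covering the current address.
 */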
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

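/*
 * Walk the page tables for [start_addr, end_addr) and queue the host
 * mmap/munmap/mprotect operations needed to bring the host address
 * space back in sync.  Operations are batched in 'ops' and passed to
 * 'do_ops' whenever the array fills up, and once more at the end.
 * With 'force' set, every range is unmapped whether it changed or not.
 */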
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force, int data,
                      void (*do_ops)(int, struct host_vm_op *, int))
{
        pgd_t *npgd;
        pud_t *npud;
        pmd_t *npmd;
        pte_t *npte;
        unsigned long addr, end;
        int r, w, x;
        struct host_vm_op ops[16];
        int op_index = -1, last_op = sizeof(ops) / sizeof(ops[0]) - 1;

        if(mm == NULL) return;

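        /*
         * Descend one page-table level at a time; a missing level lets
         * us skip (and possibly unmap) its whole address range at once.
         */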
        for(addr = start_addr; addr < end_addr;){
                npgd = pgd_offset(mm, addr);
                if(!pgd_present(*npgd)){
                        end = ADD_ROUND(addr, PGDIR_SIZE);
                        if(end > end_addr)
                                end = end_addr;
                        if(force || pgd_newpage(*npgd)){
                                op_index = add_munmap(addr, end - addr, ops,
                                                      op_index, last_op, data,
                                                      do_ops);
                                pgd_mkuptodate(*npgd);
                        }
                        addr = end;
                        continue;
                }

                npud = pud_offset(npgd, addr);
                if(!pud_present(*npud)){
                        end = ADD_ROUND(addr, PUD_SIZE);
                        if(end > end_addr)
                                end = end_addr;
                        if(force || pud_newpage(*npud)){
                                op_index = add_munmap(addr, end - addr, ops,
                                                      op_index, last_op, data,
                                                      do_ops);
                                pud_mkuptodate(*npud);
                        }
                        addr = end;
                        continue;
                }

                npmd = pmd_offset(npud, addr);
                if(!pmd_present(*npmd)){
                        end = ADD_ROUND(addr, PMD_SIZE);
                        if(end > end_addr)
                                end = end_addr;
                        if(force || pmd_newpage(*npmd)){
                                op_index = add_munmap(addr, end - addr, ops,
                                                      op_index, last_op, data,
                                                      do_ops);
                                pmd_mkuptodate(*npmd);
                        }
                        addr = end;
                        continue;
                }

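                /*
                 * Dirty and accessed bits are tracked in software:
                 * map clean pages read-only so the first write faults,
                 * and map non-young pages with no access at all.
                 */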
                npte = pte_offset_kernel(npmd, addr);
                r = pte_read(*npte);
                w = pte_write(*npte);
                x = pte_exec(*npte);
                if(!pte_dirty(*npte))
                        w = 0;
                if(!pte_young(*npte)){
                        r = 0;
                        w = 0;
                }
                if(force || pte_newpage(*npte)){
                        if(pte_present(*npte))
                                op_index = add_mmap(addr,
                                                    pte_val(*npte) & PAGE_MASK,
                                                    PAGE_SIZE, r, w, x, ops,
                                                    op_index, last_op, data,
                                                    do_ops);
                        else op_index = add_munmap(addr, PAGE_SIZE, ops,
                                                   op_index, last_op, data,
                                                   do_ops);
                }
                else if(pte_newprot(*npte))
                        op_index = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
                                                op_index, last_op, data,
                                                do_ops);

                *npte = pte_mkuptodate(*npte);
                addr += PAGE_SIZE;
        }
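        /* Flush any operations still batched in 'ops'. */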
        (*do_ops)(data, ops, op_index);
}

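/*
 * Walk the kernel page tables for [start, end) and fix up the host
 * mappings directly (no batching): stale pages are unmapped, present
 * pages are remapped, and permission changes are applied.  Returns
 * nonzero if anything in the host address space was changed.
 */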
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
        struct mm_struct *mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, last;
        int updated = 0, err;

        mm = &init_mm;
        for(addr = start; addr < end;){
                pgd = pgd_offset(mm, addr);
                if(!pgd_present(*pgd)){
                        last = ADD_ROUND(addr, PGDIR_SIZE);
                        if(last > end)
                                last = end;
                        if(pgd_newpage(*pgd)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pud = pud_offset(pgd, addr);
                if(!pud_present(*pud)){
                        last = ADD_ROUND(addr, PUD_SIZE);
                        if(last > end)
                                last = end;
                        if(pud_newpage(*pud)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if(!pmd_present(*pmd)){
                        last = ADD_ROUND(addr, PMD_SIZE);
                        if(last > end)
                                last = end;
                        if(pmd_newpage(*pmd)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

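                /*
                 * Kernel pages are mapped read-write-execute; a stale
                 * page is unmapped and, if still present in the page
                 * tables, mapped in fresh.
                 */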
                pte = pte_offset_kernel(pmd, addr);
                if(!pte_present(*pte) || pte_newpage(*pte)){
                        updated = 1;
                        err = os_unmap_memory((void *) addr,
                                              PAGE_SIZE);
                        if(err < 0)
                                panic("munmap failed, errno = %d\n",
                                      -err);
                        if(pte_present(*pte))
                                map_memory(addr,
                                           pte_val(*pte) & PAGE_MASK,
                                           PAGE_SIZE, 1, 1, 1);
                }
                else if(pte_newprot(*pte)){
                        updated = 1;
                        protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);
                }
                addr += PAGE_SIZE;
        }
        return(updated);
}

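/*
 * The flush_tlb_* entry points below dispatch to the tt- or skas-mode
 * implementation via CHOOSE_MODE.
 */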
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
        address &= PAGE_MASK;
        flush_tlb_range(vma, address, address + PAGE_SIZE);
}

void flush_tlb_all(void)
{
        flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
                         flush_tlb_kernel_range_common, start, end);
}

void flush_tlb_kernel_vm(void)
{
        CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
                    flush_tlb_kernel_range_common(start_vm, end_vm));
}

void __flush_tlb_one(unsigned long addr)
{
        CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
                         end);
}

void flush_tlb_mm(struct mm_struct *mm)
{
        CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
}

void force_flush_all(void)
{
        CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
}

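/* Out-of-line wrappers around the page-table accessor macros. */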
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
        return(pgd_offset(mm, address));
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
        return(pud_offset(pgd, address));
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
        return(pmd_offset(pud, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
        return(pte_offset_kernel(pmd, address));
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(task->mm, addr);
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return(pte_offset_map(pmd, addr));
}

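/*
 * Queue a host mmap of 'len' bytes at 'virt', backed by the physical
 * page at 'phys'.  A request that simply extends the previous entry in
 * 'ops' (same flags, contiguous in both address and file offset) is
 * merged into it; otherwise a new entry is appended, flushing the full
 * array through 'do_ops' first if necessary.  Returns the index of the
 * last used entry.
 */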
int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
             int r, int w, int x, struct host_vm_op *ops, int index,
             int last_filled, int data,
             void (*do_ops)(int, struct host_vm_op *, int))
{
        __u64 offset;
        struct host_vm_op *last;
        int fd;

        fd = phys_mapping(phys, &offset);
        if(index != -1){
                last = &ops[index];
                if((last->type == MMAP) &&
                   (last->u.mmap.addr + last->u.mmap.len == virt) &&
                   (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
                   (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
                   (last->u.mmap.offset + last->u.mmap.len == offset)){
                        last->u.mmap.len += len;
                        return(index);
                }
        }

        if(index == last_filled){
                (*do_ops)(data, ops, last_filled);
                index = -1;
        }

        ops[++index] = ((struct host_vm_op) { .type     = MMAP,
                                              .u = { .mmap = {
                                                      .addr   = virt,
                                                      .len    = len,
                                                      .r      = r,
                                                      .w      = w,
                                                      .x      = x,
                                                      .fd     = fd,
                                                      .offset = offset }
                                              } });
        return(index);
}

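/*
 * Queue a host munmap of 'len' bytes at 'addr', merging with the
 * previous entry when the two ranges are contiguous.
 */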
int add_munmap(unsigned long addr, unsigned long len, struct host_vm_op *ops,
               int index, int last_filled, int data,
               void (*do_ops)(int, struct host_vm_op *, int))
{
        struct host_vm_op *last;

        if(index != -1){
                last = &ops[index];
                if((last->type == MUNMAP) &&
                   (last->u.munmap.addr + last->u.munmap.len == addr)){
                        last->u.munmap.len += len;
                        return(index);
                }
        }

        if(index == last_filled){
                (*do_ops)(data, ops, last_filled);
                index = -1;
        }

        ops[++index] = ((struct host_vm_op) { .type = MUNMAP,
                                              .u = { .munmap = {
                                                      .addr = addr,
                                                      .len  = len } } });
        return(index);
}

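/*
 * Queue a host mprotect of 'len' bytes at 'addr' with the given r/w/x
 * permissions, merging with the previous entry when it is contiguous
 * and has identical permissions.
 */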
int add_mprotect(unsigned long addr, unsigned long len, int r, int w, int x,
                 struct host_vm_op *ops, int index, int last_filled, int data,
                 void (*do_ops)(int, struct host_vm_op *, int))
{
        struct host_vm_op *last;

        if(index != -1){
                last = &ops[index];
                if((last->type == MPROTECT) &&
                   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
                   (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
                   (last->u.mprotect.x == x)){
                        last->u.mprotect.len += len;
                        return(index);
                }
        }

        if(index == last_filled){
                (*do_ops)(data, ops, last_filled);
                index = -1;
        }

        ops[++index] = ((struct host_vm_op) { .type = MPROTECT,
                                              .u = { .mprotect = {
                                                      .addr = addr,
                                                      .len  = len,
                                                      .r    = r,
                                                      .w    = w,
                                                      .x    = x } } });
        return(index);
}