/arch/arm/mm/flush.c

https://bitbucket.org/evzijst/gittest

/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/system.h>

static void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	__cpuc_flush_dcache_page(page_address(page));

	/*
	 * If there's no mapping pointer here, then this page isn't
	 * visible to userspace yet, so there are no cache lines
	 * associated with any other aliases.
	 */
	if (!mapping)
		return;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
		if (cache_is_vipt())
			break;
	}
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *    current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (cache_is_vipt_nonaliasing())
		return;

	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
		__flush_dcache_page(mapping, page);
}
EXPORT_SYMBOL(flush_dcache_page);
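
For context, a typical caller pattern: whenever the kernel writes to a page cache page through its kernel mapping, it must call flush_dcache_page() before userspace reads the data through its own mapping, or stale lines may be visible on aliasing caches. A minimal sketch follows; fill_page_from_buffer() is a hypothetical helper for illustration, not part of this file.

/*
 * Sketch only: hypothetical caller of flush_dcache_page().
 * Requires <linux/highmem.h> for kmap()/kunmap(); assumes len <= PAGE_SIZE.
 */
static void fill_page_from_buffer(struct page *page, const void *buf, size_t len)
{
	void *dst = kmap(page);		/* kernel-virtual alias of the page */

	memcpy(dst, buf, len);		/* dirties the kernel D-cache alias */
	flush_dcache_page(page);	/* make data visible to user aliases */
	kunmap(page);
}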
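
The lazy path in flush_dcache_page() only records PG_dcache_dirty; the deferred flush happens once the page first gains a user mapping. A sketch of that consuming side, modeled on update_mmu_cache() in arch/arm/mm/fault-armv.c from kernels of this vintage, is below. It is simplified (the real function also brings other user aliases into coherence), and details vary by kernel version.

/*
 * Sketch only: simplified from update_mmu_cache() in
 * arch/arm/mm/fault-armv.c of the same kernel generation.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping(page) &&
	    test_and_clear_bit(PG_dcache_dirty, &page->flags)) {
		/*
		 * The page was dirtied through the kernel mapping while it
		 * had no user mappings; now that a user PTE is being set
		 * up, write back the kernel alias.
		 */
		__cpuc_flush_dcache_page(page_address(page));
	}
}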