/arch/sh64/mm/hugetlbpage.c

https://bitbucket.org/evzijst/gittest

/*
 * arch/sh64/mm/hugetlbpage.c
 *
 * SuperH HugeTLB page support.
 *
 * Cloned from sparc64 by Paul Mundt.
 *
 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
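
/*
 * On sh64 a huge page is not described by a single higher-level entry:
 * each HPAGE_SIZE region is backed by (1 << HUGETLB_PAGE_ORDER)
 * contiguous normal PTEs, each tagged with _PAGE_SZHUGE (see
 * mk_pte_huge() and set_huge_pte() below), presumably so the TLB miss
 * handler can recognise and install the large mapping.  huge_pte_alloc()
 * and huge_pte_offset() therefore return the PTE for the first base
 * page of such a run.
 */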

static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd) {
		pmd = pmd_alloc(mm, pgd, addr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, addr);
	}

	return pte;
}

static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd) {
		pmd = pmd_offset(pgd, addr);
		if (pmd)
			pte = pte_offset_map(pmd, addr);
	}

	return pte;
}

#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)

static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			 struct page *page, pte_t * page_table, int write_access)
{
	unsigned long i;
	pte_t entry;

	add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);

	if (write_access)
		entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
						       vma->vm_page_prot)));
	else
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	entry = pte_mkyoung(entry);
	mk_pte_huge(entry);

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte(page_table, entry);
		page_table++;

		pte_val(entry) += PAGE_SIZE;
	}
}
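
/*
 * Illustration (the numbers are assumptions, not taken from this file):
 * with 4KB base pages and HUGETLB_PAGE_ORDER == 4 (64KB huge pages),
 * one call to set_huge_pte() writes 16 consecutive PTEs mapping
 * page, page + 4KB, ..., page + 60KB, all marked _PAGE_SZHUGE, and
 * raises the rss counter by 16.
 */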

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}
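
/*
 * For example, is_aligned_hugepage_range(3 * HPAGE_SIZE, HPAGE_SIZE)
 * returns 0, while any addr or len that is not a multiple of HPAGE_SIZE
 * is rejected with -EINVAL.
 */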

int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;
	int i;

	while (addr < end) {
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		src_pte = huge_pte_offset(src, addr);
		BUG_ON(!src_pte || pte_none(*src_pte));
		entry = *src_pte;
		ptepage = pte_page(entry);
		get_page(ptepage);
		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
			set_pte(dst_pte, entry);
			pte_val(entry) += PAGE_SIZE;
			dst_pte++;
		}
		add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
		addr += HPAGE_SIZE;
	}
	return 0;

nomem:
	return -ENOMEM;
}
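
/*
 * Fork simply duplicates the parent's PTE run into the child: the huge
 * page is shared (one get_page() per huge page, no copy-on-write here),
 * and the child's rss is raised by HPAGE_SIZE / PAGE_SIZE for each huge
 * page copied.
 */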

int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long vaddr = *position;
	int remainder = *length;

	WARN_ON(!is_vm_hugetlb_page(vma));

	while (vaddr < vma->vm_end && remainder) {
		if (pages) {
			pte_t *pte;
			struct page *page;

			pte = huge_pte_offset(mm, vaddr);

			/* hugetlb should be locked, and hence, prefaulted */
			BUG_ON(!pte || pte_none(*pte));

			page = pte_page(*pte);

			WARN_ON(!PageCompound(page));

			get_page(page);
			pages[i] = page;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		--remainder;
		++i;
	}

	*length = remainder;
	*position = vaddr;

	return i;
}
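
/*
 * follow_hugetlb_page() hands back the constituent PAGE_SIZE pages of
 * the mapping one at a time (vaddr advances by PAGE_SIZE, not
 * HPAGE_SIZE), taking a reference on each, presumably on behalf of
 * get_user_pages().  The BUG_ON relies on hugetlb_prefault() having
 * populated every PTE at mmap time.
 */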

struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}
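
/*
 * Because sh64 keeps huge mappings at the PTE level, there is never a
 * huge PMD: pmd_huge() is always false, follow_huge_pmd() is never
 * reached, and follow_huge_addr() returns -EINVAL so the generic
 * page-table walkers take their normal path.
 */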

void unmap_hugepage_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	struct page *page;
	int i;

	BUG_ON(start & (HPAGE_SIZE - 1));
	BUG_ON(end & (HPAGE_SIZE - 1));

	for (address = start; address < end; address += HPAGE_SIZE) {
		pte = huge_pte_offset(mm, address);
		BUG_ON(!pte);
		if (pte_none(*pte))
			continue;
		page = pte_page(*pte);
		put_page(page);
		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
			pte_clear(mm, address+(i*PAGE_SIZE), pte);
			pte++;
		}
	}
	add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
	flush_tlb_range(vma, start, end);
}
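
/*
 * Teardown mirrors set_huge_pte(): one put_page() per huge page (the
 * reference taken at prefault or fork time), a pte_clear() for each of
 * the (1 << HUGETLB_PAGE_ORDER) base PTEs, an rss decrement in
 * base-page units, and finally a TLB flush over the whole range.
 */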

int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;

	BUG_ON(vma->vm_start & ~HPAGE_MASK);
	BUG_ON(vma->vm_end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		unsigned long idx;
		pte_t *pte = huge_pte_alloc(mm, addr);
		struct page *page;

		if (!pte) {
			ret = -ENOMEM;
			goto out;
		}
		if (!pte_none(*pte))
			continue;

		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
		page = find_get_page(mapping, idx);
		if (!page) {
			/* charge the fs quota first */
			if (hugetlb_get_quota(mapping)) {
				ret = -ENOMEM;
				goto out;
			}
			page = alloc_huge_page();
			if (!page) {
				hugetlb_put_quota(mapping);
				ret = -ENOMEM;
				goto out;
			}
			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
			if (!ret) {
				unlock_page(page);
			} else {
				hugetlb_put_quota(mapping);
				free_huge_page(page);
				goto out;
			}
		}
		set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
	}
out:
	spin_unlock(&mm->page_table_lock);
	return ret;
}
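
/*
 * Worked example of the idx calculation above (all numbers are
 * assumptions for illustration): with HPAGE_SHIFT == 16 (64KB huge
 * pages) and PAGE_SHIFT == 12, a mapping with vm_start == 0x40000000
 * and vm_pgoff == 32 (i.e. 128KB into the file) gives, for
 * addr == 0x40020000:
 *
 *	idx = (0x20000 >> 16) + (32 >> 4) = 2 + 2 = 4
 *
 * so the PTE run is filled from the fifth huge page of the backing
 * hugetlbfs file.
 */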