/arch/sparc64/mm/generic.c

https://bitbucket.org/evzijst/gittest

/* $Id: generic.c,v 1.18 2001/12/21 04:56:15 davem Exp $
 * generic.c: Generic Sparc mm routines that are not dependent upon
 *            MMU type but are Sparc specific.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * These routines use a pgprot that sets PAGE_IO and do not check the
 * mem_map table, as IO space is independent of normal memory.
 */
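
/* The walk mirrors remap_pfn_range()'s page table descent:
 * io_remap_{page,pfn}_range() iterate over PGD entries,
 * io_remap_pud_range() over PUDs, io_remap_pmd_range() over PMDs,
 * and io_remap_pte_range() writes the actual PTEs, using a 4MB,
 * 512K or 64K IO page whenever the virtual address, the IO offset
 * and the remaining length all line up.
 */
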
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
                                      unsigned long address,
                                      unsigned long size,
                                      unsigned long offset, pgprot_t prot,
                                      int space)
{
        unsigned long end;

        /* clear hack bit that was used as a write_combine side-effect flag */
        offset &= ~0x1UL;
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t entry;
                unsigned long curend = address + PAGE_SIZE;

                entry = mk_pte_io(offset, prot, space);
                if (!(address & 0xffff)) {
                        /* Pick the largest IO page size whose alignment
                         * the address, the offset and the remaining
                         * length all satisfy: 4MB, then 512K, then 64K.
                         */
                        if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
                                entry = mk_pte_io(offset,
                                                  __pgprot(pgprot_val(prot) | _PAGE_SZ4MB),
                                                  space);
                                curend = address + 0x400000;
                                offset += 0x400000;
                        } else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
                                entry = mk_pte_io(offset,
                                                  __pgprot(pgprot_val(prot) | _PAGE_SZ512K),
                                                  space);
                                curend = address + 0x80000;
                                offset += 0x80000;
                        } else if (!(offset & 0xfffe) && end >= address + 0x10000) {
                                entry = mk_pte_io(offset,
                                                  __pgprot(pgprot_val(prot) | _PAGE_SZ64K),
                                                  space);
                                curend = address + 0x10000;
                                offset += 0x10000;
                        } else
                                offset += PAGE_SIZE;
                } else
                        offset += PAGE_SIZE;
                /* Every base-size PTE backing a larger IO page gets the
                 * same entry.
                 */
                do {
                        BUG_ON(!pte_none(*pte));
                        set_pte_at(mm, address, pte, entry);
                        address += PAGE_SIZE;
                        pte++;
                } while (address < curend);
        } while (address < end);
}

static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
                                     unsigned long offset, pgprot_t prot, int space)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        offset -= address;
        do {
                pte_t * pte = pte_alloc_map(mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
                pte_unmap(pte);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return 0;
}

static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
                                     unsigned long offset, pgprot_t prot, int space)
{
        unsigned long end;
        int error;

        address &= ~PUD_MASK;
        end = address + size;
        if (end > PUD_SIZE)
                end = PUD_SIZE;
        offset -= address;
        do {
                pmd_t *pmd = pmd_alloc(mm, pud, address);

                /* Check the PMD we just allocated, not the PUD. */
                if (!pmd)
                        return -ENOMEM;
                /* Propagate -ENOMEM from the PMD level. */
                error = io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
                if (error)
                        return error;
                address = (address + PUD_SIZE) & PUD_MASK;
                pud++;
        } while (address < end);
        return 0;
}

int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
{
        int error = 0;
        pgd_t * dir;
        unsigned long beg = from;
        unsigned long end = from + size;
        struct mm_struct *mm = vma->vm_mm;

        /* The caller's prot is ignored: IO mappings always use pg_iobits. */
        prot = __pgprot(pg_iobits);
        offset -= from;
        dir = pgd_offset(mm, from);
        flush_cache_range(vma, beg, end);
        spin_lock(&mm->page_table_lock);
        while (from < end) {
                pud_t *pud = pud_alloc(mm, dir, from);
                error = -ENOMEM;
                if (!pud)
                        break;
                error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
                if (error)
                        break;
                from = (from + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        flush_tlb_range(vma, beg, end);
        spin_unlock(&mm->page_table_lock);
        return error;
}

int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
                       unsigned long pfn, unsigned long size, pgprot_t prot)
{
        int error = 0;
        pgd_t * dir;
        unsigned long beg = from;
        unsigned long end = from + size;
        struct mm_struct *mm = vma->vm_mm;
        /* On sparc64 the IO space number is encoded in the high bits
         * of the pfn; split it back into space and physical offset.
         */
        int space = GET_IOSPACE(pfn);
        unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;

        prot = __pgprot(pg_iobits);
        offset -= from;
        dir = pgd_offset(mm, from);
        flush_cache_range(vma, beg, end);
        spin_lock(&mm->page_table_lock);
        while (from < end) {
                /* Allocate in the vma's mm, consistently with the rest
                 * of this walk, rather than in current->mm.
                 */
                pud_t *pud = pud_alloc(mm, dir, from);
                error = -ENOMEM;
                if (!pud)
                        break;
                error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
                if (error)
                        break;
                from = (from + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        flush_tlb_range(vma, beg, end);
        spin_unlock(&mm->page_table_lock);
        return error;
}
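
For context, here is a minimal sketch of how a driver mmap handler of this kernel vintage might call io_remap_pfn_range(). The names mydev_mmap, mydev_base and mydev_size are hypothetical stand-ins for a real device's physical window, and error handling is reduced to the essentials:

/* Hypothetical driver mmap handler (not part of this file). */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        if (size > mydev_size)
                return -EINVAL;

        /* On sparc64 the pfn passed here can carry the IO space
         * number in its high bits; io_remap_pfn_range() recovers it
         * with GET_IOSPACE()/GET_PFN(). The prot argument is
         * overridden with pg_iobits internally.
         */
        return io_remap_pfn_range(vma, vma->vm_start,
                                  mydev_base >> PAGE_SHIFT,
                                  size, vma->vm_page_prot);
}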