/drivers/staging/tidspbridge/hw/hw_mmu.h

https://bitbucket.org/slukk/jb-tsm-kernel-4.2 · C Header · 163 lines · 98 code · 40 blank · 25 comment · 8 complexity · 0ad1e0650a3060b6b9d61df87c745e64 MD5 · raw file

  1. /*
  2. * hw_mmu.h
  3. *
  4. * DSP-BIOS Bridge driver support functions for TI OMAP processors.
  5. *
  6. * MMU types and API declarations
  7. *
  8. * Copyright (C) 2007 Texas Instruments, Inc.
  9. *
  10. * This package is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License version 2 as
  12. * published by the Free Software Foundation.
  13. *
  14. * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
  15. * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
  16. * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  17. */
#ifndef _HW_MMU_H
#define _HW_MMU_H
#include <linux/types.h>
/* Bitmasks for interrupt sources */
#define HW_MMU_TRANSLATION_FAULT 0x2	/* TLB-miss / translation fault IRQ bit */
#define HW_MMU_ALL_INTERRUPTS 0x1F	/* all five MMU IRQ source bits */
#define HW_MMU_COARSE_PAGE_SIZE 0x400	/* L2 (coarse) page table size: 1 KiB */
/*
 * hw_mmu_mixed_size_t: Enumerated Type used to specify whether to follow
 * CPU/TLB Element size
 */
enum hw_mmu_mixed_size_t {
	HW_MMU_TLBES,		/* use the element size held in the TLB entry */
	HW_MMU_CPUES		/* use the CPU's element size */
};
/* hw_mmu_map_attrs_t: Struct containing MMU mapping attributes */
struct hw_mmu_map_attrs_t {
	enum hw_endianism_t endianism;		/* endianness of the mapped region */
	enum hw_element_size_t element_size;	/* access element size for the mapping */
	enum hw_mmu_mixed_size_t mixed_size;	/* CPU vs. TLB element-size selection */
	bool donotlockmpupage;			/* NOTE(review): presumably skips pinning the
						 * MPU-side page for this mapping — confirm
						 * against hw_mmu.c users */
};
/* Enable/disable the MMU at the given register base. */
extern hw_status hw_mmu_enable(const void __iomem *base_address);
extern hw_status hw_mmu_disable(const void __iomem *base_address);

/* Set the number of locked (non-replaceable) TLB entries. */
extern hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
				       u32 num_locked_entries);

/* Set the TLB victim entry (next entry to be replaced on a load). */
extern hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
				       u32 victim_entry_num);

/* For MMU faults: acknowledge / mask / unmask / query IRQ sources
 * (irq_mask uses the HW_MMU_* bits defined above). */
extern hw_status hw_mmu_event_ack(const void __iomem *base_address,
				  u32 irq_mask);
extern hw_status hw_mmu_event_disable(const void __iomem *base_address,
				      u32 irq_mask);
extern hw_status hw_mmu_event_enable(const void __iomem *base_address,
				     u32 irq_mask);
extern hw_status hw_mmu_event_status(const void __iomem *base_address,
				     u32 *irq_mask);

/* Read the virtual address that caused the most recent MMU fault. */
extern hw_status hw_mmu_fault_addr_read(const void __iomem *base_address,
					u32 *addr);

/* Set the TT base address (physical address of the L1 translation table). */
extern hw_status hw_mmu_ttb_set(const void __iomem *base_address,
				u32 ttb_phys_addr);

/* Enable/disable hardware table-walking logic (TWL). */
extern hw_status hw_mmu_twl_enable(const void __iomem *base_address);
extern hw_status hw_mmu_twl_disable(const void __iomem *base_address);

/* Flush the TLB entry covering virtual_addr for the given page size. */
extern hw_status hw_mmu_tlb_flush(const void __iomem *base_address,
				  u32 virtual_addr, u32 page_sz);

/* Program one TLB entry (entry_num) with a phys/virt translation,
 * page size, mapping attributes, and preserved/valid flags. */
extern hw_status hw_mmu_tlb_add(const void __iomem *base_address,
				u32 physical_addr,
				u32 virtual_addr,
				u32 page_sz,
				u32 entry_num,
				struct hw_mmu_map_attrs_t *map_attrs,
				s8 preserved_bit, s8 valid_bit);

/* For PTEs: write/clear a page-table entry in the table at pg_tbl_va. */
extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
				u32 physical_addr,
				u32 virtual_addr,
				u32 page_sz,
				struct hw_mmu_map_attrs_t *map_attrs);
extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
				  u32 virtual_addr, u32 page_size);

/* Flush the entire TLB. */
void hw_mmu_tlb_flush_all(const void __iomem *base);
  78. static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va)
  79. {
  80. u32 pte_addr;
  81. u32 va31_to20;
  82. va31_to20 = va >> (20 - 2); /* Left-shift by 2 here itself */
  83. va31_to20 &= 0xFFFFFFFCUL;
  84. pte_addr = l1_base + va31_to20;
  85. return pte_addr;
  86. }
  87. static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va)
  88. {
  89. u32 pte_addr;
  90. pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);
  91. return pte_addr;
  92. }
  93. static inline u32 hw_mmu_pte_coarse_l1(u32 pte_val)
  94. {
  95. u32 pte_coarse;
  96. pte_coarse = pte_val & 0xFFFFFC00;
  97. return pte_coarse;
  98. }
  99. static inline u32 hw_mmu_pte_size_l1(u32 pte_val)
  100. {
  101. u32 pte_size = 0;
  102. if ((pte_val & 0x3) == 0x1) {
  103. /* Points to L2 PT */
  104. pte_size = HW_MMU_COARSE_PAGE_SIZE;
  105. }
  106. if ((pte_val & 0x3) == 0x2) {
  107. if (pte_val & (1 << 18))
  108. pte_size = HW_PAGE_SIZE16MB;
  109. else
  110. pte_size = HW_PAGE_SIZE1MB;
  111. }
  112. return pte_size;
  113. }
  114. static inline u32 hw_mmu_pte_size_l2(u32 pte_val)
  115. {
  116. u32 pte_size = 0;
  117. if (pte_val & 0x2)
  118. pte_size = HW_PAGE_SIZE4KB;
  119. else if (pte_val & 0x1)
  120. pte_size = HW_PAGE_SIZE64KB;
  121. return pte_size;
  122. }
  123. #endif /* _HW_MMU_H */