/drivers/gpu/mali/mali/common/mali_mmu.c

https://bitbucket.org/ndreys/linux-sunxi · C · 619 lines · 443 code · 100 blank · 76 comment · 64 complexity · 8e20e54644182be5e8ef364bbe37d2a4 MD5 · raw file

  1. /*
  2. * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
  3. *
  4. * This program is free software and is provided to you under the terms of the GNU General Public License version 2
  5. * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
  6. *
  7. * A copy of the licence is included with the program, and can also be obtained from Free Software
  8. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  9. */
  10. #include "mali_kernel_common.h"
  11. #include "mali_osk.h"
  12. #include "mali_osk_bitops.h"
  13. #include "mali_osk_list.h"
  14. #include "mali_ukk.h"
  15. #include "mali_mmu.h"
  16. #include "mali_hw_core.h"
  17. #include "mali_group.h"
  18. #include "mali_mmu_page_directory.h"
/**
 * Size of the MMU register window in bytes (9 registers * 4 bytes,
 * 0x00 .. 0x20 inclusive).
 */
#define MALI_MMU_REGISTERS_SIZE 0x24

/**
 * MMU register numbers (byte offsets into the register window).
 * Used in the register read/write routines.
 * See the hardware documentation for more information about each register.
 */
typedef enum mali_mmu_register {
	MALI_MMU_REGISTER_DTE_ADDR = 0x0000,        /**< Current Page Directory Pointer */
	MALI_MMU_REGISTER_STATUS = 0x0004,          /**< Status of the MMU */
	MALI_MMU_REGISTER_COMMAND = 0x0008,         /**< Command register, used to control the MMU */
	MALI_MMU_REGISTER_PAGE_FAULT_ADDR = 0x000C, /**< Logical address of the last page fault */
	MALI_MMU_REGISTER_ZAP_ONE_LINE = 0x010,     /**< Used to invalidate the mapping of a single page from the MMU */
	MALI_MMU_REGISTER_INT_RAWSTAT = 0x0014,     /**< Raw interrupt status, all interrupts visible */
	MALI_MMU_REGISTER_INT_CLEAR = 0x0018,       /**< Indicate to the MMU that the interrupt has been received */
	MALI_MMU_REGISTER_INT_MASK = 0x001C,        /**< Enable/disable types of interrupts */
	MALI_MMU_REGISTER_INT_STATUS = 0x0020       /**< Interrupt status based on the mask */
} mali_mmu_register;
/**
 * MMU interrupt register bits.
 * Each cause of the interrupt is reported
 * through the (raw) interrupt status registers.
 * Multiple interrupts can be pending, so multiple bits
 * can be set at once.
 */
typedef enum mali_mmu_interrupt
{
	MALI_MMU_INTERRUPT_PAGE_FAULT = 0x01,     /**< A page fault occurred */
	MALI_MMU_INTERRUPT_READ_BUS_ERROR = 0x02  /**< A bus read error occurred */
} mali_mmu_interrupt;

/**
 * MMU commands.
 * These are the commands that can be written to
 * MALI_MMU_REGISTER_COMMAND to control the MMU unit.
 */
typedef enum mali_mmu_command
{
	MALI_MMU_COMMAND_ENABLE_PAGING = 0x00,   /**< Enable paging (memory translation) */
	MALI_MMU_COMMAND_DISABLE_PAGING = 0x01,  /**< Disable paging (memory translation) */
	MALI_MMU_COMMAND_ENABLE_STALL = 0x02,    /**< Enable stall on page fault */
	MALI_MMU_COMMAND_DISABLE_STALL = 0x03,   /**< Disable stall on page fault */
	MALI_MMU_COMMAND_ZAP_CACHE = 0x04,       /**< Zap the entire page table cache */
	MALI_MMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
	MALI_MMU_COMMAND_HARD_RESET = 0x06       /**< Reset the MMU back to power-on settings */
} mali_mmu_command;

/**
 * Bit positions of MALI_MMU_REGISTER_STATUS.
 */
typedef enum mali_mmu_status_bits
{
	MALI_MMU_STATUS_BIT_PAGING_ENABLED = 1 << 0,
	MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE = 1 << 1,
	MALI_MMU_STATUS_BIT_STALL_ACTIVE = 1 << 2,
	MALI_MMU_STATUS_BIT_IDLE = 1 << 3,
	MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
	MALI_MMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
} mali_mmu_status_bits;
/**
 * Definition of the MMU struct.
 * Used to track a MMU unit in the system.
 * Contains information about the mapping of the registers.
 */
struct mali_mmu_core
{
	struct mali_hw_core hw_core; /**< Common for all HW cores; must stay first so the register helpers work on &mmu->hw_core */
	struct mali_group *group;    /**< Parent core group; NULL until mali_mmu_set_group() is called */
	_mali_osk_irq_t *irq;        /**< IRQ handler handle from _mali_osk_irq_init() */
};
/**
 * The MMU interrupt handler.
 * Upper half of the MMU interrupt processing.
 * Called by the kernel when the MMU has triggered an interrupt.
 * The interrupt function supports IRQ sharing, so it probes the MMU in question.
 * @param data Points to the MMU object being handled
 * @return _MALI_OSK_ERR_OK if the interrupt came from this MMU,
 *         _MALI_OSK_ERR_FAULT if not (the OSK layer presumably maps this
 *         to IRQ_HANDLED / IRQ_NONE — TODO confirm against mali_osk)
 */
static _mali_osk_errcode_t mali_mmu_upper_half(void * data);

/**
 * The MMU page-fault work handler.
 * Bottom half of the MMU interrupt processing for page faults and bus errors.
 * @param data Points to the MMU object being handled
 */
static void mali_mmu_bottom_half(void *data);

/* IRQ-probing callbacks used by _mali_osk_irq_init() when the IRQ number is unknown */
static void mali_mmu_probe_trigger(void *data);
static _mali_osk_errcode_t mali_mmu_probe_ack(void *data);

MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu);

/* page fault queue flush helper pages
 * note that the mapping pointers are currently unused outside of the initialization functions */
static u32 mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
static u32 mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
static u32 mali_page_fault_flush_data_page = MALI_INVALID_PAGE;

/* an empty page directory (no address valid) which is active on any MMU not currently marked as in use */
static u32 mali_empty_page_directory = MALI_INVALID_PAGE;
  115. _mali_osk_errcode_t mali_mmu_initialize(void)
  116. {
  117. /* allocate the helper pages */
  118. mali_empty_page_directory = mali_allocate_empty_page();
  119. if(0 == mali_empty_page_directory)
  120. {
  121. mali_empty_page_directory = MALI_INVALID_PAGE;
  122. return _MALI_OSK_ERR_NOMEM;
  123. }
  124. if (_MALI_OSK_ERR_OK != mali_create_fault_flush_pages(&mali_page_fault_flush_page_directory,
  125. &mali_page_fault_flush_page_table, &mali_page_fault_flush_data_page))
  126. {
  127. mali_free_empty_page(mali_empty_page_directory);
  128. return _MALI_OSK_ERR_FAULT;
  129. }
  130. return _MALI_OSK_ERR_OK;
  131. }
  132. void mali_mmu_terminate(void)
  133. {
  134. MALI_DEBUG_PRINT(3, ("Mali MMU: terminating\n"));
  135. /* Free global helper pages */
  136. mali_free_empty_page(mali_empty_page_directory);
  137. /* Free the page fault flush pages */
  138. mali_destroy_fault_flush_pages(&mali_page_fault_flush_page_directory,
  139. &mali_page_fault_flush_page_table, &mali_page_fault_flush_data_page);
  140. }
  141. struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource)
  142. {
  143. struct mali_mmu_core* mmu = NULL;
  144. MALI_DEBUG_ASSERT_POINTER(resource);
  145. MALI_DEBUG_PRINT(2, ("Mali MMU: Creating Mali MMU: %s\n", resource->description));
  146. mmu = _mali_osk_calloc(1,sizeof(struct mali_mmu_core));
  147. if (NULL != mmu)
  148. {
  149. if (_MALI_OSK_ERR_OK == mali_hw_core_create(&mmu->hw_core, resource, MALI_MMU_REGISTERS_SIZE))
  150. {
  151. if (_MALI_OSK_ERR_OK == mali_mmu_reset(mmu))
  152. {
  153. /* Setup IRQ handlers (which will do IRQ probing if needed) */
  154. mmu->irq = _mali_osk_irq_init(resource->irq,
  155. mali_mmu_upper_half,
  156. mali_mmu_bottom_half,
  157. mali_mmu_probe_trigger,
  158. mali_mmu_probe_ack,
  159. mmu,
  160. "mali_mmu_irq_handlers");
  161. if (NULL != mmu->irq)
  162. {
  163. return mmu;
  164. }
  165. else
  166. {
  167. MALI_PRINT_ERROR(("Failed to setup interrupt handlers for MMU %s\n", mmu->hw_core.description));
  168. }
  169. }
  170. mali_hw_core_delete(&mmu->hw_core);
  171. }
  172. _mali_osk_free(mmu);
  173. }
  174. else
  175. {
  176. MALI_PRINT_ERROR(("Failed to allocate memory for MMU\n"));
  177. }
  178. return NULL;
  179. }
/**
 * Destroy a MMU object created by mali_mmu_create().
 * Tears down in reverse creation order: IRQ handlers first, then the
 * register mapping, then the object itself.
 */
void mali_mmu_delete(struct mali_mmu_core *mmu)
{
	_mali_osk_irq_term(mmu->irq);
	mali_hw_core_delete(&mmu->hw_core);
	_mali_osk_free(mmu);
}
/**
 * Attach the MMU to its parent core group.
 * Several functions below assert the group lock only when this has been set.
 */
void mali_mmu_set_group(struct mali_mmu_core *mmu, struct mali_group *group)
{
	mmu->group = group;
}
  190. static void mali_mmu_enable_paging(struct mali_mmu_core *mmu)
  191. {
  192. const int max_loop_count = 100;
  193. const int delay_in_usecs = 1;
  194. int i;
  195. mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
  196. for (i = 0; i < max_loop_count; ++i)
  197. {
  198. if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_PAGING_ENABLED)
  199. {
  200. break;
  201. }
  202. _mali_osk_time_ubusydelay(delay_in_usecs);
  203. }
  204. if (max_loop_count == i)
  205. {
  206. MALI_PRINT_ERROR(("Enable paging request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
  207. }
  208. }
  209. mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu)
  210. {
  211. const int max_loop_count = 100;
  212. const int delay_in_usecs = 999;
  213. int i;
  214. u32 mmu_status;
  215. /* There are no group when it is called from mali_mmu_create */
  216. if ( mmu->group ) MALI_ASSERT_GROUP_LOCKED(mmu->group);
  217. mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
  218. if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED) )
  219. {
  220. MALI_DEBUG_PRINT(4, ("MMU stall is implicit when Paging is not enebled.\n"));
  221. return MALI_TRUE;
  222. }
  223. if ( mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE )
  224. {
  225. MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it is in pagefault state.\n"));
  226. return MALI_FALSE;
  227. }
  228. mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
  229. for (i = 0; i < max_loop_count; ++i)
  230. {
  231. mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
  232. if ( mmu_status & (MALI_MMU_STATUS_BIT_STALL_ACTIVE|MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE))
  233. {
  234. break;
  235. }
  236. if ( 0 == (mmu_status & ( MALI_MMU_STATUS_BIT_PAGING_ENABLED )))
  237. {
  238. break;
  239. }
  240. _mali_osk_time_ubusydelay(delay_in_usecs);
  241. }
  242. if (max_loop_count == i)
  243. {
  244. MALI_PRINT_ERROR(("Enable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
  245. return MALI_FALSE;
  246. }
  247. if ( mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE )
  248. {
  249. MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it has a pagefault.\n"));
  250. return MALI_FALSE;
  251. }
  252. return MALI_TRUE;
  253. }
  254. void mali_mmu_disable_stall(struct mali_mmu_core *mmu)
  255. {
  256. const int max_loop_count = 100;
  257. const int delay_in_usecs = 1;
  258. int i;
  259. u32 mmu_status;
  260. /* There are no group when it is called from mali_mmu_create */
  261. if ( mmu->group ) MALI_ASSERT_GROUP_LOCKED(mmu->group);
  262. mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
  263. if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED ))
  264. {
  265. MALI_DEBUG_PRINT(3, ("MMU disable skipped since it was not enabled.\n"));
  266. return;
  267. }
  268. if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)
  269. {
  270. MALI_DEBUG_PRINT(2, ("Aborting MMU disable stall request since it is in pagefault state.\n"));
  271. return;
  272. }
  273. mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
  274. for (i = 0; i < max_loop_count; ++i)
  275. {
  276. u32 status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
  277. if ( 0 == (status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) )
  278. {
  279. break;
  280. }
  281. if ( status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE )
  282. {
  283. break;
  284. }
  285. if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED ))
  286. {
  287. break;
  288. }
  289. _mali_osk_time_ubusydelay(delay_in_usecs);
  290. }
  291. if (max_loop_count == i) MALI_DEBUG_PRINT(1,("Disable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
  292. }
/**
 * Tell the MMU that a page fault has been handled, taking it out of
 * page fault mode. Caller must hold the group lock.
 */
void mali_mmu_page_fault_done(struct mali_mmu_core *mmu)
{
	MALI_ASSERT_GROUP_LOCKED(mmu->group);
	MALI_DEBUG_PRINT(4, ("Mali MMU: %s: Leaving page fault mode\n", mmu->hw_core.description));
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_PAGE_FAULT_DONE);
}
  299. MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu)
  300. {
  301. const int max_loop_count = 100;
  302. const int delay_in_usecs = 1;
  303. int i;
  304. /* The _if_ is neccessary when called from mali_mmu_create and NULL==group */
  305. if (mmu->group)MALI_ASSERT_GROUP_LOCKED(mmu->group);
  306. mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, 0xCAFEBABE);
  307. mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_HARD_RESET);
  308. for (i = 0; i < max_loop_count; ++i)
  309. {
  310. if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR) == 0)
  311. {
  312. break;
  313. }
  314. _mali_osk_time_ubusydelay(delay_in_usecs);
  315. }
  316. if (max_loop_count == i)
  317. {
  318. MALI_PRINT_ERROR(("Reset request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
  319. return _MALI_OSK_ERR_FAULT;
  320. }
  321. return _MALI_OSK_ERR_OK;
  322. }
  323. _mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu)
  324. {
  325. _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
  326. mali_bool stall_success;
  327. MALI_DEBUG_ASSERT_POINTER(mmu);
  328. /* The _if_ is neccessary when called from mali_mmu_create and NULL==group */
  329. if (mmu->group)
  330. {
  331. MALI_ASSERT_GROUP_LOCKED(mmu->group);
  332. }
  333. stall_success = mali_mmu_enable_stall(mmu);
  334. /* The stall can not fail in current hw-state */
  335. MALI_DEBUG_ASSERT(stall_success);
  336. MALI_DEBUG_PRINT(3, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->hw_core.description));
  337. if (_MALI_OSK_ERR_OK == mali_mmu_raw_reset(mmu))
  338. {
  339. mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
  340. /* no session is active, so just activate the empty page directory */
  341. mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory);
  342. mali_mmu_enable_paging(mmu);
  343. err = _MALI_OSK_ERR_OK;
  344. }
  345. mali_mmu_disable_stall(mmu);
  346. return err;
  347. }
/* ------------- interrupt handling below ------------------ */

/**
 * Upper-half (hard IRQ context) handler.
 * Checks whether this MMU raised the interrupt (the IRQ line may be
 * shared); if so, masks further MMU interrupts, defers page faults to
 * the bottom half, and handles bus read errors inline.
 */
static _mali_osk_errcode_t mali_mmu_upper_half(void * data)
{
	struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
	u32 int_stat;
	MALI_DEBUG_ASSERT_POINTER(mmu);
	/* Check if it was our device which caused the interrupt (we could be sharing the IRQ line) */
	int_stat = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
	if (0 != int_stat)
	{
		/* Mask all MMU interrupts; page faults stay masked until the bottom half has run */
		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, 0);
		/* Read status — NOTE(review): result discarded, presumably a read-back to flush the mask write; confirm */
		mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
		if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT)
		{
			/* Defer page fault handling to the bottom half (needs the group lock) */
			_mali_osk_irq_schedulework(mmu->irq);
		}
		if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR)
		{
			/* clear interrupt flag */
			mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
			/* reenable it */
			mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK,
			mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK) | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
			MALI_PRINT_ERROR(("Mali MMU: Read bus error\n"));
		}
		return _MALI_OSK_ERR_OK;
	}
	/* Not ours — let other handlers on the shared line have a go */
	return _MALI_OSK_ERR_FAULT;
}
  377. static void mali_mmu_bottom_half(void * data)
  378. {
  379. struct mali_mmu_core *mmu = (struct mali_mmu_core*)data;
  380. u32 raw, status, fault_address;
  381. MALI_DEBUG_ASSERT_POINTER(mmu);
  382. MALI_DEBUG_PRINT(3, ("Mali MMU: Page fault bottom half: Locking subsystems\n"));
  383. mali_group_lock(mmu->group); /* Unlocked in mali_group_bottom_half */
  384. if ( MALI_FALSE == mali_group_power_is_on(mmu->group) )
  385. {
  386. MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.",mmu->hw_core.description));
  387. mali_group_unlock(mmu->group);
  388. return;
  389. }
  390. raw = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT);
  391. status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
  392. if ( (0==(raw & MALI_MMU_INTERRUPT_PAGE_FAULT)) && (0==(status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)) )
  393. {
  394. MALI_DEBUG_PRINT(2, ("Mali MMU: Page fault bottom half: No Irq found.\n"));
  395. mali_group_unlock(mmu->group);
  396. /* MALI_DEBUG_ASSERT(0); */
  397. return;
  398. }
  399. /* An actual page fault has occurred. */
  400. fault_address = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_PAGE_FAULT_ADDR);
  401. MALI_DEBUG_PRINT(2,("Mali MMU: Page fault detected at 0x%x from bus id %d of type %s on %s\n",
  402. (void*)fault_address,
  403. (status >> 6) & 0x1F,
  404. (status & 32) ? "write" : "read",
  405. mmu->hw_core.description));
  406. mali_group_bottom_half(mmu->group, GROUP_EVENT_MMU_PAGE_FAULT); /* Unlocks the group lock */
  407. }
  408. mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu)
  409. {
  410. mali_bool stall_success;
  411. MALI_ASSERT_GROUP_LOCKED(mmu->group);
  412. stall_success = mali_mmu_enable_stall(mmu);
  413. mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
  414. if (MALI_FALSE == stall_success)
  415. {
  416. /* False means that it is in Pagefault state. Not possible to disable_stall then */
  417. return MALI_FALSE;
  418. }
  419. mali_mmu_disable_stall(mmu);
  420. return MALI_TRUE;
  421. }
/**
 * Invalidate the whole TLB without performing the stall handshake.
 * Caller must hold the group lock.
 */
void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu)
{
	MALI_ASSERT_GROUP_LOCKED(mmu->group);
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
}
/**
 * Invalidate the TLB entry covering a single Mali virtual address.
 * The ZAP_ONE_LINE register takes the page directory index for the
 * address (via MALI_MMU_PDE_ENTRY). Caller must hold the group lock.
 */
void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address)
{
	MALI_ASSERT_GROUP_LOCKED(mmu->group);
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_ZAP_ONE_LINE, MALI_MMU_PDE_ENTRY(mali_address));
}
/**
 * Point the MMU at a new page directory and flush the page table cache.
 * The MMU must be stalled or in page fault mode for the DTE_ADDR write
 * to take effect (asserted below). Caller must hold the group lock.
 *
 * @param page_directory Physical address of the new page directory
 */
static void mali_mmu_activate_address_space(struct mali_mmu_core *mmu, u32 page_directory)
{
	MALI_ASSERT_GROUP_LOCKED(mmu->group);
	/* The MMU must be in stalled or page fault mode, for this writing to work */
	MALI_DEBUG_ASSERT( 0 != ( mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)
	                          & (MALI_MMU_STATUS_BIT_STALL_ACTIVE|MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) ) );
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, page_directory);
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
}
  441. mali_bool mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir)
  442. {
  443. mali_bool stall_success;
  444. MALI_DEBUG_ASSERT_POINTER(mmu);
  445. MALI_ASSERT_GROUP_LOCKED(mmu->group);
  446. MALI_DEBUG_PRINT(5, ("Asked to activate page directory 0x%x on MMU %s\n", pagedir, mmu->hw_core.description));
  447. stall_success = mali_mmu_enable_stall(mmu);
  448. if ( MALI_FALSE==stall_success ) return MALI_FALSE;
  449. mali_mmu_activate_address_space(mmu, pagedir->page_directory);
  450. mali_mmu_disable_stall(mmu);
  451. return MALI_TRUE;
  452. }
  453. void mali_mmu_activate_empty_page_directory(struct mali_mmu_core* mmu)
  454. {
  455. mali_bool stall_success;
  456. MALI_DEBUG_ASSERT_POINTER(mmu);
  457. MALI_ASSERT_GROUP_LOCKED(mmu->group);
  458. MALI_DEBUG_PRINT(3, ("Activating the empty page directory on MMU %s\n", mmu->hw_core.description));
  459. stall_success = mali_mmu_enable_stall(mmu);
  460. /* This function can only be called when the core is idle, so it could not fail. */
  461. MALI_DEBUG_ASSERT( stall_success );
  462. mali_mmu_activate_address_space(mmu, mali_empty_page_directory);
  463. mali_mmu_disable_stall(mmu);
  464. }
  465. void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core* mmu)
  466. {
  467. mali_bool stall_success;
  468. MALI_DEBUG_ASSERT_POINTER(mmu);
  469. MALI_ASSERT_GROUP_LOCKED(mmu->group);
  470. MALI_DEBUG_PRINT(3, ("Activating the page fault flush page directory on MMU %s\n", mmu->hw_core.description));
  471. stall_success = mali_mmu_enable_stall(mmu);
  472. /* This function is expect to fail the stalling, since it might be in PageFault mode when it is called */
  473. mali_mmu_activate_address_space(mmu, mali_page_fault_flush_page_directory);
  474. if ( MALI_TRUE==stall_success ) mali_mmu_disable_stall(mmu);
  475. }
/* Is called when we want the mmu to give an interrupt.
 * Writing to INT_RAWSTAT forces both interrupt causes to fire, which the
 * IRQ probe then observes via mali_mmu_probe_ack(). */
static void mali_mmu_probe_trigger(void *data)
{
	struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR);
}
  482. /* Is called when the irq probe wants the mmu to acknowledge an interrupt from the hw */
  483. static _mali_osk_errcode_t mali_mmu_probe_ack(void *data)
  484. {
  485. struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
  486. u32 int_stat;
  487. int_stat = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
  488. MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_acknowledge: intstat 0x%x\n", int_stat));
  489. if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT)
  490. {
  491. MALI_DEBUG_PRINT(2, ("Probe: Page fault detect: PASSED\n"));
  492. mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
  493. }
  494. else
  495. {
  496. MALI_DEBUG_PRINT(1, ("Probe: Page fault detect: FAILED\n"));
  497. }
  498. if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR)
  499. {
  500. MALI_DEBUG_PRINT(2, ("Probe: Bus read error detect: PASSED\n"));
  501. mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
  502. }
  503. else
  504. {
  505. MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n"));
  506. }
  507. if ( (int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR)) ==
  508. (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR))
  509. {
  510. return _MALI_OSK_ERR_OK;
  511. }
  512. return _MALI_OSK_ERR_FAULT;
  513. }
#if 0
/* Debug helper (currently compiled out): dumps the raw MMU status register. */
void mali_mmu_print_state(struct mali_mmu_core *mmu)
{
	MALI_DEBUG_PRINT(2, ("MMU: State of %s is 0x%08x\n", mmu->hw_core.description, mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
}
#endif