/drivers/gpu/msm/kgsl_mmu.c

Source: https://gitlab.com/TeamTators/hp-kernel-tenderloin · C · 1106 lines

/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 */
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/bitmap.h>
#ifdef CONFIG_MSM_KGSL_MMU
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#endif

#include "kgsl_mmu.h"
#include "kgsl_drawctxt.h"
#include "kgsl.h"
#include "kgsl_log.h"
#include "yamato_reg.h"
#include "g12_reg.h"
#include "kgsl_device.h"
#include "kgsl_g12.h"
#include "kgsl_yamato.h"
struct kgsl_pte_debug {
	unsigned int read:1;
	unsigned int write:1;
	unsigned int dirty:1;
	unsigned int reserved:9;
	unsigned int phyaddr:20;
};

#define GSL_PT_PAGE_BITS_MASK	0x00000007
#define GSL_PT_PAGE_ADDR_MASK	(~(KGSL_PAGESIZE - 1))

#define GSL_MMU_INT_MASK \
	(MH_INTERRUPT_MASK__AXI_READ_ERROR | \
	 MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
static const struct kgsl_mmu_reg mmu_reg[KGSL_DEVICE_MAX] = {
	{
		.config = REG_MH_MMU_CONFIG,
		.mpu_base = REG_MH_MMU_MPU_BASE,
		.mpu_end = REG_MH_MMU_MPU_END,
		.va_range = REG_MH_MMU_VA_RANGE,
		.pt_page = REG_MH_MMU_PT_BASE,
		.page_fault = REG_MH_MMU_PAGE_FAULT,
		.tran_error = REG_MH_MMU_TRAN_ERROR,
		.invalidate = REG_MH_MMU_INVALIDATE,
		.interrupt_mask = REG_MH_INTERRUPT_MASK,
		.interrupt_status = REG_MH_INTERRUPT_STATUS,
		.interrupt_clear = REG_MH_INTERRUPT_CLEAR
	},
	{
		.config = ADDR_MH_MMU_CONFIG,
		.mpu_base = ADDR_MH_MMU_MPU_BASE,
		.mpu_end = ADDR_MH_MMU_MPU_END,
		.va_range = ADDR_MH_MMU_VA_RANGE,
		.pt_page = ADDR_MH_MMU_PT_BASE,
		.page_fault = ADDR_MH_MMU_PAGE_FAULT,
		.tran_error = ADDR_MH_MMU_TRAN_ERROR,
		.invalidate = ADDR_MH_MMU_INVALIDATE,
		.interrupt_mask = ADDR_MH_INTERRUPT_MASK,
		.interrupt_status = ADDR_MH_INTERRUPT_STATUS,
		.interrupt_clear = ADDR_MH_INTERRUPT_CLEAR
	},
	{
		.config = ADDR_MH_MMU_CONFIG,
		.mpu_base = ADDR_MH_MMU_MPU_BASE,
		.mpu_end = ADDR_MH_MMU_MPU_END,
		.va_range = ADDR_MH_MMU_VA_RANGE,
		.pt_page = ADDR_MH_MMU_PT_BASE,
		.page_fault = ADDR_MH_MMU_PAGE_FAULT,
		.tran_error = ADDR_MH_MMU_TRAN_ERROR,
		.invalidate = ADDR_MH_MMU_INVALIDATE,
		.interrupt_mask = ADDR_MH_INTERRUPT_MASK,
		.interrupt_status = ADDR_MH_INTERRUPT_STATUS,
		.interrupt_clear = ADDR_MH_INTERRUPT_CLEAR
	}
};
static ssize_t
sysfs_show_ptpool_entries(struct kobject *kobj,
			  struct kobj_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", kgsl_driver.ptpool.entries);
}

static ssize_t
sysfs_show_ptpool_min(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	return sprintf(buf, "%d\n", kgsl_driver.ptpool.static_entries);
}

static ssize_t
sysfs_show_ptpool_chunks(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%d\n", kgsl_driver.ptpool.chunks);
}

static ssize_t
sysfs_show_ptpool_ptsize(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%d\n", kgsl_driver.ptpool.ptsize);
}

static struct kobj_attribute attr_ptpool_entries = {
	.attr = { .name = "ptpool_entries", .mode = 0444 },
	.show = sysfs_show_ptpool_entries,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_min = {
	.attr = { .name = "ptpool_min", .mode = 0444 },
	.show = sysfs_show_ptpool_min,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_chunks = {
	.attr = { .name = "ptpool_chunks", .mode = 0444 },
	.show = sysfs_show_ptpool_chunks,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_ptsize = {
	.attr = { .name = "ptpool_ptsize", .mode = 0444 },
	.show = sysfs_show_ptpool_ptsize,
	.store = NULL,
};

static struct attribute *ptpool_attrs[] = {
	&attr_ptpool_entries.attr,
	&attr_ptpool_min.attr,
	&attr_ptpool_chunks.attr,
	&attr_ptpool_ptsize.attr,
	NULL,
};

static struct attribute_group ptpool_attr_group = {
	.attrs = ptpool_attrs,
};
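
/*
 * Allocate a new chunk of 'count' pagetable entries and add it to the
 * pool. The backing memory comes from dma_alloc_coherent() and a bitmap
 * tracks which entries in the chunk are in use. Callers hold pool->lock.
 */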
static int
_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
{
	struct kgsl_ptpool_chunk *chunk;
	size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);

	BUG_ON(count == 0);

	if (get_order(size) >= MAX_ORDER) {
		KGSL_DRV_ERR("ptpool allocation is too big: %d\n", size);
		return -EINVAL;
	}

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (chunk == NULL) {
		KGSL_DRV_ERR("kzalloc(%d) failed\n", sizeof(*chunk));
		return -ENOMEM;
	}

	chunk->size = size;
	chunk->count = count;
	chunk->dynamic = dynamic;

	chunk->data = dma_alloc_coherent(NULL, size,
					 &chunk->phys, GFP_KERNEL);
	if (chunk->data == NULL) {
		KGSL_DRV_ERR("dma_alloc_coherent(%d) failed\n", size);
		goto err;
	}

	chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * 4, GFP_KERNEL);
	if (chunk->bitmap == NULL) {
		KGSL_DRV_ERR("kzalloc(%d) failed\n",
			     BITS_TO_LONGS(count) * 4);
		goto err_dma;
	}

	list_add_tail(&chunk->list, &pool->list);

	pool->chunks++;
	pool->entries += count;

	if (!dynamic)
		pool->static_entries += count;

	return 0;

err_dma:
	dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
err:
	kfree(chunk);
	return -ENOMEM;
}
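
/*
 * Scan the pool's chunks for a free entry, mark it used in the chunk
 * bitmap and return its kernel virtual address; the matching physical
 * address is returned through *physaddr. Returns NULL if every chunk
 * is full. Callers hold pool->lock.
 */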
static void *
_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	struct kgsl_ptpool_chunk *chunk;

	list_for_each_entry(chunk, &pool->list, list) {
		int bit = find_first_zero_bit(chunk->bitmap, chunk->count);

		if (bit >= chunk->count)
			continue;

		set_bit(bit, chunk->bitmap);
		*physaddr = chunk->phys + (bit * pool->ptsize);

		return chunk->data + (bit * pool->ptsize);
	}

	return NULL;
}
/**
 * kgsl_ptpool_add
 * @pool: A pointer to a ptpool structure
 * @count: Number of entries to add
 *
 * Add static entries to the pagetable pool.
 */
int
kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
{
	int ret = 0;

	BUG_ON(count == 0);

	mutex_lock(&pool->lock);

	/* Only 4MB can be allocated in one chunk, so larger allocations
	   need to be split into multiple sections */

	while (count) {
		int entries = ((count * pool->ptsize) > SZ_4M) ?
			SZ_4M / pool->ptsize : count;

		/* Add the entries as static, i.e. they don't ever stand
		   a chance of being removed */

		ret = _kgsl_ptpool_add_entries(pool, entries, 0);
		if (ret)
			break;

		count -= entries;
	}

	mutex_unlock(&pool->lock);
	return ret;
}
/**
 * kgsl_ptpool_alloc
 * @pool: A pointer to a ptpool structure
 * @physaddr: A pointer to store the physical address of the pagetable
 *
 * Allocate a pagetable from the pool.  Returns the virtual address
 * of the pagetable, the physical address is returned in physaddr
 */
void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	void *addr = NULL;
	int ret;

	mutex_lock(&pool->lock);
	addr = _kgsl_ptpool_get_entry(pool, physaddr);
	if (addr)
		goto done;

	/* Add a chunk for 1 more pagetable and mark it as dynamic */
	ret = _kgsl_ptpool_add_entries(pool, 1, 1);

	if (ret)
		goto done;

	addr = _kgsl_ptpool_get_entry(pool, physaddr);

done:
	mutex_unlock(&pool->lock);
	return addr;
}
static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
{
	list_del(&chunk->list);

	if (chunk->data)
		dma_free_coherent(NULL, chunk->size, chunk->data,
				  chunk->phys);
	kfree(chunk->bitmap);
	kfree(chunk);
}
/**
 * kgsl_ptpool_free
 * @pool: A pointer to a ptpool structure
 * @addr: The virtual address to free
 *
 * Free a pagetable allocated from the pool
 */
void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL || addr == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list) {
		if (addr >= chunk->data &&
		    addr < chunk->data + chunk->size) {
			int bit = ((unsigned long) (addr - chunk->data)) /
				pool->ptsize;

			clear_bit(bit, chunk->bitmap);
			memset(addr, 0, pool->ptsize);

			if (chunk->dynamic &&
			    bitmap_empty(chunk->bitmap, chunk->count))
				_kgsl_ptpool_rm_chunk(chunk);

			break;
		}
	}

	mutex_unlock(&pool->lock);
}
void kgsl_ptpool_destroy(struct kgsl_ptpool *pool)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list)
		_kgsl_ptpool_rm_chunk(chunk);
	mutex_unlock(&pool->lock);

	memset(pool, 0, sizeof(*pool));
}
/**
 * kgsl_ptpool_init
 * @pool: A pointer to a ptpool structure to initialize
 * @ptsize: The size of each pagetable entry
 * @entries: The number of initial entries to add to the pool
 *
 * Initialize a pool and allocate an initial chunk of entries.
 */
int kgsl_ptpool_init(struct kgsl_ptpool *pool, int ptsize, int entries)
{
	int ret = 0;

	BUG_ON(ptsize == 0);

	pool->ptsize = ptsize;
	mutex_init(&pool->lock);
	INIT_LIST_HEAD(&pool->list);

	if (entries) {
		ret = kgsl_ptpool_add(pool, entries);
		if (ret)
			return ret;
	}

	/* PALM: sysfs not supported yet
	return sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
	*/
	return 0;
}
/* pt_mutex needs to be held in this function */
static struct kgsl_pagetable *
kgsl_get_pagetable(unsigned long name)
{
	struct kgsl_pagetable *pt;

	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt->name == name)
			return pt;
	}

	return NULL;
}
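
/*
 * Pagetable entry helpers: kgsl_pt_entry_get() converts a GPU virtual
 * address into a PTE index, and kgsl_pt_map_set()/kgsl_pt_map_getaddr()
 * write and read the 32-bit entries through the pagetable's host
 * mapping.
 */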
static inline uint32_t
kgsl_pt_entry_get(struct kgsl_pagetable *pt, uint32_t va)
{
	return (va - pt->va_base) >> KGSL_PAGESIZE_SHIFT;
}

static inline void
kgsl_pt_map_set(struct kgsl_pagetable *pt, uint32_t pte, uint32_t val)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;

	writel(val, &baseptr[pte]);
}

static inline uint32_t
kgsl_pt_map_getaddr(struct kgsl_pagetable *pt, uint32_t pte)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;

	return readl(&baseptr[pte]) & GSL_PT_PAGE_ADDR_MASK;
}
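
/*
 * MH (memory hub) interrupt handler: read the interrupt status
 * register, log AXI read/write errors and MMU page faults, then
 * acknowledge whatever was pending by writing the status back to the
 * interrupt clear register.
 */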
void kgsl_mh_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;
	unsigned int reg;

	KGSL_MEM_VDBG("enter (device=%p)\n", device);

	kgsl_regread_isr(device, mmu_reg[device->id].interrupt_status, &status);

	if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR) {
		KGSL_MEM_FATAL("axi read error interrupt\n");
	} else if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR) {
		KGSL_MEM_FATAL("axi write error interrupt\n");
	} else if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT) {
		kgsl_regread_isr(device, mmu_reg[device->id].page_fault, &reg);
		KGSL_MEM_FATAL("mmu page fault interrupt: %08x\n", reg);
	} else {
		KGSL_MEM_DBG("bad bits in REG_MH_INTERRUPT_STATUS %08x\n",
			     status);
	}

	kgsl_regwrite_isr(device, mmu_reg[device->id].interrupt_clear, status);

	/* TODO: figure out how to handle error interrupts.
	 * Specifically, page faults should probably nuke the client that
	 * caused them, but we don't have enough info to figure that out yet.
	 */

	KGSL_MEM_VDBG("return\n");
}
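
/*
 * Look up the pagetable whose backing page matches the pt_base value
 * programmed into the hardware and return its name (id), or -1 if no
 * pagetable in the global list matches.
 */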
int
kgsl_get_ptname_from_ptbase(unsigned int pt_base)
{
	struct kgsl_pagetable *pt;
	int ptid = -1;

	mutex_lock(&kgsl_driver.pt_mutex);

	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt_base == pt->base.gpuaddr) {
			ptid = (int) pt->name;
			break;
		}
	}

	mutex_unlock(&kgsl_driver.pt_mutex);

	return ptid;
}
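
/*
 * Allocate and initialize a new pagetable object: set up the TLB flush
 * filter, create a gen_pool covering the pagetable's GPU virtual range,
 * take a backing page from the global ptpool, and add the object to
 * kgsl_driver.pagetable_list. Called with kgsl_driver.pt_mutex held.
 */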
static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
				struct kgsl_mmu *mmu,
				unsigned int name)
{
	int status = 0;
	struct kgsl_pagetable *pagetable = NULL;

	KGSL_MEM_VDBG("enter (mmu=%p)\n", mmu);

	pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
	if (pagetable == NULL) {
		KGSL_MEM_ERR("Unable to allocate pagetable object.\n");
		return NULL;
	}

	pagetable->magic_number = KGSL_PAGETABLE_INIT_NUMBER;
	pagetable->refcnt = 1;

	spin_lock_init(&pagetable->lock);
	pagetable->tlb_flags = 0;
	pagetable->name = name;
	pagetable->va_base = KGSL_PAGETABLE_BASE;
	pagetable->va_range = CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
	pagetable->last_superpte = 0;
	pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(pagetable->va_range);

	pagetable->tlbflushfilter.size = (pagetable->va_range /
				(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
	pagetable->tlbflushfilter.base = (unsigned int *)
			kzalloc(pagetable->tlbflushfilter.size, GFP_KERNEL);
	if (!pagetable->tlbflushfilter.base) {
		KGSL_MEM_ERR("Failed to create tlbflushfilter\n");
		goto err_alloc;
	}
	GSL_TLBFLUSH_FILTER_RESET();

	pagetable->pool = gen_pool_create(KGSL_PAGESIZE_SHIFT, -1);
	if (pagetable->pool == NULL) {
		KGSL_MEM_ERR("Unable to allocate virtualaddr pool.\n");
		goto err_flushfilter;
	}

	if (gen_pool_add(pagetable->pool, pagetable->va_base,
			 pagetable->va_range, -1)) {
		KGSL_MEM_ERR("gen_pool_add failed for pagetable %p\n",
			     pagetable);
		goto err_pool;
	}

	pagetable->base.hostptr = kgsl_ptpool_alloc(&kgsl_driver.ptpool,
						    &pagetable->base.physaddr);

	if (pagetable->base.hostptr == NULL)
		goto err_pool;

	pagetable->base.gpuaddr = pagetable->base.physaddr;

	status = kgsl_setup_pt(pagetable);
	if (status)
		goto err_free_sharedmem;

	list_add(&pagetable->list, &kgsl_driver.pagetable_list);

	KGSL_MEM_VDBG("return %p\n", pagetable);

	return pagetable;

err_free_sharedmem:
	kgsl_ptpool_free(&kgsl_driver.ptpool, pagetable->base.hostptr);
err_pool:
	gen_pool_destroy(pagetable->pool);
err_flushfilter:
	kfree(pagetable->tlbflushfilter.base);
err_alloc:
	kfree(pagetable);

	return NULL;
}
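
/*
 * Tear down a pagetable: remove it from the global list, clean up its
 * entries, return the backing page to the ptpool, and free the gen_pool
 * and TLB flush filter. Called with kgsl_driver.pt_mutex held once the
 * refcount drops to zero.
 */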
static void kgsl_mmu_destroypagetable(struct kgsl_pagetable *pagetable)
{
	KGSL_MEM_VDBG("enter (pagetable=%p)\n", pagetable);

	list_del(&pagetable->list);

	kgsl_cleanup_pt(pagetable);

	kgsl_ptpool_free(&kgsl_driver.ptpool, pagetable->base.hostptr);

	if (pagetable->pool) {
		gen_pool_destroy(pagetable->pool);
		pagetable->pool = NULL;
	}

	if (pagetable->tlbflushfilter.base) {
		pagetable->tlbflushfilter.size = 0;
		kfree(pagetable->tlbflushfilter.base);
		pagetable->tlbflushfilter.base = NULL;
	}

	pagetable->magic_number = KGSL_PAGETABLE_POISON_NUMBER;

	kfree(pagetable);
}
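
/*
 * Find an existing pagetable by name and take a reference on it, or
 * create a new one if no pagetable with that name exists yet.
 */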
struct kgsl_pagetable *kgsl_mmu_getpagetable(struct kgsl_mmu *mmu,
					     unsigned long name)
{
	struct kgsl_pagetable *pt;

	if (mmu == NULL)
		return NULL;

	mutex_lock(&kgsl_driver.pt_mutex);

	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt->name == name) {
			spin_lock(&pt->lock);
			pt->refcnt++;
			spin_unlock(&pt->lock);
			mutex_unlock(&kgsl_driver.pt_mutex);
			return pt;
		}
	}

	pt = kgsl_mmu_createpagetableobject(mmu, name);
	mutex_unlock(&kgsl_driver.pt_mutex);

	return pt;
}
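
/*
 * Drop a reference on a pagetable and destroy it when the last user
 * goes away.
 */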
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	bool dead;

	if (pagetable == NULL)
		return;

	mutex_lock(&kgsl_driver.pt_mutex);

	spin_lock(&pagetable->lock);
	dead = (--pagetable->refcnt) == 0;
	spin_unlock(&pagetable->lock);

	if (dead)
		kgsl_mmu_destroypagetable(pagetable);

	mutex_unlock(&kgsl_driver.pt_mutex);
}
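
/*
 * Switch the hardware to a new pagetable. If the MMU is running and the
 * requested pagetable is not the one currently in use, clear this
 * device's TLB-flush flag and ask the device to update its pagetable
 * base and flush the TLB.
 */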
int kgsl_mmu_setstate(struct kgsl_device *device,
		      struct kgsl_pagetable *pagetable)
{
	int status = 0;
	struct kgsl_mmu *mmu = &device->mmu;

	KGSL_MEM_VDBG("enter (device=%p, pagetable=%p)\n", device, pagetable);

	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* page table not current, then setup mmu to use new
		 * specified page table
		 */
		// printk(KERN_DEBUG "from %p to %p magic %x\n",
		//	  mmu->hwpagetable, pagetable, pagetable->magic_number);
		if (mmu->hwpagetable != pagetable) {
			// KGSL_MEM_ERR("In kgsl_mmu_setstate() - pagetable = %x"
			//	" pagetable->magic_number = %x\n",
			//	pagetable, pagetable->magic_number);
			mmu->hwpagetable = pagetable;
			spin_lock(&mmu->hwpagetable->lock);
			mmu->hwpagetable->tlb_flags &= ~(1 << device->id);
			spin_unlock(&mmu->hwpagetable->lock);

			/* call device specific set page table */
			status = kgsl_setstate(mmu->device,
				KGSL_MMUFLAGS_TLBFLUSH |
				KGSL_MMUFLAGS_PTUPDATE);
		}
	}
	// else {
	//	printk(KERN_DEBUG "kgsl flags not started "
	//		"(device=%p, pagetable=%p)\n", device, pagetable);
	// }

	KGSL_MEM_VDBG("return %d\n", status);

	return status;
}
int kgsl_mmu_init(struct kgsl_device *device)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */
	int status;
	struct kgsl_mmu *mmu = &device->mmu;

	KGSL_MEM_VDBG("enter (device=%p)\n", device);

	mmu->device = device;

#ifndef CONFIG_MSM_KGSL_MMU
	mmu->config = 0x00000000;
#endif

	/* MMU not enabled */
	if ((mmu->config & 0x1) == 0) {
		KGSL_MEM_VDBG("return %d\n", 0);
		return 0;
	}

	/* make sure aligned to pagesize */
	BUG_ON(mmu->mpu_base & (KGSL_PAGESIZE - 1));
	BUG_ON((mmu->mpu_base + mmu->mpu_range) & (KGSL_PAGESIZE - 1));

	/* sub-client MMU lookups require address translation */
	if ((mmu->config & ~0x1) > 0) {
		/* make sure virtual address range is a multiple of 64Kb */
		BUG_ON(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1));

		/* allocate memory used for completing r/w operations that
		 * cannot be mapped by the MMU
		 */
		status = kgsl_sharedmem_alloc_coherent(&mmu->dummyspace, 64);
		if (status != 0) {
			KGSL_MEM_ERR
			    ("Unable to allocate dummy space memory.\n");
			goto error;
		}

		kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
				   mmu->dummyspace.size);
	}

	KGSL_MEM_VDBG("return %d\n", 0);
	return 0;

error:
	return status;
}
int kgsl_mmu_start(struct kgsl_device *device)
{
	/*
	 * start device mmu
	 *
	 * call this with the global lock held
	 */
	int status;
	struct kgsl_mmu *mmu = &device->mmu;

	KGSL_MEM_VDBG("enter (device=%p)\n", device);

	if (mmu->flags & KGSL_FLAGS_STARTED) {
		KGSL_MEM_INFO("MMU already started.\n");
		return 0;
	}

	/* MMU not enabled */
	if ((mmu->config & 0x1) == 0) {
		KGSL_MEM_VDBG("return %d\n", 0);
		return 0;
	}

	mmu->flags |= KGSL_FLAGS_STARTED;

	/* setup MMU and sub-client behavior */
	kgsl_regwrite(device, mmu_reg[device->id].config, mmu->config);

	/* enable axi interrupts */
	KGSL_MEM_DBG("enabling mmu interrupts mask=0x%08lx\n",
		     GSL_MMU_INT_MASK);
	kgsl_regwrite(device, mmu_reg[device->id].interrupt_mask,
		      GSL_MMU_INT_MASK);

	/* idle device */
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* define physical memory range accessible by the core */
	kgsl_regwrite(device, mmu_reg[device->id].mpu_base, mmu->mpu_base);
	kgsl_regwrite(device, mmu_reg[device->id].mpu_end,
		      mmu->mpu_base + mmu->mpu_range);

	/* enable axi interrupts */
	KGSL_MEM_DBG("enabling mmu interrupts mask=0x%08lx\n",
		     GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);
	kgsl_regwrite(device, mmu_reg[device->id].interrupt_mask,
		      GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);

	/* sub-client MMU lookups require address translation */
	if ((mmu->config & ~0x1) > 0) {

		kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
				   mmu->dummyspace.size);

		/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
		 * to complete transactions in case of an MMU fault. Note that
		 * we'll leave the bottom 32 bytes of the dummyspace for other
		 * purposes (e.g. use it when dummy read cycles are needed
		 * for other blocks) */
		kgsl_regwrite(device, mmu_reg[device->id].tran_error,
			      mmu->dummyspace.physaddr + 32);

		BUG_ON(mmu->defaultpagetable == NULL);
		mmu->hwpagetable = mmu->defaultpagetable;

		kgsl_regwrite(device, mmu_reg[device->id].pt_page,
			      mmu->hwpagetable->base.gpuaddr);
		kgsl_regwrite(device, mmu_reg[device->id].va_range,
			      (mmu->hwpagetable->va_base |
			       (mmu->hwpagetable->va_range >> 16)));

		status = kgsl_setstate(device, KGSL_MMUFLAGS_TLBFLUSH);
		if (status) {
			KGSL_MEM_ERR("Failed to setstate TLBFLUSH\n");
			goto error;
		}
	}

	KGSL_MEM_VDBG("return %d\n", 0);
	return 0;

error:
	/* disable MMU */
	kgsl_regwrite(device, mmu_reg[device->id].interrupt_mask, 0);
	kgsl_regwrite(device, mmu_reg[device->id].config, 0x00000000);
	return status;
}
#ifdef CONFIG_MSM_KGSL_MMU
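/*
 * Translate a userspace virtual address to a physical address by
 * walking the current process's page tables (pgd -> pmd -> pte).
 * Returns 0 if any level of the walk fails.
 */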
unsigned int kgsl_virtaddr_to_physaddr(unsigned int virtaddr)
{
	unsigned int physaddr = 0;
	pgd_t *pgd_ptr = NULL;
	pmd_t *pmd_ptr = NULL;
	pte_t *pte_ptr = NULL, pte;

	pgd_ptr = pgd_offset(current->mm, virtaddr);
	if (pgd_none(*pgd_ptr) || pgd_bad(*pgd_ptr)) {
		KGSL_MEM_ERR
		    ("Invalid pgd entry found while trying to convert virtual "
		     "address to physical\n");
		return 0;
	}

	pmd_ptr = pmd_offset(pgd_ptr, virtaddr);
	if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
		KGSL_MEM_ERR
		    ("Invalid pmd entry found while trying to convert virtual "
		     "address to physical\n");
		return 0;
	}

	pte_ptr = pte_offset_map(pmd_ptr, virtaddr);
	if (!pte_ptr) {
		KGSL_MEM_ERR
		    ("Unable to map pte entry while trying to convert virtual "
		     "address to physical\n");
		return 0;
	}

	pte = *pte_ptr;
	physaddr = pte_pfn(pte);
	pte_unmap(pte_ptr);
	physaddr <<= PAGE_SHIFT;
	return physaddr;
}
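
/*
 * Map a range of memory into the pagetable: carve GPU virtual address
 * space out of the gen_pool (honoring the requested 4K/8K alignment),
 * resolve each page's physical address according to the memory type
 * flags, write the PTEs, and decide whether the TLBs need invalidating
 * based on superpte boundaries and the TLB flush filter.
 */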
int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
	     unsigned int address,
	     int range,
	     unsigned int protflags,
	     unsigned int *gpuaddr,
	     unsigned int flags)
{
	int numpages;
	unsigned int pte, ptefirst, ptelast, physaddr;
	int flushtlb, alloc_size;
	unsigned int align = flags & KGSL_MEMFLAGS_ALIGN_MASK;

	KGSL_MEM_VDBG("enter (pt=%p, physaddr=%08x, range=%08d, gpuaddr=%p)\n",
		      pagetable, address, range, gpuaddr);

	BUG_ON(protflags & ~(GSL_PT_PAGE_RV | GSL_PT_PAGE_WV));
	BUG_ON(protflags == 0);
	BUG_ON(range <= 0);

	/* Only support 4K and 8K alignment for now */
	if (align != KGSL_MEMFLAGS_ALIGN8K && align != KGSL_MEMFLAGS_ALIGN4K) {
		KGSL_MEM_ERR("Cannot map memory according to "
			     "requested flags: %08x\n", flags);
		return -EINVAL;
	}

	/* Make sure address being mapped is at 4K boundary */
	if (!IS_ALIGNED(address, KGSL_PAGESIZE) || range & ~KGSL_PAGEMASK) {
		KGSL_MEM_ERR("Cannot map address not aligned "
			     "at page boundary: address: %08x, range: %08x\n",
			     address, range);
		return -EINVAL;
	}

	alloc_size = range;
	if (align == KGSL_MEMFLAGS_ALIGN8K)
		alloc_size += KGSL_PAGESIZE;

	*gpuaddr = gen_pool_alloc(pagetable->pool, alloc_size);
	if (*gpuaddr == 0) {
		KGSL_MEM_ERR("gen_pool_alloc failed: %d\n", alloc_size);
		return -ENOMEM;
	}

	if (align == KGSL_MEMFLAGS_ALIGN8K) {
		if (*gpuaddr & ((1 << 13) - 1)) {
			/* Not 8k aligned, align it */
			gen_pool_free(pagetable->pool, *gpuaddr, KGSL_PAGESIZE);
			*gpuaddr = *gpuaddr + KGSL_PAGESIZE;
		} else
			gen_pool_free(pagetable->pool, *gpuaddr + range,
				      KGSL_PAGESIZE);
	}
	numpages = (range >> KGSL_PAGESIZE_SHIFT);

	ptefirst = kgsl_pt_entry_get(pagetable, *gpuaddr);
	ptelast = ptefirst + numpages;

	pte = ptefirst;
	flushtlb = 0;

	/* tlb needs to be flushed when the first and last pte are not at
	 * superpte boundaries */
	if ((ptefirst & (GSL_PT_SUPER_PTE - 1)) != 0 ||
	    ((ptelast + 1) & (GSL_PT_SUPER_PTE - 1)) != 0)
		flushtlb = 1;

	spin_lock(&pagetable->lock);
	for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		uint32_t val = kgsl_pt_map_getaddr(pagetable, pte);
		BUG_ON(val != 0 && val != GSL_PT_PAGE_DIRTY);
#endif
		if ((pte & (GSL_PT_SUPER_PTE - 1)) == 0)
			if (GSL_TLBFLUSH_FILTER_ISDIRTY(pte / GSL_PT_SUPER_PTE))
				flushtlb = 1;
		/* mark pte as in use */
		if (flags & KGSL_MEMFLAGS_CONPHYS)
			physaddr = address;
		else if (flags & KGSL_MEMFLAGS_VMALLOC_MEM) {
			physaddr = vmalloc_to_pfn((void *)address);
			physaddr <<= PAGE_SHIFT;
		} else if (flags & KGSL_MEMFLAGS_HOSTADDR)
			physaddr = kgsl_virtaddr_to_physaddr(address);
		else
			physaddr = 0;

		if (physaddr) {
			kgsl_pt_map_set(pagetable, pte, physaddr | protflags);
		} else {
			KGSL_MEM_ERR
			    ("Unable to find physaddr for address: %x\n",
			     address);
			spin_unlock(&pagetable->lock);
			kgsl_mmu_unmap(pagetable, *gpuaddr, range);
			return -EFAULT;
		}
		address += KGSL_PAGESIZE;
	}

	KGSL_MEM_INFO("pt %p p %08x g %08x pte f %d l %d n %d f %d\n",
		      pagetable, address, *gpuaddr, ptefirst, ptelast,
		      numpages, flushtlb);

	mb();
	dsb();
	outer_sync();

	/* Invalidate tlb only if current page table used by GPU is the
	 * pagetable that we used to allocate */
	if (flushtlb) {
		/* set all devices as needing flushing */
		pagetable->tlb_flags = UINT_MAX;
		GSL_TLBFLUSH_FILTER_RESET();
	}
	spin_unlock(&pagetable->lock);

	KGSL_MEM_VDBG("return %d\n", 0);

	return 0;
}
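
/*
 * Unmap a GPU virtual range: mark each PTE as dirty (unused), record
 * the affected superptes in the TLB flush filter, and return the range
 * to the pagetable's gen_pool.
 */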
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable, unsigned int gpuaddr,
	       int range)
{
	unsigned int numpages;
	unsigned int pte, ptefirst, ptelast, superpte;

	KGSL_MEM_VDBG("enter (pt=%p, gpuaddr=0x%08x, range=%d)\n",
		      pagetable, gpuaddr, range);

	BUG_ON(range <= 0);

	numpages = (range >> KGSL_PAGESIZE_SHIFT);
	if (range & (KGSL_PAGESIZE - 1))
		numpages++;

	ptefirst = kgsl_pt_entry_get(pagetable, gpuaddr);
	ptelast = ptefirst + numpages;

	KGSL_MEM_INFO("pt %p gpu %08x pte first %d last %d numpages %d\n",
		      pagetable, gpuaddr, ptefirst, ptelast, numpages);

	spin_lock(&pagetable->lock);
	superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE - 1));
	GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
	for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		BUG_ON(!kgsl_pt_map_getaddr(pagetable, pte));
#endif
		kgsl_pt_map_set(pagetable, pte, GSL_PT_PAGE_DIRTY);
		superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
		if (pte == superpte)
			GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
						     GSL_PT_SUPER_PTE);
	}

	mb();
	dsb();
	outer_sync();

	spin_unlock(&pagetable->lock);

	gen_pool_free(pagetable->pool, gpuaddr, range);

	KGSL_MEM_VDBG("return %d\n", 0);

	return 0;
}
#endif /* CONFIG_MSM_KGSL_MMU */
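
/*
 * Map a shared (global) buffer into a pagetable. Global mappings must
 * resolve to the same GPU address in every pagetable, so the first
 * mapping records the address in the memdesc and later mappings are
 * checked against it.
 */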
int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
			struct kgsl_memdesc *memdesc, unsigned int protflags,
			unsigned int flags)
{
	int result = -EINVAL;
	unsigned int gpuaddr = 0;

	if (memdesc == NULL)
		goto error;

	result = kgsl_mmu_map(pagetable, memdesc->physaddr, memdesc->size,
			      protflags, &gpuaddr, flags);
	if (result)
		goto error;

	/* global mappings must have the same gpu address in all pagetables */
	if (memdesc->gpuaddr == 0)
		memdesc->gpuaddr = gpuaddr;
	else if (memdesc->gpuaddr != gpuaddr) {
		KGSL_MEM_ERR("pt %p addr mismatch phys 0x%08x gpu 0x%0x 0x%08x",
			     pagetable, memdesc->physaddr,
			     memdesc->gpuaddr, gpuaddr);
		goto error_unmap;
	}
	return result;

error_unmap:
	kgsl_mmu_unmap(pagetable, gpuaddr, memdesc->size);
error:
	return result;
}
int kgsl_mmu_stop(struct kgsl_device *device)
{
	/*
	 * stop device mmu
	 *
	 * call this with the global lock held
	 */
	struct kgsl_mmu *mmu = &device->mmu;

	KGSL_MEM_VDBG("enter (device=%p)\n", device);

	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* disable mh interrupts */
		KGSL_MEM_DBG("disabling mmu interrupts\n");
		/* disable MMU */
		kgsl_regwrite(device, mmu_reg[device->id].interrupt_mask, 0);
		kgsl_regwrite(device, mmu_reg[device->id].config, 0x00000000);
		mmu->flags &= ~KGSL_FLAGS_STARTED;
	}

	KGSL_MEM_VDBG("return %d\n", 0);
	return 0;
}
int kgsl_mmu_close(struct kgsl_device *device)
{
	/*
	 * close device mmu
	 *
	 * call this with the global lock held
	 */
	struct kgsl_mmu *mmu = &device->mmu;

	KGSL_MEM_VDBG("enter (device=%p)\n", device);

	if (mmu->dummyspace.gpuaddr)
		kgsl_sharedmem_free(&mmu->dummyspace);

	KGSL_MEM_VDBG("return %d\n", 0);
	return 0;
}