
/drivers/gpu/msm/kgsl_mmu.c

https://gitlab.com/TeamTators/hp-kernel-tenderloin
   1/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
   2 *
   3 * This program is free software; you can redistribute it and/or modify
   4 * it under the terms of the GNU General Public License version 2 and
   5 * only version 2 as published by the Free Software Foundation.
   6 *
   7 * This program is distributed in the hope that it will be useful,
   8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
   9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  10 * GNU General Public License for more details.
  11 *
  12 * You should have received a copy of the GNU General Public License
  13 * along with this program; if not, write to the Free Software
  14 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  15 * 02110-1301, USA.
  16 *
  17 */
  18#include <linux/types.h>
  19#include <linux/mutex.h>
  20#include <linux/spinlock.h>
  21#include <linux/genalloc.h>
  22#include <linux/slab.h>
  23#include <linux/io.h>
  24#include <linux/bitmap.h>
  25#ifdef CONFIG_MSM_KGSL_MMU
  26#include <asm/pgalloc.h>
  27#include <asm/pgtable.h>
  28#endif
  29#include "kgsl_mmu.h"
  30#include "kgsl_drawctxt.h"
  31#include "kgsl.h"
  32#include "kgsl_log.h"
  33#include "yamato_reg.h"
  34#include "g12_reg.h"
  35#include "kgsl_device.h"
  36#include "kgsl_g12.h"
  37#include "kgsl_yamato.h"
  38
  39struct kgsl_pte_debug {
  40	unsigned int read:1;
  41	unsigned int write:1;
  42	unsigned int dirty:1;
  43	unsigned int reserved:9;
  44	unsigned int phyaddr:20;
  45};
  46
  47#define GSL_PT_PAGE_BITS_MASK	0x00000007
  48#define GSL_PT_PAGE_ADDR_MASK	(~(KGSL_PAGESIZE - 1))
  49
  50#define GSL_MMU_INT_MASK \
  51	(MH_INTERRUPT_MASK__AXI_READ_ERROR | \
  52	 MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
  53
  54static const struct kgsl_mmu_reg mmu_reg[KGSL_DEVICE_MAX] = {
  55	{
  56		.config = REG_MH_MMU_CONFIG,
  57		.mpu_base = REG_MH_MMU_MPU_BASE,
  58		.mpu_end = REG_MH_MMU_MPU_END,
  59		.va_range = REG_MH_MMU_VA_RANGE,
  60		.pt_page = REG_MH_MMU_PT_BASE,
  61		.page_fault = REG_MH_MMU_PAGE_FAULT,
  62		.tran_error = REG_MH_MMU_TRAN_ERROR,
  63		.invalidate = REG_MH_MMU_INVALIDATE,
  64		.interrupt_mask = REG_MH_INTERRUPT_MASK,
  65		.interrupt_status = REG_MH_INTERRUPT_STATUS,
  66		.interrupt_clear = REG_MH_INTERRUPT_CLEAR
  67	},
  68	{
  69		.config = ADDR_MH_MMU_CONFIG,
  70		.mpu_base = ADDR_MH_MMU_MPU_BASE,
  71		.mpu_end = ADDR_MH_MMU_MPU_END,
  72		.va_range = ADDR_MH_MMU_VA_RANGE,
  73		.pt_page = ADDR_MH_MMU_PT_BASE,
  74		.page_fault = ADDR_MH_MMU_PAGE_FAULT,
  75		.tran_error = ADDR_MH_MMU_TRAN_ERROR,
  76		.invalidate = ADDR_MH_MMU_INVALIDATE,
  77		.interrupt_mask = ADDR_MH_INTERRUPT_MASK,
  78		.interrupt_status = ADDR_MH_INTERRUPT_STATUS,
  79		.interrupt_clear = ADDR_MH_INTERRUPT_CLEAR
  80	},
  81	{
  82		.config = ADDR_MH_MMU_CONFIG,
  83		.mpu_base = ADDR_MH_MMU_MPU_BASE,
  84		.mpu_end = ADDR_MH_MMU_MPU_END,
  85		.va_range = ADDR_MH_MMU_VA_RANGE,
  86		.pt_page = ADDR_MH_MMU_PT_BASE,
  87		.page_fault = ADDR_MH_MMU_PAGE_FAULT,
  88		.tran_error = ADDR_MH_MMU_TRAN_ERROR,
  89		.invalidate = ADDR_MH_MMU_INVALIDATE,
  90		.interrupt_mask = ADDR_MH_INTERRUPT_MASK,
  91		.interrupt_status = ADDR_MH_INTERRUPT_STATUS,
  92		.interrupt_clear = ADDR_MH_INTERRUPT_CLEAR
  93	}
  94};
  95
  96static ssize_t
  97sysfs_show_ptpool_entries(struct kobject *kobj,
  98			  struct kobj_attribute *attr,
  99			  char *buf)
 100{
 101	return sprintf(buf, "%d\n", kgsl_driver.ptpool.entries);
 102}
 103
 104static ssize_t
 105sysfs_show_ptpool_min(struct kobject *kobj,
 106			 struct kobj_attribute *attr,
 107			 char *buf)
 108{
 109	return sprintf(buf, "%d\n", kgsl_driver.ptpool.static_entries);
 110}
 111
 112static ssize_t
 113sysfs_show_ptpool_chunks(struct kobject *kobj,
 114			 struct kobj_attribute *attr,
 115			 char *buf)
 116{
 117	return sprintf(buf, "%d\n", kgsl_driver.ptpool.chunks);
 118}
 119
 120static ssize_t
 121sysfs_show_ptpool_ptsize(struct kobject *kobj,
 122			 struct kobj_attribute *attr,
 123			 char *buf)
 124{
 125	return sprintf(buf, "%d\n", kgsl_driver.ptpool.ptsize);
 126}
 127
 128static struct kobj_attribute attr_ptpool_entries = {
 129	.attr = { .name = "ptpool_entries", .mode = 0444 },
 130	.show = sysfs_show_ptpool_entries,
 131	.store = NULL,
 132};
 133
 134static struct kobj_attribute attr_ptpool_min = {
 135	.attr = { .name = "ptpool_min", .mode = 0444 },
 136	.show = sysfs_show_ptpool_min,
 137	.store = NULL,
 138};
 139
 140static struct kobj_attribute attr_ptpool_chunks = {
 141	.attr = { .name = "ptpool_chunks", .mode = 0444 },
 142	.show = sysfs_show_ptpool_chunks,
 143	.store = NULL,
 144};
 145
 146static struct kobj_attribute attr_ptpool_ptsize = {
 147	.attr = { .name = "ptpool_ptsize", .mode = 0444 },
 148	.show = sysfs_show_ptpool_ptsize,
 149	.store = NULL,
 150};
 151
 152static struct attribute *ptpool_attrs[] = {
 153	&attr_ptpool_entries.attr,
 154	&attr_ptpool_min.attr,
 155	&attr_ptpool_chunks.attr,
 156	&attr_ptpool_ptsize.attr,
 157	NULL,
 158};
 159
 160static struct attribute_group ptpool_attr_group = {
 161	.attrs = ptpool_attrs,
 162};
 163
 164static int
 165_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
 166{
 167	struct kgsl_ptpool_chunk *chunk;
 168	size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);
 169
 170	BUG_ON(count == 0);
 171
 172	if (get_order(size) >= MAX_ORDER) {
 173		KGSL_DRV_ERR("ptpool allocation is too big: %d\n", size);
 174		return -EINVAL;
 175	}
 176
 177	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
 178	if (chunk == NULL) {
 179		KGSL_DRV_ERR("kzalloc(%d) failed\n", sizeof(*chunk));
 180		return -ENOMEM;
 181	}
 182
 183	chunk->size = size;
 184	chunk->count = count;
 185	chunk->dynamic = dynamic;
 186
 187	chunk->data = dma_alloc_coherent(NULL, size,
 188					 &chunk->phys, GFP_KERNEL);
 189
 190	if (chunk->data == NULL) {
 191		KGSL_DRV_ERR("dma_alloc_coherent(%d) failed\n", size);
 192		goto err;
 193	}
 194
 195	chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * 4, GFP_KERNEL);
 196
 197	if (chunk->bitmap == NULL) {
 198		KGSL_DRV_ERR("kzalloc(%d) failed\n",
 199			BITS_TO_LONGS(count) * 4);
 200		goto err_dma;
 201	}
 202
 203	list_add_tail(&chunk->list, &pool->list);
 204
 205	pool->chunks++;
 206	pool->entries += count;
 207
 208	if (!dynamic)
 209		pool->static_entries += count;
 210
 211	return 0;
 212
 213err_dma:
 214	dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
 215err:
 216	kfree(chunk);
 217	return -ENOMEM;
 218}
 219
 220static void *
 221_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
 222{
 223	struct kgsl_ptpool_chunk *chunk;
 224
 225	list_for_each_entry(chunk, &pool->list, list) {
 226		int bit = find_first_zero_bit(chunk->bitmap, chunk->count);
 227
 228		if (bit >= chunk->count)
 229			continue;
 230
 231		set_bit(bit, chunk->bitmap);
 232		*physaddr = chunk->phys + (bit * pool->ptsize);
 233
 234		return chunk->data + (bit * pool->ptsize);
 235	}
 236
 237	return NULL;
 238}
 239
 240/**
 241 * kgsl_ptpool_add
 242 * @pool:  A pointer to a ptpool structure
 244 * @count: Number of entries to add
 244 *
 245 * Add static entries to the pagetable pool.
 246 */
 247
 248int
 249kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
 250{
 251	int ret = 0;
 252	BUG_ON(count == 0);
 253
 254	mutex_lock(&pool->lock);
 255
 256	/* Only 4MB can be allocated in one chunk, so larger allocations
 257	   need to be split into multiple sections */
 258
 259	while (count) {
 260		int entries = ((count * pool->ptsize) > SZ_4M) ?
 261			SZ_4M / pool->ptsize : count;
 262
 263		/* Add the entries as static, i.e. they don't ever stand
 264		   a chance of being removed */
 265
 266		ret =  _kgsl_ptpool_add_entries(pool, entries, 0);
 267		if (ret)
 268			break;
 269
 270		count -= entries;
 271	}
 272
 273	mutex_unlock(&pool->lock);
 274	return ret;
 275}
 276
 277/**
 278 * kgsl_ptpool_alloc
 279 * @pool:  A pointer to a ptpool structure
 280 * @physaddr: A pointer to store the physical address of the pagetable
 281 *
 282 * Allocate a pagetable from the pool.  Returns the virtual address
 283 * of the pagetable; the physical address is returned in physaddr.
 284 */
 285
 286void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool, unsigned int *physaddr)
 287{
 288	void *addr = NULL;
 289	int ret;
 290
 291	mutex_lock(&pool->lock);
 292	addr = _kgsl_ptpool_get_entry(pool, physaddr);
 293	if (addr)
 294		goto done;
 295
 296	/* Add a chunk for 1 more pagetable and mark it as dynamic */
 297	ret = _kgsl_ptpool_add_entries(pool, 1, 1);
 298
 299	if (ret)
 300		goto done;
 301
 302	addr = _kgsl_ptpool_get_entry(pool, physaddr);
 303done:
 304	mutex_unlock(&pool->lock);
 305	return addr;
 306}
 307
 308static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
 309{
 310	list_del(&chunk->list);
 311
 312	if (chunk->data)
 313		dma_free_coherent(NULL, chunk->size, chunk->data,
 314			chunk->phys);
 315	kfree(chunk->bitmap);
 316	kfree(chunk);
 317}
 318
 319/**
 320 * kgsl_ptpool_free
 321 * @pool:  A pointer to a ptpool structure
 322 * @addr: The virtual address to free, as returned by kgsl_ptpool_alloc
 323 *
 324 * Free a pagetable allocated from the pool
 325 */
 326
 327void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
 328{
 329	struct kgsl_ptpool_chunk *chunk, *tmp;
 330
 331	if (pool == NULL || addr == NULL)
 332		return;
 333
 334	mutex_lock(&pool->lock);
 335	list_for_each_entry_safe(chunk, tmp, &pool->list, list)  {
 336		if (addr >=  chunk->data &&
 337		    addr < chunk->data + chunk->size) {
 338			int bit = ((unsigned long) (addr - chunk->data)) /
 339				pool->ptsize;
 340
 341			clear_bit(bit, chunk->bitmap);
 342			memset(addr, 0, pool->ptsize);
 343
 344			if (chunk->dynamic &&
 345				bitmap_empty(chunk->bitmap, chunk->count))
 346				_kgsl_ptpool_rm_chunk(chunk);
 347
 348			break;
 349		}
 350	}
 351
 352	mutex_unlock(&pool->lock);
 353}
 354
 355void kgsl_ptpool_destroy(struct kgsl_ptpool *pool)
 356{
 357	struct kgsl_ptpool_chunk *chunk, *tmp;
 358
 359	if (pool == NULL)
 360		return;
 361
 362	mutex_lock(&pool->lock);
 363	list_for_each_entry_safe(chunk, tmp, &pool->list, list)
 364		_kgsl_ptpool_rm_chunk(chunk);
 365	mutex_unlock(&pool->lock);
 366
 367	memset(pool, 0, sizeof(*pool));
 368}
 369
 370/**
 371 * kgsl_ptpool_init
 372 * @pool:  A pointer to a ptpool structure to initialize
 373 * @ptsize: The size of each pagetable entry
 374 * @entries:  The number of initial entries to add to the pool
 375 *
 376 * Initialize a pool and allocate an initial chunk of entries.
 377 */
 378
 379int kgsl_ptpool_init(struct kgsl_ptpool *pool, int ptsize, int entries)
 380{
 381	int ret = 0;
 382	BUG_ON(ptsize == 0);
 383
 384	pool->ptsize = ptsize;
 385	mutex_init(&pool->lock);
 386	INIT_LIST_HEAD(&pool->list);
 387
 388	if (entries) {
 389		ret = kgsl_ptpool_add(pool, entries);
 390		if (ret)
 391			return ret;
 392	}
 393
 394	/* PALM: sysfs not supported yet
 395	return sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
 396	*/
 397	return 0;
 398}
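
/*
 * Illustrative only: a minimal sketch of how the ptpool API above fits
 * together (the pool variable, ptsize and entry count here are hypothetical,
 * not values used by the driver):
 *
 *	struct kgsl_ptpool pool;
 *	unsigned int physaddr;
 *	void *ptbase;
 *
 *	if (kgsl_ptpool_init(&pool, 4096, 16) == 0) {
 *		ptbase = kgsl_ptpool_alloc(&pool, &physaddr);
 *		if (ptbase)
 *			kgsl_ptpool_free(&pool, ptbase);
 *		kgsl_ptpool_destroy(&pool);
 *	}
 */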
 399
 400/* pt_mutex needs to be held in this function */
 401
 402static struct kgsl_pagetable *
 403kgsl_get_pagetable(unsigned long name)
 404{
 405	struct kgsl_pagetable *pt;
 406
 407	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list)
 408		if (pt->name == name)
 409			return pt;
 410	return NULL;
 411}
 412
 413static inline uint32_t
 414kgsl_pt_entry_get(struct kgsl_pagetable *pt, uint32_t va)
 415{
 416	return (va - pt->va_base) >> KGSL_PAGESIZE_SHIFT;
 417}
 418
 419static inline void
 420kgsl_pt_map_set(struct kgsl_pagetable *pt, uint32_t pte, uint32_t val)
 421{
 422	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
 423	writel(val, &baseptr[pte]);
 424}
 425
 426static inline uint32_t
 427kgsl_pt_map_getaddr(struct kgsl_pagetable *pt, uint32_t pte)
 428{
 429	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
 430	return readl(&baseptr[pte]) & GSL_PT_PAGE_ADDR_MASK;
 431}
 432
 433void kgsl_mh_intrcallback(struct kgsl_device *device)
 434{
 435	unsigned int status = 0;
 436	unsigned int reg;
 437
 438	KGSL_MEM_VDBG("enter (device=%p)\n", device);
 439
 440	kgsl_regread_isr(device, mmu_reg[device->id].interrupt_status, &status);
 441
 442	if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR) {
 443		KGSL_MEM_FATAL("axi read error interrupt\n");
 444	} else if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR) {
 445		KGSL_MEM_FATAL("axi write error interrupt\n");
 446	} else if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT) {
 447		kgsl_regread_isr(device, mmu_reg[device->id].page_fault, &reg);
 448		KGSL_MEM_FATAL("mmu page fault interrupt: %08x\n", reg);
 449	} else {
 450		KGSL_MEM_DBG("bad bits in REG_MH_INTERRUPT_STATUS %08x\n",
 451			     status);
 452	}
 453
 454	kgsl_regwrite_isr(device, mmu_reg[device->id].interrupt_clear, status);
 455
 456	/* TODO: figure out how to handle error interrupts.
 457	* specifically, page faults should probably nuke the client that
 458	* caused them, but we don't have enough info to figure that out yet.
 459	*/
 460
 461	KGSL_MEM_VDBG("return\n");
 462}
 463
 464int
 465kgsl_get_ptname_from_ptbase(unsigned int pt_base)
 466{
 467	struct kgsl_pagetable *pt;
 468	int ptid = -1;
 469
 470	mutex_lock(&kgsl_driver.pt_mutex);
 471
 472	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
 473		if (pt_base == pt->base.gpuaddr) {
 474			ptid = (int) pt->name;
 475			break;
 476		}
 477	}
 478	mutex_unlock(&kgsl_driver.pt_mutex);
 479
 480	return ptid;
 481}
 482
 483static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
 484				struct kgsl_mmu *mmu,
 485				unsigned int name)
 486{
 487	int status = 0;
 488	struct kgsl_pagetable *pagetable = NULL;
 489
 490	KGSL_MEM_VDBG("enter (mmu=%p)\n", mmu);
 491
 492	pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
 493	if (pagetable == NULL) {
 494		KGSL_MEM_ERR("Unable to allocate pagetable object.\n");
 495		return NULL;
 496	}
 497
 498	pagetable->magic_number = KGSL_PAGETABLE_INIT_NUMBER;
 499	pagetable->refcnt = 1;
 500
 501	spin_lock_init(&pagetable->lock);
 502	pagetable->tlb_flags = 0;
 503	pagetable->name = name;
 504	pagetable->va_base = KGSL_PAGETABLE_BASE;
 505	pagetable->va_range = CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
 506	pagetable->last_superpte = 0;
 507	pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(pagetable->va_range);
 508
 509	pagetable->tlbflushfilter.size = (pagetable->va_range /
 510				(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
 511	pagetable->tlbflushfilter.base = (unsigned int *)
 512			kzalloc(pagetable->tlbflushfilter.size, GFP_KERNEL);
 513	if (!pagetable->tlbflushfilter.base) {
 514		KGSL_MEM_ERR("Failed to create tlbflushfilter\n");
 515		goto err_alloc;
 516	}
 517	GSL_TLBFLUSH_FILTER_RESET();
 518
 519	pagetable->pool = gen_pool_create(KGSL_PAGESIZE_SHIFT, -1);
 520	if (pagetable->pool == NULL) {
 521		KGSL_MEM_ERR("Unable to allocate virtualaddr pool.\n");
 522		goto err_flushfilter;
 523	}
 524
 525	if (gen_pool_add(pagetable->pool, pagetable->va_base,
 526				pagetable->va_range, -1)) {
 527		KGSL_MEM_ERR("gen_pool_add failed for pagetable %p\n",
 528				pagetable);
 529		goto err_pool;
 530	}
 531
 532	pagetable->base.hostptr = kgsl_ptpool_alloc(&kgsl_driver.ptpool,
 533		&pagetable->base.physaddr);
 534
 535	if (pagetable->base.hostptr == NULL)
 536		goto err_pool;
 537
 538	pagetable->base.gpuaddr = pagetable->base.physaddr;
 539
 540	status = kgsl_setup_pt(pagetable);
 541	if (status)
 542		goto err_free_sharedmem;
 543
 544	list_add(&pagetable->list, &kgsl_driver.pagetable_list);
 545
 546	KGSL_MEM_VDBG("return %p\n", pagetable);
 547	return pagetable;
 548
 549err_free_sharedmem:
 550	kgsl_ptpool_free(&kgsl_driver.ptpool, pagetable->base.hostptr);
 551err_pool:
 552	gen_pool_destroy(pagetable->pool);
 553err_flushfilter:
 554	kfree(pagetable->tlbflushfilter.base);
 555err_alloc:
 556	kfree(pagetable);
 557
 558	return NULL;
 559}
 560
 561static void kgsl_mmu_destroypagetable(struct kgsl_pagetable *pagetable)
 562{
 563	KGSL_MEM_VDBG("enter (pagetable=%p)\n", pagetable);
 564
 565	list_del(&pagetable->list);
 566
 567	kgsl_cleanup_pt(pagetable);
 568
 569	kgsl_ptpool_free(&kgsl_driver.ptpool, pagetable->base.hostptr);
 570
 571	if (pagetable->pool) {
 572		gen_pool_destroy(pagetable->pool);
 573		pagetable->pool = NULL;
 574	}
 575
 576	if (pagetable->tlbflushfilter.base) {
 577		pagetable->tlbflushfilter.size = 0;
 578		kfree(pagetable->tlbflushfilter.base);
 579		pagetable->tlbflushfilter.base = NULL;
 580	}
 581
 582	pagetable->magic_number = KGSL_PAGETABLE_POISON_NUMBER;
 583	kfree(pagetable);
 584}
 585
 586struct kgsl_pagetable *kgsl_mmu_getpagetable(struct kgsl_mmu *mmu,
 587					     unsigned long name)
 588{
 589	struct kgsl_pagetable *pt;
 590
 591	if (mmu == NULL)
 592		return NULL;
 593
 594	mutex_lock(&kgsl_driver.pt_mutex);
 595
 596	list_for_each_entry(pt,	&kgsl_driver.pagetable_list, list) {
 597		if (pt->name == name) {
 598			spin_lock(&pt->lock);
 599			pt->refcnt++;
 600			spin_unlock(&pt->lock);
 601			mutex_unlock(&kgsl_driver.pt_mutex);
 602			return pt;
 603		}
 604	}
 605
 606	pt = kgsl_mmu_createpagetableobject(mmu, name);
 607	mutex_unlock(&kgsl_driver.pt_mutex);
 608
 609	return pt;
 610}
 611
 612void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
 613{
 614	bool dead;
 615	if (pagetable == NULL)
 616		return;
 617
 618	mutex_lock(&kgsl_driver.pt_mutex);
 619
 620	spin_lock(&pagetable->lock);
 621	dead = (--pagetable->refcnt) == 0;
 622	spin_unlock(&pagetable->lock);
 623
 624	if (dead)
 625		kgsl_mmu_destroypagetable(pagetable);
 626
 627	mutex_unlock(&kgsl_driver.pt_mutex);
 628}
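
/*
 * Illustrative only: kgsl_mmu_getpagetable() and kgsl_mmu_putpagetable()
 * above form a refcounted get/put pair (the mmu pointer and name below are
 * hypothetical):
 *
 *	struct kgsl_pagetable *pt = kgsl_mmu_getpagetable(&device->mmu, name);
 *
 *	if (pt != NULL) {
 *		... use the pagetable ...
 *		kgsl_mmu_putpagetable(pt);
 *	}
 */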
 629
 630int kgsl_mmu_setstate(struct kgsl_device *device,
 631				struct kgsl_pagetable *pagetable)
 632{
 633	int status = 0;
 634	struct kgsl_mmu *mmu = &device->mmu;
 635
 636	KGSL_MEM_VDBG("enter (device=%p, pagetable=%p)\n", device, pagetable);
 637
 638	if (mmu->flags & KGSL_FLAGS_STARTED) {
 639		/* if the page table is not current, set up the mmu to use
 640		 * the newly specified page table
 641		 */
 642//		printk(KERN_DEBUG"from %p to %p magic %x\n", mmu->hwpagetable, pagetable,
 643//			pagetable->magic_number);
 644
 645		if (mmu->hwpagetable != pagetable) {
 646//			KGSL_MEM_ERR("In kgsl_mmu_setstate() - pagetable = %x pagetable->magic_number = %x\n"
 647//					, pagetable, pagetable->magic_number);
 648			mmu->hwpagetable = pagetable;
 649			spin_lock(&mmu->hwpagetable->lock);
 650			mmu->hwpagetable->tlb_flags &= ~(1<<device->id);
 651			spin_unlock(&mmu->hwpagetable->lock);
 652
 653			/* call device specific set page table */
 654			status = kgsl_setstate(mmu->device,
 655				KGSL_MMUFLAGS_TLBFLUSH |
 656				KGSL_MMUFLAGS_PTUPDATE);
 657
 658		}
 659	}
 660//	else {
 661//		printk(KERN_DEBUG"kgsl flags not started (device=%p, pagetable=%p)\n", device, pagetable);
 662//	}
 663
 664	KGSL_MEM_VDBG("return %d\n", status);
 665
 666	return status;
 667}
 668
 669int kgsl_mmu_init(struct kgsl_device *device)
 670{
 671	/*
 672	 * initialize device mmu
 673	 *
 674	 * call this with the global lock held
 675	 */
 676	int status;
 677	struct kgsl_mmu *mmu = &device->mmu;
 678
 679	KGSL_MEM_VDBG("enter (device=%p)\n", device);
 680
 681	mmu->device = device;
 682
 683#ifndef CONFIG_MSM_KGSL_MMU
 684	mmu->config = 0x00000000;
 685#endif
 686
 687	/* MMU not enabled */
 688	if ((mmu->config & 0x1) == 0) {
 689		KGSL_MEM_VDBG("return %d\n", 0);
 690		return 0;
 691	}
 692
 693	/* make sure aligned to pagesize */
 694	BUG_ON(mmu->mpu_base & (KGSL_PAGESIZE - 1));
 695	BUG_ON((mmu->mpu_base + mmu->mpu_range) & (KGSL_PAGESIZE - 1));
 696
 697	/* sub-client MMU lookups require address translation */
 698	if ((mmu->config & ~0x1) > 0) {
 699		/* make sure virtual address range is a multiple of 64KB */
 700		BUG_ON(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1));
 701
 702		/* allocate memory used for completing r/w operations that
 703		 * cannot be mapped by the MMU
 704		 */
 705		status = kgsl_sharedmem_alloc_coherent(&mmu->dummyspace, 64);
 706		if (status != 0) {
 707			KGSL_MEM_ERR
 708			    ("Unable to allocate dummy space memory.\n");
 709			goto error;
 710		}
 711
 712		kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
 713				   mmu->dummyspace.size);
 714
 715	}
 716
 717	KGSL_MEM_VDBG("return %d\n", 0);
 718
 719	return 0;
 720
 721error:
 722	return status;
 723}
 724
 725int kgsl_mmu_start(struct kgsl_device *device)
 726{
 727	/*
 728	 * start device mmu
 729	 *
 730	 * call this with the global lock held
 731	 */
 732	int status;
 733	struct kgsl_mmu *mmu = &device->mmu;
 734
 735	KGSL_MEM_VDBG("enter (device=%p)\n", device);
 736
 737	if (mmu->flags & KGSL_FLAGS_STARTED) {
 738		KGSL_MEM_INFO("MMU already started.\n");
 739		return 0;
 740	}
 741
 742	/* MMU not enabled */
 743	if ((mmu->config & 0x1) == 0) {
 744		KGSL_MEM_VDBG("return %d\n", 0);
 745		return 0;
 746	}
 747
 748	mmu->flags |= KGSL_FLAGS_STARTED;
 749
 750	/* setup MMU and sub-client behavior */
 751	kgsl_regwrite(device, mmu_reg[device->id].config, mmu->config);
 752
 753	/* enable axi interrupts */
 754	KGSL_MEM_DBG("enabling mmu interrupts mask=0x%08lx\n",
 755		     GSL_MMU_INT_MASK);
 756	kgsl_regwrite(device, mmu_reg[device->id].interrupt_mask,
 757				GSL_MMU_INT_MASK);
 758
 759	/* idle device */
 760	kgsl_idle(device,  KGSL_TIMEOUT_DEFAULT);
 761
 762	/* define physical memory range accessible by the core */
 763	kgsl_regwrite(device, mmu_reg[device->id].mpu_base, mmu->mpu_base);
 764	kgsl_regwrite(device, mmu_reg[device->id].mpu_end,
 765			mmu->mpu_base + mmu->mpu_range);
 766
 767	/* enable axi and mmu page fault interrupts */
 768	KGSL_MEM_DBG("enabling mmu interrupts mask=0x%08lx\n",
 769		     GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);
 770	kgsl_regwrite(device, mmu_reg[device->id].interrupt_mask,
 771			GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);
 772
 773	/* sub-client MMU lookups require address translation */
 774	if ((mmu->config & ~0x1) > 0) {
 775
 776		kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
 777				   mmu->dummyspace.size);
 778
 779		/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
 780		 * to complete transactions in case of an MMU fault. Note that
 781		 * we'll leave the bottom 32 bytes of the dummyspace for other
 782		 * purposes (e.g. use it when dummy read cycles are needed
 783		 * for other blocks) */
 784		kgsl_regwrite(device, mmu_reg[device->id].tran_error,
 785						mmu->dummyspace.physaddr + 32);
 786
 787		BUG_ON(mmu->defaultpagetable == NULL);
 788		mmu->hwpagetable = mmu->defaultpagetable;
 789
 790		kgsl_regwrite(device, mmu_reg[device->id].pt_page,
 791			      mmu->hwpagetable->base.gpuaddr);
 792		kgsl_regwrite(device, mmu_reg[device->id].va_range,
 793			      (mmu->hwpagetable->va_base |
 794			      (mmu->hwpagetable->va_range >> 16)));
 795		status = kgsl_setstate(device, KGSL_MMUFLAGS_TLBFLUSH);
 796		if (status) {
 797			KGSL_MEM_ERR("Failed to setstate TLBFLUSH\n");
 798			goto error;
 799		}
 800	}
 801
 802	KGSL_MEM_VDBG("return %d\n", 0);
 803
 804	return 0;
 805error:
 806	/* disable MMU */
 807	kgsl_regwrite(device, mmu_reg[device->id].interrupt_mask, 0);
 808	kgsl_regwrite(device, mmu_reg[device->id].config, 0x00000000);
 809	return status;
 810}
 811
 812
 813
 814#ifdef CONFIG_MSM_KGSL_MMU
 815
 816unsigned int kgsl_virtaddr_to_physaddr(unsigned int virtaddr)
 817{
 818	unsigned int physaddr = 0;
 819	pgd_t *pgd_ptr = NULL;
 820	pmd_t *pmd_ptr = NULL;
 821	pte_t *pte_ptr = NULL, pte;
 822
 823	pgd_ptr = pgd_offset(current->mm, virtaddr);
 824	if (pgd_none(*pgd_ptr) || pgd_bad(*pgd_ptr)) {
 825		KGSL_MEM_ERR
 826		    ("Invalid pgd entry found while trying to convert virtual "
 827		     "address to physical\n");
 828		return 0;
 829	}
 830
 831	pmd_ptr = pmd_offset(pgd_ptr, virtaddr);
 832	if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
 833		KGSL_MEM_ERR
 834		    ("Invalid pmd entry found while trying to convert virtual "
 835		     "address to physical\n");
 836		return 0;
 837	}
 838
 839	pte_ptr = pte_offset_map(pmd_ptr, virtaddr);
 840	if (!pte_ptr) {
 841		KGSL_MEM_ERR
 842		    ("Unable to map pte entry while trying to convert virtual "
 843		     "address to physical\n");
 844		return 0;
 845	}
 846	pte = *pte_ptr;
 847	physaddr = pte_pfn(pte);
 848	pte_unmap(pte_ptr);
 849	physaddr <<= PAGE_SHIFT;
 850	return physaddr;
 851}
 852
 853int
 854kgsl_mmu_map(struct kgsl_pagetable *pagetable,
 855				unsigned int address,
 856				int range,
 857				unsigned int protflags,
 858				unsigned int *gpuaddr,
 859				unsigned int flags)
 860{
 861	int numpages;
 862	unsigned int pte, ptefirst, ptelast, physaddr;
 863	int flushtlb, alloc_size;
 864	unsigned int align = flags & KGSL_MEMFLAGS_ALIGN_MASK;
 865
 866	KGSL_MEM_VDBG("enter (pt=%p, physaddr=%08x, range=%08d, gpuaddr=%p)\n",
 867		      pagetable, address, range, gpuaddr);
 868
 869	BUG_ON(protflags & ~(GSL_PT_PAGE_RV | GSL_PT_PAGE_WV));
 870	BUG_ON(protflags == 0);
 871	BUG_ON(range <= 0);
 872
 873	/* Only support 4K and 8K alignment for now */
 874	if (align != KGSL_MEMFLAGS_ALIGN8K && align != KGSL_MEMFLAGS_ALIGN4K) {
 875		KGSL_MEM_ERR("Cannot map memory according to "
 876			     "requested flags: %08x\n", flags);
 877		return -EINVAL;
 878	}
 879
 880	/* Make sure address being mapped is at 4K boundary */
 881	if (!IS_ALIGNED(address, KGSL_PAGESIZE) || range & ~KGSL_PAGEMASK) {
 882		KGSL_MEM_ERR("Cannot map address not aligned "
 883			     "at page boundary: address: %08x, range: %08x\n",
 884			     address, range);
 885		return -EINVAL;
 886	}
 887	alloc_size = range;
 888	if (align == KGSL_MEMFLAGS_ALIGN8K)
 889		alloc_size += KGSL_PAGESIZE;
 890
 891	*gpuaddr = gen_pool_alloc(pagetable->pool, alloc_size);
 892	if (*gpuaddr == 0) {
 893		KGSL_MEM_ERR("gen_pool_alloc failed: %d\n", alloc_size);
 894		return -ENOMEM;
 895	}
 896
 897	if (align == KGSL_MEMFLAGS_ALIGN8K) {
 898		if (*gpuaddr & ((1 << 13) - 1)) {
 899			/* Not 8k aligned, align it */
 900			gen_pool_free(pagetable->pool, *gpuaddr, KGSL_PAGESIZE);
 901			*gpuaddr = *gpuaddr + KGSL_PAGESIZE;
 902		} else
 903			gen_pool_free(pagetable->pool, *gpuaddr + range,
 904				      KGSL_PAGESIZE);
 905	}
 906
 907	numpages = (range >> KGSL_PAGESIZE_SHIFT);
 908
 909	ptefirst = kgsl_pt_entry_get(pagetable, *gpuaddr);
 910	ptelast = ptefirst + numpages;
 911
 912	pte = ptefirst;
 913	flushtlb = 0;
 914
 915	/* tlb needs to be flushed when the first and last pte are not at
 916	* superpte boundaries */
 917	if ((ptefirst & (GSL_PT_SUPER_PTE - 1)) != 0 ||
 918		((ptelast + 1) & (GSL_PT_SUPER_PTE-1)) != 0)
 919		flushtlb = 1;
 920
 921	spin_lock(&pagetable->lock);
 922	for (pte = ptefirst; pte < ptelast; pte++) {
 923#ifdef VERBOSE_DEBUG
 924		/* check if PTE exists */
 925		uint32_t val = kgsl_pt_map_getaddr(pagetable, pte);
 926		BUG_ON(val != 0 && val != GSL_PT_PAGE_DIRTY);
 927#endif
 928		if ((pte & (GSL_PT_SUPER_PTE-1)) == 0)
 929			if (GSL_TLBFLUSH_FILTER_ISDIRTY(pte / GSL_PT_SUPER_PTE))
 930				flushtlb = 1;
 931		/* mark pte as in use */
 932		if (flags & KGSL_MEMFLAGS_CONPHYS)
 933			physaddr = address;
 934		else if (flags & KGSL_MEMFLAGS_VMALLOC_MEM) {
 935			physaddr = vmalloc_to_pfn((void *)address);
 936			physaddr <<= PAGE_SHIFT;
 937		} else if (flags & KGSL_MEMFLAGS_HOSTADDR)
 938			physaddr = kgsl_virtaddr_to_physaddr(address);
 939		else
 940			physaddr = 0;
 941
 942		if (physaddr) {
 943			kgsl_pt_map_set(pagetable, pte, physaddr | protflags);
 944		} else {
 945			KGSL_MEM_ERR
 946			("Unable to find physaddr for address: %x\n",
 947			     address);
 948			spin_unlock(&pagetable->lock);
 949			kgsl_mmu_unmap(pagetable, *gpuaddr, range);
 950			return -EFAULT;
 951		}
 952		address += KGSL_PAGESIZE;
 953	}
 954
 955	KGSL_MEM_INFO("pt %p p %08x g %08x pte f %d l %d n %d f %d\n",
 956		      pagetable, address, *gpuaddr, ptefirst, ptelast,
 957		      numpages, flushtlb);
 958
 959	mb();
 960	dsb();
 961	outer_sync();
 962
 963	/* Invalidate tlb only if current page table used by GPU is the
 964	 * pagetable that we used to allocate */
 965	if (flushtlb) {
 966		/*set all devices as needing flushing*/
 967		pagetable->tlb_flags = UINT_MAX;
 968		GSL_TLBFLUSH_FILTER_RESET();
 969	}
 970	spin_unlock(&pagetable->lock);
 971
 972
 973	KGSL_MEM_VDBG("return %d\n", 0);
 974
 975	return 0;
 976}
 977
 978int
 979kgsl_mmu_unmap(struct kgsl_pagetable *pagetable, unsigned int gpuaddr,
 980		int range)
 981{
 982	unsigned int numpages;
 983	unsigned int pte, ptefirst, ptelast, superpte;
 984
 985	KGSL_MEM_VDBG("enter (pt=%p, gpuaddr=0x%08x, range=%d)\n",
 986			pagetable, gpuaddr, range);
 987
 988	BUG_ON(range <= 0);
 989
 990	numpages = (range >> KGSL_PAGESIZE_SHIFT);
 991	if (range & (KGSL_PAGESIZE - 1))
 992		numpages++;
 993
 994	ptefirst = kgsl_pt_entry_get(pagetable, gpuaddr);
 995	ptelast = ptefirst + numpages;
 996
 997	KGSL_MEM_INFO("pt %p gpu %08x pte first %d last %d numpages %d\n",
 998		      pagetable, gpuaddr, ptefirst, ptelast, numpages);
 999
1000	spin_lock(&pagetable->lock);
1001	superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE-1));
1002	GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
1003	for (pte = ptefirst; pte < ptelast; pte++) {
1004#ifdef VERBOSE_DEBUG
1005		/* check if PTE exists */
1006		BUG_ON(!kgsl_pt_map_getaddr(pagetable, pte));
1007#endif
1008		kgsl_pt_map_set(pagetable, pte, GSL_PT_PAGE_DIRTY);
1009		superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
1010		if (pte == superpte)
1011			GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
1012				GSL_PT_SUPER_PTE);
1013	}
1014
1015	mb();
1016	dsb();
1017	outer_sync();
1018
1019	spin_unlock(&pagetable->lock);
1020
1021	gen_pool_free(pagetable->pool, gpuaddr, range);
1022
1023	KGSL_MEM_VDBG("return %d\n", 0);
1024
1025	return 0;
1026}
1027#endif /*CONFIG_MSM_KGSL_MMU*/
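
/*
 * Illustrative only: a sketch of a map/unmap round trip through the API
 * above (the pagetable pointer, physical address and size are hypothetical;
 * the flag names are the ones this driver defines):
 *
 *	unsigned int gpuaddr;
 *	int ret;
 *
 *	ret = kgsl_mmu_map(pt, physaddr, size,
 *			   GSL_PT_PAGE_RV | GSL_PT_PAGE_WV, &gpuaddr,
 *			   KGSL_MEMFLAGS_ALIGN4K | KGSL_MEMFLAGS_CONPHYS);
 *	if (ret == 0)
 *		kgsl_mmu_unmap(pt, gpuaddr, size);
 */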
1028
1029int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
1030			struct kgsl_memdesc *memdesc, unsigned int protflags,
1031			unsigned int flags)
1032{
1033	int result = -EINVAL;
1034	unsigned int gpuaddr = 0;
1035
1036	if (memdesc == NULL)
1037		goto error;
1038
1039	result = kgsl_mmu_map(pagetable, memdesc->physaddr, memdesc->size,
1040				protflags, &gpuaddr, flags);
1041	if (result)
1042		goto error;
1043
1044	/*global mappings must have the same gpu address in all pagetables*/
1045	if (memdesc->gpuaddr == 0)
1046		memdesc->gpuaddr = gpuaddr;
1047
1048	else if (memdesc->gpuaddr != gpuaddr) {
1049		KGSL_MEM_ERR("pt %p addr mismatch phys 0x%08x gpu 0x%08x 0x%08x\n",
1050				pagetable, memdesc->physaddr,
1051				memdesc->gpuaddr, gpuaddr);
1052		goto error_unmap;
1053	}
1054	return result;
1055error_unmap:
1056	kgsl_mmu_unmap(pagetable, gpuaddr, memdesc->size);
1057error:
1058	return result;
1059}
1060
1061int kgsl_mmu_stop(struct kgsl_device *device)
1062{
1063	/*
1064	 *  stop device mmu
1065	 *
1066	 *  call this with the global lock held
1067	 */
1068	struct kgsl_mmu *mmu = &device->mmu;
1069
1070	KGSL_MEM_VDBG("enter (device=%p)\n", device);
1071
1072	if (mmu->flags & KGSL_FLAGS_STARTED) {
1073		/* disable mh interrupts */
1074		KGSL_MEM_DBG("disabling mmu interrupts\n");
1075		/* disable MMU */
1076		kgsl_regwrite(device, mmu_reg[device->id].interrupt_mask, 0);
1077		kgsl_regwrite(device, mmu_reg[device->id].config, 0x00000000);
1078
1079		mmu->flags &= ~KGSL_FLAGS_STARTED;
1080	}
1081
1082	KGSL_MEM_VDBG("return %d\n", 0);
1083
1084	return 0;
1087}
1088
1089int kgsl_mmu_close(struct kgsl_device *device)
1090{
1091	/*
1092	 *  close device mmu
1093	 *
1094	 *  call this with the global lock held
1095	 */
1096	struct kgsl_mmu *mmu = &device->mmu;
1097
1098	KGSL_MEM_VDBG("enter (device=%p)\n", device);
1099
1100	if (mmu->dummyspace.gpuaddr)
1101		kgsl_sharedmem_free(&mmu->dummyspace);
1102
1103	KGSL_MEM_VDBG("return %d\n", 0);
1104
1105	return 0;
1106}