
/drivers/gpu/msm/kgsl_drm.c

https://gitlab.com/TeamTators/hp-kernel-tenderloin
   1/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
   2 *
   3 * This program is free software; you can redistribute it and/or modify
   4 * it under the terms of the GNU General Public License version 2 and
   5 * only version 2 as published by the Free Software Foundation.
   6 *
   7 * This program is distributed in the hope that it will be useful,
   8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
   9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  10 * GNU General Public License for more details.
  11 *
  12 * You should have received a copy of the GNU General Public License
  13 * along with this program; if not, write to the Free Software
  14 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  15 * 02110-1301, USA.
  16 */
  17
  18/* Implements an interface between KGSL and the DRM subsystem.  For now this
  19 * is pretty simple, but it will take on more of the workload as time goes
  20 * on
  21 */
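/*
 * Illustrative userspace flow (a minimal sketch, assuming the request
 * structs and DRM_KGSL_GEM_* command indices exported by kgsl_drm.h and
 * the drmCommandWrite()/drmCommandWriteRead() helpers from libdrm):
 *
 *	struct drm_kgsl_gem_create create = { .size = len };
 *	drmCommandWriteRead(fd, DRM_KGSL_GEM_CREATE, &create, sizeof(create));
 *
 *	struct drm_kgsl_gem_memtype mt = {
 *		.handle = create.handle,
 *		.type = DRM_KGSL_GEM_TYPE_KMEM,
 *	};
 *	drmCommandWrite(fd, DRM_KGSL_GEM_SETMEMTYPE, &mt, sizeof(mt));
 *
 *	struct drm_kgsl_gem_alloc alloc = { .handle = create.handle };
 *	drmCommandWriteRead(fd, DRM_KGSL_GEM_ALLOC, &alloc, sizeof(alloc));
 *
 *	struct drm_kgsl_gem_mmap map = {
 *		.handle = create.handle,
 *		.size = len,
 *		.offset = alloc.offset,
 *	};
 *	drmCommandWriteRead(fd, DRM_KGSL_GEM_MMAP, &map, sizeof(map));
 *
 *	struct drm_kgsl_gem_bind_gpu bind = { .handle = create.handle };
 *	drmCommandWriteRead(fd, DRM_KGSL_GEM_BIND_GPU, &bind, sizeof(bind));
 *
 * map.hostptr then holds the CPU mapping and bind.gpuptr the GPU address
 * of buffer 0 (legacy field; DRM_KGSL_GEM_GET_BUFINFO is preferred).
 */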
  22#include "drmP.h"
  23#include "drm.h"
  24#include <linux/android_pmem.h>
  25#include <linux/notifier.h>
  26
  27#include "kgsl.h"
  28#include "kgsl_device.h"
  29#include "kgsl_drawctxt.h"
  30#include "kgsl_drm.h"
  31#include "kgsl_mmu.h"
  32#include "kgsl_yamato.h"
  33#include "kgsl_sharedmem.h"
  34
  35#define DRIVER_AUTHOR           "Qualcomm"
  36#define DRIVER_NAME             "kgsl"
  37#define DRIVER_DESC             "KGSL DRM"
  38#define DRIVER_DATE             "20100127"
  39
  40#define DRIVER_MAJOR            2
  41#define DRIVER_MINOR            1
  42#define DRIVER_PATCHLEVEL       1
  43
  44#define DRM_KGSL_GEM_FLAG_MAPPED (1 << 0)
  45
  46#define ENTRY_EMPTY -1
  47#define ENTRY_NEEDS_CLEANUP -2
  48
  49#define DRM_KGSL_NUM_FENCE_ENTRIES (DRM_KGSL_HANDLE_WAIT_ENTRIES << 2)
  50#define DRM_KGSL_HANDLE_WAIT_ENTRIES 5
  51
  52/* Returns true if the memory type is in PMEM */
  53
  54#ifdef CONFIG_KERNEL_PMEM_SMI_REGION
  55#define TYPE_IS_PMEM(_t) \
  56  (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
  57   ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_SMI) || \
  58   ((_t) & DRM_KGSL_GEM_TYPE_PMEM))
  59#else
  60#define TYPE_IS_PMEM(_t) \
  61  (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
  62   ((_t) & (DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI)))
  63#endif
  64
  65/* Returns true if the memory type is regular */
  66
  67#define TYPE_IS_MEM(_t) \
  68  (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM) || \
  69   ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
  70   ((_t) & DRM_KGSL_GEM_TYPE_MEM))
  71
  72#define TYPE_IS_FD(_t) ((_t) & DRM_KGSL_GEM_TYPE_FD_MASK)
  73
  74/* Returns true if KMEM region is uncached */
  75
  76#define IS_MEM_UNCACHED(_t) \
  77  ((_t == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
  78   (_t == DRM_KGSL_GEM_TYPE_KMEM) || \
  79   (TYPE_IS_MEM(_t) && (_t & DRM_KGSL_GEM_CACHE_WCOMBINE)))
  80
  81struct drm_kgsl_gem_object_wait_list_entry {
  82	struct list_head list;
  83	int pid;
  84	int in_use;
  85	wait_queue_head_t process_wait_q;
  86};
  87
  88struct drm_kgsl_gem_object_fence {
  89	int32_t fence_id;
  90	unsigned int num_buffers;
  91	int ts_valid;
  92	unsigned int timestamp;
  93	int ts_device;
  94	int lockpid;
  95	struct list_head buffers_in_fence;
  96};
  97
  98struct drm_kgsl_gem_object_fence_list_entry {
  99	struct list_head list;
 100	int in_use;
 101	struct drm_gem_object *gem_obj;
 102};
 103
 104static int32_t fence_id = 0x1;
 105
 106static struct drm_kgsl_gem_object_fence
 107			  gem_buf_fence[DRM_KGSL_NUM_FENCE_ENTRIES];
 108
 109struct drm_kgsl_gem_object {
 110	struct drm_gem_object *obj;
 111	uint32_t cpuaddr;
 112	uint32_t type;
 113	uint32_t size;
 114	struct kgsl_pagetable *pagetable;
 115	uint64_t mmap_offset;
 116	int bufcount;
 117	int flags;
 118	struct list_head list;
 119	int active;
 120
 121	struct {
 122		uint32_t offset;
 123		uint32_t gpuaddr;
 124	} bufs[DRM_KGSL_GEM_MAX_BUFFERS];
 125
 126	int bound;
 127	int lockpid;
  128	/* Put these here to avoid allocating all the time */
 129	struct drm_kgsl_gem_object_wait_list_entry
 130	wait_entries[DRM_KGSL_HANDLE_WAIT_ENTRIES];
 131	/* Each object can only appear in a single fence */
 132	struct drm_kgsl_gem_object_fence_list_entry
 133	fence_entries[DRM_KGSL_NUM_FENCE_ENTRIES];
 134
 135	struct list_head wait_list;
 136};
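/*
 * A single GEM object can front up to DRM_KGSL_GEM_MAX_BUFFERS buffers:
 * kgsl_gem_alloc_memory() makes one backing allocation of
 * (obj->size * bufcount) and carves it into equal slices with
 * bufs[i].offset = i * obj->size; 'active' selects which slice
 * kgsl_gem_obj_addr() reports.  A double-buffered surface, for example,
 * sets bufcount to 2 with DRM_KGSL_GEM_SET_BUFCOUNT and flips 'active'
 * with DRM_KGSL_GEM_SET_ACTIVE.
 */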
 137
 138/* This is a global list of all the memory currently mapped in the MMU */
 139static struct list_head kgsl_mem_list;
 140
 141static void kgsl_gem_mem_flush(void *addr,
 142		unsigned long size, uint32_t type, int op)
 143{
 144	int flags = 0;
 145
 146	switch (op) {
 147	case DRM_KGSL_GEM_CACHE_OP_TO_DEV:
 148		if (type & (DRM_KGSL_GEM_CACHE_WBACK |
 149			    DRM_KGSL_GEM_CACHE_WBACKWA))
 150			flags |= KGSL_MEMFLAGS_CACHE_CLEAN;
 151
 152		break;
 153
 154	case DRM_KGSL_GEM_CACHE_OP_FROM_DEV:
 155		if (type & (DRM_KGSL_GEM_CACHE_WBACK |
 156			    DRM_KGSL_GEM_CACHE_WBACKWA |
 157			    DRM_KGSL_GEM_CACHE_WTHROUGH))
 158			flags |= KGSL_MEMFLAGS_CACHE_INV;
 159	}
 160
 161	if (!flags)
 162		return;
 163
 164	if (TYPE_IS_PMEM(type) || type == DRM_KGSL_GEM_TYPE_FD_FBMEM) {
 165		flags |= KGSL_MEMFLAGS_CONPHYS;
 166		addr = __va(addr);
 167	}
 168	else if (TYPE_IS_MEM(type))
 169		flags |= KGSL_MEMFLAGS_VMALLOC_MEM;
 170	else
 171		return;
 172
 173	kgsl_cache_range_op((unsigned long) addr, size, flags);
 174}
 175
 176/* Flush all the memory mapped in the MMU */
 177
 178void kgsl_gpu_mem_flush(int op)
 179{
 180	struct drm_kgsl_gem_object *entry;
 181	int index;
 182
 183	list_for_each_entry(entry, &kgsl_mem_list, list) {
 184		for (index = 0;
 185		    entry->cpuaddr && (index < entry->bufcount); index++)
 186			kgsl_gem_mem_flush((void *)(entry->cpuaddr +
 187					    entry->bufs[index].offset),
 188					    entry->size, entry->type, op);
 189	}
 190
 191	/* Takes care of WT/WC case.
 192	 * More useful when we go barrierless
 193	 */
 194	dmb();
 195}
 196
 197/* TODO:
 198 * Add vsync wait */
 199
 200static int kgsl_drm_load(struct drm_device *dev, unsigned long flags)
 201{
 202	return 0;
 203}
 204
 205static int kgsl_drm_unload(struct drm_device *dev)
 206{
 207	return 0;
 208}
 209
 210struct kgsl_drm_device_priv {
 211	struct kgsl_device *device[KGSL_DEVICE_MAX];
 212	struct kgsl_device_private *devpriv[KGSL_DEVICE_MAX];
 213};
 214
 215static int kgsl_ts_notifier_cb(struct notifier_block *blk,
 216			       unsigned long code, void *_param);
 217
 218static struct notifier_block kgsl_ts_nb[KGSL_DEVICE_MAX];
 219
 220static int kgsl_drm_firstopen(struct drm_device *dev)
 221{
 222	int i;
 223
 224	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
 225		struct kgsl_device *device = kgsl_get_device(i);
 226
 227		if (device == NULL)
 228			continue;
 229
 230		kgsl_ts_nb[i].notifier_call = kgsl_ts_notifier_cb;
 231		kgsl_register_ts_notifier(device, &kgsl_ts_nb[i]);
 232	}
 233
 234	return 0;
 235}
 236
 237void kgsl_drm_lastclose(struct drm_device *dev)
 238{
 239	int i;
 240
 241	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
 242		struct kgsl_device *device = kgsl_get_device(i);
 243		if (device == NULL)
 244			continue;
 245
 246		kgsl_unregister_ts_notifier(device, &kgsl_ts_nb[i]);
 247	}
 248}
 249
 250void kgsl_drm_preclose(struct drm_device *dev, struct drm_file *file_priv)
 251{
 252}
 253
 254static int kgsl_drm_suspend(struct drm_device *dev, pm_message_t state)
 255{
 256	return 0;
 257}
 258
 259static int kgsl_drm_resume(struct drm_device *dev)
 260{
 261	return 0;
 262}
 263
 264static void
 265kgsl_gem_free_mmap_offset(struct drm_gem_object *obj)
 266{
 267	struct drm_device *dev = obj->dev;
 268	struct drm_gem_mm *mm = dev->mm_private;
 269	struct drm_kgsl_gem_object *priv = obj->driver_private;
 270	struct drm_map_list *list;
 271
 272	list = &obj->map_list;
 273	drm_ht_remove_item(&mm->offset_hash, &list->hash);
 274	if (list->file_offset_node) {
 275		drm_mm_put_block(list->file_offset_node);
 276		list->file_offset_node = NULL;
 277	}
 278
 279	kfree(list->map);
 280	list->map = NULL;
 281
 282	priv->mmap_offset = 0;
 283}
 284
 285static int
 286kgsl_gem_memory_allocated(struct drm_gem_object *obj)
 287{
 288	struct drm_kgsl_gem_object *priv = obj->driver_private;
 289	return priv->cpuaddr ? 1 : 0;
 290}
 291
 292static int
 293kgsl_gem_alloc_memory(struct drm_gem_object *obj)
 294{
 295	struct drm_kgsl_gem_object *priv = obj->driver_private;
 296	int index;
 297
 298	/* Return if the memory is already allocated */
 299
 300	if (kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
 301		return 0;
 302
 303	if (TYPE_IS_PMEM(priv->type)) {
 304		int type;
 305
 306		if (priv->type == DRM_KGSL_GEM_TYPE_EBI ||
 307		    priv->type & DRM_KGSL_GEM_PMEM_EBI)
 308			type = PMEM_MEMTYPE_EBI1;
 309		else
 310			type = PMEM_MEMTYPE_SMI;
 311
 312		priv->cpuaddr = pmem_kalloc(obj->size * priv->bufcount,
 313						type | PMEM_ALIGNMENT_4K);
 314
 315		if (IS_ERR((void *) priv->cpuaddr)) {
 316			DRM_ERROR("Unable to allocate PMEM memory\n");
 317			priv->cpuaddr = 0;
 318			return -ENOMEM;
 319		}
 320	} else if (TYPE_IS_MEM(priv->type)) {
 321		priv->cpuaddr = (uint32_t) vmalloc_user(obj->size *
 322			priv->bufcount);
 323
 324		if (priv->cpuaddr == 0) {
 325			DRM_ERROR("Unable to allocate vmalloc memory\n");
 326			return -ENOMEM;
 327		}
 328	} else
 329		return -EINVAL;
 330
 331	for (index = 0; index < priv->bufcount; index++)
 332		priv->bufs[index].offset = index * obj->size;
 333
 334	return 0;
 335}
 336
 337#ifdef CONFIG_MSM_KGSL_MMU
 338static void
 339kgsl_gem_unmap(struct drm_gem_object *obj)
 340{
 341	struct drm_kgsl_gem_object *priv = obj->driver_private;
 342	int index;
 343
  344	if (!(priv->flags & DRM_KGSL_GEM_FLAG_MAPPED))
 345		return;
 346
 347	for (index = 0; index < DRM_KGSL_GEM_MAX_BUFFERS; index++) {
 348		if (!priv->bufs[index].gpuaddr)
 349			continue;
 350
 351		kgsl_mmu_unmap(priv->pagetable,
 352			       priv->bufs[index].gpuaddr,
 353			       obj->size);
 354
 355		priv->bufs[index].gpuaddr = 0;
 356	}
 357
 358	kgsl_mmu_putpagetable(priv->pagetable);
 359	priv->pagetable = NULL;
 360
 361	if ((priv->type == DRM_KGSL_GEM_TYPE_KMEM) ||
 362	    (priv->type & DRM_KGSL_GEM_CACHE_MASK))
 363		list_del(&priv->list);
 364
 365	priv->flags &= ~DRM_KGSL_GEM_FLAG_MAPPED;
 366}
 367#else
 368static void
 369kgsl_gem_unmap(struct drm_gem_object *obj)
 370{
 371}
 372#endif
 373
 374static void
 375kgsl_gem_free_memory(struct drm_gem_object *obj)
 376{
 377	struct drm_kgsl_gem_object *priv = obj->driver_private;
 378	int index;
 379
 380	if (!kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
 381		return;
 382
 383	/* invalidate cached region before releasing */
 384	kgsl_gem_mem_flush((void *)priv->cpuaddr, priv->size,
 385		priv->type, DRM_KGSL_GEM_CACHE_OP_FROM_DEV);
 386
 387	kgsl_gem_unmap(obj);
 388
 389	if (TYPE_IS_PMEM(priv->type))
 390		pmem_kfree(priv->cpuaddr);
 391	else if (TYPE_IS_MEM(priv->type))
 392		vfree((void *) priv->cpuaddr);
 393
 394	priv->cpuaddr = 0;
 395
 396	for (index = 0; index < DRM_KGSL_GEM_MAX_BUFFERS; index++)
 397		priv->bufs[index].offset = 0;
 398}
 399
 400int
 401kgsl_gem_init_object(struct drm_gem_object *obj)
 402{
 403	struct drm_kgsl_gem_object *priv;
 404	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 405	if (priv == NULL) {
 406		DRM_ERROR("Unable to create GEM object\n");
 407		return -ENOMEM;
 408	}
 409
 410	obj->driver_private = priv;
 411	priv->obj = obj;
 412
 413	return 0;
 414}
 415
 416void
 417kgsl_gem_free_object(struct drm_gem_object *obj)
 418{
 419	kgsl_gem_free_memory(obj);
 420	kgsl_gem_free_mmap_offset(obj);
 421	drm_gem_object_release(obj);
 422	kfree(obj->driver_private);
 423}
 424
 425static int
 426kgsl_gem_create_mmap_offset(struct drm_gem_object *obj)
 427{
 428	struct drm_device *dev = obj->dev;
 429	struct drm_gem_mm *mm = dev->mm_private;
 430	struct drm_kgsl_gem_object *priv = obj->driver_private;
 431	struct drm_map_list *list;
 432	int msize;
 433
 434	list = &obj->map_list;
 435	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
 436	if (list->map == NULL) {
 437		DRM_ERROR("Unable to allocate drm_map_list\n");
 438		return -ENOMEM;
 439	}
 440
 441	msize = obj->size * priv->bufcount;
 442
 443	list->map->type = _DRM_GEM;
 444	list->map->size = msize;
 445	list->map->handle = obj;
 446
 447	/* Allocate a mmap offset */
 448	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
 449						    msize / PAGE_SIZE,
 450						    0, 0);
 451
 452	if (!list->file_offset_node) {
 453		DRM_ERROR("Failed to allocate offset for %d\n", obj->name);
 454		kfree(list->map);
 455		return -ENOMEM;
 456	}
 457
 458	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
 459						  msize / PAGE_SIZE, 0);
 460
 461	if (!list->file_offset_node) {
 462		DRM_ERROR("Unable to create the file_offset_node\n");
 463		kfree(list->map);
 464		return -ENOMEM;
 465	}
 466
 467	list->hash.key = list->file_offset_node->start;
 468	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
 469		DRM_ERROR("Failed to add to map hash\n");
 470		drm_mm_put_block(list->file_offset_node);
 471		kfree(list->map);
 472		return -ENOMEM;
 473	}
 474
 475	priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
 476
 477	return 0;
 478}
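/*
 * priv->mmap_offset is a fake file offset: a free range grabbed from the
 * DRM offset manager and hashed so msm_drm_gem_mmap() can translate a
 * vm_pgoff back into this object.  Userspace gets it from the ALLOC (or
 * legacy PREP) ioctl and hands it back through kgsl_gem_mmap_ioctl() or a
 * direct mmap() on the DRM fd.
 */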
 479
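/*
 * In-kernel helper: resolve a DRM fd + GEM handle to the physical start
 * and length of the currently active buffer, flushing the CPU cache
 * toward the device first.  Only PMEM-backed objects qualify, which is
 * what the MDP display path consumes (see the comment in the body).
 */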
 480int
 481kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
 482			unsigned long *len)
 483{
 484	struct file *filp;
 485	struct drm_device *dev;
 486	struct drm_file *file_priv;
 487	struct drm_gem_object *obj;
 488	struct drm_kgsl_gem_object *priv;
 489	int ret = 0;
 490
 491	filp = fget(drm_fd);
 492	if (unlikely(filp == NULL)) {
  493		DRM_ERROR("Unable to get the DRM file descriptor\n");
 494		return -EINVAL;
 495	}
 496	file_priv = filp->private_data;
 497	if (unlikely(file_priv == NULL)) {
 498		DRM_ERROR("Unable to get the file private data\n");
 499		fput(filp);
 500		return -EINVAL;
 501	}
 502	dev = file_priv->minor->dev;
 503	if (unlikely(dev == NULL)) {
 504		DRM_ERROR("Unable to get the minor device\n");
 505		fput(filp);
 506		return -EINVAL;
 507	}
 508
 509	obj = drm_gem_object_lookup(dev, file_priv, handle);
 510	if (unlikely(obj == NULL)) {
 511		DRM_ERROR("Invalid GEM handle %x\n", handle);
 512		fput(filp);
 513		return -EBADF;
 514	}
 515
 516	mutex_lock(&dev->struct_mutex);
 517	priv = obj->driver_private;
 518
 519	/* We can only use the MDP for PMEM regions */
 520
 521	if (priv->cpuaddr && TYPE_IS_PMEM(priv->type)) {
 522		/* Return the address for the currently active buffer */
 523		*start = priv->cpuaddr + priv->bufs[priv->active].offset;
 524		/* priv->mmap_offset is used for virt addr */
 525		*len = obj->size;
 526		/* flush cached obj */
 527		kgsl_gem_mem_flush((void *)*start, *len, priv->type,
 528			DRM_KGSL_GEM_CACHE_OP_TO_DEV);
 529	} else {
 530		*start = 0;
 531		*len = 0;
 532		ret = -EINVAL;
 533	}
 534
 535	drm_gem_object_unreference(obj);
 536	mutex_unlock(&dev->struct_mutex);
 537
 538	fput(filp);
 539	return ret;
 540}
 541
 542static int
 543kgsl_gem_init_obj(struct drm_device *dev,
 544		  struct drm_file *file_priv,
 545		  struct drm_gem_object *obj,
 546		  int *handle)
 547{
 548	struct drm_kgsl_gem_object *priv;
 549	int ret, i;
 550
 551	mutex_lock(&dev->struct_mutex);
 552	priv = obj->driver_private;
 553
 554	priv->cpuaddr = 0;
 555	priv->size = obj->size;
 556	priv->bufcount = 1;
 557	priv->active = 0;
 558	priv->bound = 0;
 559
  560	/* To preserve backwards compatibility, the default memory source
 561	   is EBI */
 562
 563	priv->type = DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI;
 564
 565	ret = drm_gem_handle_create(file_priv, obj, handle);
 566
 567	drm_gem_object_handle_unreference(obj);
 568	INIT_LIST_HEAD(&priv->wait_list);
 569
 570	for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
 571		INIT_LIST_HEAD((struct list_head *) &priv->wait_entries[i]);
 572		priv->wait_entries[i].pid = 0;
 573		init_waitqueue_head(&priv->wait_entries[i].process_wait_q);
 574	}
 575
 576	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
 577		INIT_LIST_HEAD((struct list_head *) &priv->fence_entries[i]);
 578		priv->fence_entries[i].in_use = 0;
 579		priv->fence_entries[i].gem_obj = obj;
 580	}
 581
 582	mutex_unlock(&dev->struct_mutex);
 583	return ret;
 584}
 585
 586int
 587kgsl_gem_create_ioctl(struct drm_device *dev, void *data,
 588		      struct drm_file *file_priv)
 589{
 590	struct drm_kgsl_gem_create *create = data;
 591	struct drm_gem_object *obj;
 592	int ret, handle;
 593
 594	/* Page align the size so we can allocate multiple buffers */
 595	create->size = ALIGN(create->size, 4096);
 596
 597	obj = drm_gem_object_alloc(dev, create->size);
 598
 599	if (obj == NULL) {
 600		DRM_ERROR("Unable to allocate the GEM object\n");
 601		return -ENOMEM;
 602	}
 603
 604	ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
 605	if (ret)
 606		return ret;
 607
 608	create->handle = handle;
 609	return 0;
 610}
 611
 612int
 613kgsl_gem_create_fd_ioctl(struct drm_device *dev, void *data,
 614			      struct drm_file *file_priv)
 615{
 616	struct drm_kgsl_gem_create_fd *args = data;
 617	struct file *file;
 618	dev_t rdev;
 619	struct fb_info *info;
 620	struct drm_gem_object *obj;
 621	struct drm_kgsl_gem_object *priv;
 622	int ret, put_needed, handle;
 623
 624	file = fget_light(args->fd, &put_needed);
 625
 626	if (file == NULL) {
 627		DRM_ERROR("Unable to get the file object\n");
 628		return -EBADF;
 629	}
 630
 631	rdev = file->f_dentry->d_inode->i_rdev;
 632
 633	/* Only framebuffer objects are supported ATM */
 634
 635	if (MAJOR(rdev) != FB_MAJOR) {
 636		DRM_ERROR("File descriptor is not a framebuffer\n");
 637		ret = -EBADF;
 638		goto error_fput;
 639	}
 640
 641	info = registered_fb[MINOR(rdev)];
 642
 643	if (info == NULL) {
 644		DRM_ERROR("Framebuffer minor %d is not registered\n",
 645			  MINOR(rdev));
 646		ret = -EBADF;
 647		goto error_fput;
 648	}
 649
 650	obj = drm_gem_object_alloc(dev, info->fix.smem_len);
 651
 652	if (obj == NULL) {
 653		DRM_ERROR("Unable to allocate GEM object\n");
 654		ret = -ENOMEM;
 655		goto error_fput;
 656	}
 657
 658	ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
 659
 660	if (ret)
 661		goto error_fput;
 662
 663	mutex_lock(&dev->struct_mutex);
 664
 665	priv = obj->driver_private;
 666	priv->cpuaddr = info->fix.smem_start;
 667	priv->type = DRM_KGSL_GEM_TYPE_FD_FBMEM;
 668
 669	mutex_unlock(&dev->struct_mutex);
 670	args->handle = handle;
 671
 672error_fput:
 673	fput_light(file, put_needed);
 674
 675	return ret;
 676}
 677
 678int
 679kgsl_gem_setmemtype_ioctl(struct drm_device *dev, void *data,
 680			struct drm_file *file_priv)
 681{
 682	struct drm_kgsl_gem_memtype *args = data;
 683	struct drm_gem_object *obj;
 684	struct drm_kgsl_gem_object *priv;
 685	int ret = 0;
 686
 687	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 688
 689	if (obj == NULL) {
 690		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
 691		return -EBADF;
 692	}
 693
 694	mutex_lock(&dev->struct_mutex);
 695	priv = obj->driver_private;
 696
 697	if (TYPE_IS_FD(priv->type))
 698		ret = -EINVAL;
 699	else {
 700		if (TYPE_IS_PMEM(args->type) || TYPE_IS_MEM(args->type))
 701			priv->type = args->type;
 702		else
 703			ret = -EINVAL;
 704	}
 705
 706	drm_gem_object_unreference(obj);
 707	mutex_unlock(&dev->struct_mutex);
 708
 709	return ret;
 710}
 711
 712int
 713kgsl_gem_getmemtype_ioctl(struct drm_device *dev, void *data,
 714			  struct drm_file *file_priv)
 715{
 716	struct drm_kgsl_gem_memtype *args = data;
 717	struct drm_gem_object *obj;
 718	struct drm_kgsl_gem_object *priv;
 719
 720	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 721
 722	if (obj == NULL) {
 723		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
 724		return -EBADF;
 725	}
 726
 727	mutex_lock(&dev->struct_mutex);
 728	priv = obj->driver_private;
 729
 730	args->type = priv->type;
 731
 732	drm_gem_object_unreference(obj);
 733	mutex_unlock(&dev->struct_mutex);
 734
 735	return 0;
 736}
 737
 738int
 739kgsl_gem_unbind_gpu_ioctl(struct drm_device *dev, void *data,
 740			struct drm_file *file_priv)
 741{
 742	struct drm_kgsl_gem_bind_gpu *args = data;
 743	struct drm_gem_object *obj;
 744	struct drm_kgsl_gem_object *priv;
 745
 746	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 747
 748	if (obj == NULL) {
 749		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
 750		return -EBADF;
 751	}
 752
 753	mutex_lock(&dev->struct_mutex);
 754	priv = obj->driver_private;
 755
 756	if (--priv->bound == 0)
 757		kgsl_gem_unmap(obj);
 758
 759	drm_gem_object_unreference(obj);
 760	mutex_unlock(&dev->struct_mutex);
 761	return 0;
 762}
 763
 764#ifdef CONFIG_MSM_KGSL_MMU
 765static int
 766kgsl_gem_map(struct drm_gem_object *obj)
 767{
 768	struct drm_kgsl_gem_object *priv = obj->driver_private;
 769	int index;
 770	int ret = -EINVAL;
 771	int flags = KGSL_MEMFLAGS_CONPHYS;
 772
 773	if (priv->flags & DRM_KGSL_GEM_FLAG_MAPPED)
 774		return 0;
 775
 776	if (TYPE_IS_PMEM(priv->type) ||
 777	    priv->type == DRM_KGSL_GEM_TYPE_FD_FBMEM)
 778		flags = KGSL_MEMFLAGS_CONPHYS;
 779	else
 780		flags = KGSL_MEMFLAGS_VMALLOC_MEM;
 781
 782	/* Get the global page table */
 783
 784	if (priv->pagetable == NULL) {
 785		struct kgsl_device *kgsldev =
 786			kgsl_get_device(KGSL_DEVICE_YAMATO);
 787		struct kgsl_mmu *mmu = kgsl_get_mmu(kgsldev);
 788
 789		if (mmu == NULL) {
 790			DRM_ERROR("The GPU MMU is not enabled\n");
 791			return -EINVAL;
 792		}
 793
 794		priv->pagetable =
 795			kgsl_mmu_getpagetable(mmu, KGSL_MMU_GLOBAL_PT);
 796
 797		if (priv->pagetable == NULL) {
 798			DRM_ERROR("Unable to get the GPU MMU pagetable\n");
 799			return -EINVAL;
 800		}
 801	}
 802
 803	for (index = 0; index < priv->bufcount; index++) {
 804		ret = kgsl_mmu_map(priv->pagetable,
 805				   (unsigned long) priv->cpuaddr +
 806				   priv->bufs[index].offset,
 807				   obj->size,
 808				   GSL_PT_PAGE_RV | GSL_PT_PAGE_WV,
 809				   &priv->bufs[index].gpuaddr,
 810				   flags | KGSL_MEMFLAGS_ALIGN4K);
 811	}
 812
 813	/* Add cached memory to the list to be cached */
 814
 815	if (priv->type == DRM_KGSL_GEM_TYPE_KMEM ||
 816	    priv->type & DRM_KGSL_GEM_CACHE_MASK)
 817		list_add(&priv->list, &kgsl_mem_list);
 818
 819	priv->flags |= DRM_KGSL_GEM_FLAG_MAPPED;
 820
 821	return ret;
 822}
 823#else
 824static int
 825kgsl_gem_map(struct drm_gem_object *obj)
  826{
	struct drm_kgsl_gem_object *priv = obj->driver_private;
	int index;

 827	if (TYPE_IS_PMEM(priv->type)) {
 828		for (index = 0; index < priv->bufcount; index++)
 829			priv->bufs[index].gpuaddr =
 830			priv->cpuaddr + priv->bufs[index].offset;
 831
 832		return 0;
 833	}
 834
 835	return -EINVAL;
 836}
 837#endif
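/*
 * Two mapping strategies: with CONFIG_MSM_KGSL_MMU every buffer is mapped
 * into the global GPU pagetable and bufs[i].gpuaddr is a GPU virtual
 * address; without an MMU only physically contiguous PMEM can be used,
 * and the GPU address is simply the physical address of each slice.
 */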
 838
 839int
 840kgsl_gem_bind_gpu_ioctl(struct drm_device *dev, void *data,
 841			struct drm_file *file_priv)
 842{
 843	struct drm_kgsl_gem_bind_gpu *args = data;
 844	struct drm_gem_object *obj;
 845	struct drm_kgsl_gem_object *priv;
 846	int ret = 0;
 847
 848	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 849
 850	if (obj == NULL) {
 851		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
 852		return -EBADF;
 853	}
 854
 855	mutex_lock(&dev->struct_mutex);
 856	priv = obj->driver_private;
 857
 858	if (priv->bound++ == 0) {
 859
 860		if (!kgsl_gem_memory_allocated(obj)) {
 861			DRM_ERROR("Memory not allocated for this object\n");
 862			ret = -ENOMEM;
 863			goto out;
 864		}
 865
 866		ret = kgsl_gem_map(obj);
 867
 868		/* This is legacy behavior - use GET_BUFFERINFO instead */
 869		args->gpuptr = priv->bufs[0].gpuaddr;
 870	}
 871out:
 872	drm_gem_object_unreference(obj);
 873	mutex_unlock(&dev->struct_mutex);
 874	return ret;
 875}
 876
 877/* Allocate the memory and prepare it for CPU mapping */
 878
 879int
 880kgsl_gem_alloc_ioctl(struct drm_device *dev, void *data,
 881		    struct drm_file *file_priv)
 882{
 883	struct drm_kgsl_gem_alloc *args = data;
 884	struct drm_gem_object *obj;
 885	struct drm_kgsl_gem_object *priv;
 886	int ret;
 887
 888	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 889
 890	if (obj == NULL) {
 891		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
 892		return -EBADF;
 893	}
 894
 895	mutex_lock(&dev->struct_mutex);
 896	priv = obj->driver_private;
 897
 898	ret = kgsl_gem_alloc_memory(obj);
 899
 900	if (ret) {
 901		DRM_ERROR("Unable to allocate object memory\n");
 902	} else if (!priv->mmap_offset) {
 903		ret = kgsl_gem_create_mmap_offset(obj);
 904		if (ret)
 905			DRM_ERROR("Unable to create a mmap offset\n");
 906	}
 907
 908	args->offset = priv->mmap_offset;
 909
 910	drm_gem_object_unreference(obj);
 911	mutex_unlock(&dev->struct_mutex);
 912
 913	return ret;
 914}
 915
 916int
 917kgsl_gem_mmap_ioctl(struct drm_device *dev, void *data,
 918			struct drm_file *file_priv)
 919{
 920	struct drm_kgsl_gem_mmap *args = data;
 921	struct drm_gem_object *obj;
 922	unsigned long addr;
 923
 924	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 925
 926	if (obj == NULL) {
 927		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
 928		return -EBADF;
 929	}
 930
 931	down_write(&current->mm->mmap_sem);
 932
 933	addr = do_mmap(obj->filp, 0, args->size,
 934		       PROT_READ | PROT_WRITE, MAP_SHARED,
 935		       args->offset);
 936
 937	up_write(&current->mm->mmap_sem);
 938
 939	mutex_lock(&dev->struct_mutex);
 940	drm_gem_object_unreference(obj);
 941	mutex_unlock(&dev->struct_mutex);
 942
 943	if (IS_ERR((void *) addr))
 944		return addr;
 945
 946	args->hostptr = (uint32_t) addr;
 947	return 0;
 948}
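/*
 * The do_mmap() above ends up in msm_drm_gem_mmap() (the driver's .mmap
 * fop), which picks the fault handler and page protection from the
 * object's memory type: vmalloc-backed KMEM is faulted in page by page
 * via kgsl_gem_kmem_fault(), while PMEM/FB memory is inserted by PFN via
 * kgsl_gem_phys_fault().
 */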
 949
 950/* This function is deprecated */
 951
 952int
 953kgsl_gem_prep_ioctl(struct drm_device *dev, void *data,
 954			struct drm_file *file_priv)
 955{
 956	struct drm_kgsl_gem_prep *args = data;
 957	struct drm_gem_object *obj;
 958	struct drm_kgsl_gem_object *priv;
 959	int ret;
 960
 961	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 962
 963	if (obj == NULL) {
 964		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
 965		return -EBADF;
 966	}
 967
 968	mutex_lock(&dev->struct_mutex);
 969	priv = obj->driver_private;
 970
 971	ret = kgsl_gem_alloc_memory(obj);
 972	if (ret) {
 973		DRM_ERROR("Unable to allocate object memory\n");
 974		drm_gem_object_unreference(obj);
 975		mutex_unlock(&dev->struct_mutex);
 976		return ret;
 977	}
 978
 979	if (priv->mmap_offset == 0) {
 980		ret = kgsl_gem_create_mmap_offset(obj);
 981		if (ret) {
 982			drm_gem_object_unreference(obj);
 983			mutex_unlock(&dev->struct_mutex);
 984			return ret;
 985		}
 986	}
 987
 988	args->offset = priv->mmap_offset;
 989	args->phys = priv->cpuaddr;
 990
 991	drm_gem_object_unreference(obj);
 992	mutex_unlock(&dev->struct_mutex);
 993
 994	return 0;
 995}
 996
 997int
 998kgsl_gem_get_bufinfo_ioctl(struct drm_device *dev, void *data,
 999			   struct drm_file *file_priv)
1000{
1001	struct drm_kgsl_gem_bufinfo *args = data;
1002	struct drm_gem_object *obj;
1003	struct drm_kgsl_gem_object *priv;
1004	int ret = -EINVAL;
1005	int index;
1006
1007	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1008
1009	if (obj == NULL) {
1010		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
1011		return -EBADF;
1012	}
1013
1014	mutex_lock(&dev->struct_mutex);
1015	priv = obj->driver_private;
1016
1017	if (!kgsl_gem_memory_allocated(obj)) {
1018		DRM_ERROR("Memory not allocated for this object\n");
1019		goto out;
1020	}
1021
1022	for (index = 0; index < priv->bufcount; index++) {
1023		args->offset[index] = priv->bufs[index].offset;
1024		args->gpuaddr[index] = priv->bufs[index].gpuaddr;
1025	}
1026
1027	args->count = priv->bufcount;
1028	args->active = priv->active;
1029
1030	ret = 0;
1031
1032out:
1033	drm_gem_object_unreference(obj);
1034	mutex_unlock(&dev->struct_mutex);
1035
1036	return ret;
1037}
1038
1039int
1040kgsl_gem_set_bufcount_ioctl(struct drm_device *dev, void *data,
1041			  struct drm_file *file_priv)
1042{
1043	struct drm_kgsl_gem_bufcount *args = data;
1044	struct drm_gem_object *obj;
1045	struct drm_kgsl_gem_object *priv;
1046	int ret = -EINVAL;
1047
1048	if (args->bufcount < 1 || args->bufcount > DRM_KGSL_GEM_MAX_BUFFERS)
1049		return -EINVAL;
1050
1051	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1052
1053	if (obj == NULL) {
1054		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
1055		return -EBADF;
1056	}
1057
1058	mutex_lock(&dev->struct_mutex);
1059	priv = obj->driver_private;
1060
1061	/* It is too much math to worry about what happens if we are already
1062	   allocated, so just bail if we are */
1063
1064	if (kgsl_gem_memory_allocated(obj)) {
 1065		DRM_ERROR("Memory already allocated - cannot change "
1066			  "number of buffers\n");
1067		goto out;
1068	}
1069
1070	priv->bufcount = args->bufcount;
1071	ret = 0;
1072
1073out:
1074	drm_gem_object_unreference(obj);
1075	mutex_unlock(&dev->struct_mutex);
1076
1077	return ret;
1078}
1079
1080int
1081kgsl_gem_set_active_ioctl(struct drm_device *dev, void *data,
1082			  struct drm_file *file_priv)
1083{
1084	struct drm_kgsl_gem_active *args = data;
1085	struct drm_gem_object *obj;
1086	struct drm_kgsl_gem_object *priv;
1087	int ret = -EINVAL;
1088
1089	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1090
1091	if (obj == NULL) {
1092		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
1093		return -EBADF;
1094	}
1095
1096	mutex_lock(&dev->struct_mutex);
1097	priv = obj->driver_private;
1098
1099	if (args->active < 0 || args->active >= priv->bufcount) {
1100		DRM_ERROR("Invalid active buffer %d\n", args->active);
1101		goto out;
1102	}
1103
1104	priv->active = args->active;
1105	ret = 0;
1106
1107out:
1108	drm_gem_object_unreference(obj);
1109	mutex_unlock(&dev->struct_mutex);
1110
1111	return ret;
1112}
1113
1114int kgsl_gem_kmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1115{
1116	struct drm_gem_object *obj = vma->vm_private_data;
1117	struct drm_device *dev = obj->dev;
1118	struct drm_kgsl_gem_object *priv;
1119	unsigned long offset, pg;
1120	struct page *page;
1121
1122	mutex_lock(&dev->struct_mutex);
1123
1124	priv = obj->driver_private;
1125
1126	offset = (unsigned long) vmf->virtual_address - vma->vm_start;
1127	pg = (unsigned long) priv->cpuaddr + offset;
1128
1129	page = vmalloc_to_page((void *) pg);
1130	if (!page) {
1131		mutex_unlock(&dev->struct_mutex);
1132		return VM_FAULT_SIGBUS;
1133	}
1134
1135	get_page(page);
1136	vmf->page = page;
1137
1138	mutex_unlock(&dev->struct_mutex);
1139	return 0;
1140}
1141
1142int kgsl_gem_phys_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1143{
1144	struct drm_gem_object *obj = vma->vm_private_data;
1145	struct drm_device *dev = obj->dev;
1146	struct drm_kgsl_gem_object *priv;
1147	unsigned long offset, pfn;
1148	int ret = 0;
1149
1150	offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
1151		PAGE_SHIFT;
1152
1153	mutex_lock(&dev->struct_mutex);
1154
1155	priv = obj->driver_private;
1156
1157	pfn = (priv->cpuaddr >> PAGE_SHIFT) + offset;
1158	ret = vm_insert_pfn(vma,
1159			    (unsigned long) vmf->virtual_address, pfn);
1160	mutex_unlock(&dev->struct_mutex);
1161
1162	switch (ret) {
1163	case -ENOMEM:
1164	case -EAGAIN:
1165		return VM_FAULT_OOM;
1166	case -EFAULT:
1167		return VM_FAULT_SIGBUS;
1168	default:
1169		return VM_FAULT_NOPAGE;
1170	}
1171}
1172
1173static struct vm_operations_struct kgsl_gem_kmem_vm_ops = {
1174	.fault = kgsl_gem_kmem_fault,
1175	.open = drm_gem_vm_open,
1176	.close = drm_gem_vm_close,
1177};
1178
1179static struct vm_operations_struct kgsl_gem_phys_vm_ops = {
1180	.fault = kgsl_gem_phys_fault,
1181	.open = drm_gem_vm_open,
1182	.close = drm_gem_vm_close,
1183};
1184
1185/* This is a clone of the standard drm_gem_mmap function modified to allow
1186   us to properly map KMEM regions as well as the PMEM regions */
1187
1188int msm_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1189{
1190	struct drm_file *priv = filp->private_data;
1191	struct drm_device *dev = priv->minor->dev;
1192	struct drm_gem_mm *mm = dev->mm_private;
1193	struct drm_local_map *map = NULL;
1194	struct drm_gem_object *obj;
1195	struct drm_hash_item *hash;
1196	struct drm_kgsl_gem_object *gpriv;
1197	int ret = 0;
1198
1199	mutex_lock(&dev->struct_mutex);
1200
1201	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
1202		mutex_unlock(&dev->struct_mutex);
1203		return drm_mmap(filp, vma);
1204	}
1205
1206	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
1207	if (!map ||
1208	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
1209		ret =  -EPERM;
1210		goto out_unlock;
1211	}
1212
1213	/* Check for valid size. */
1214	if (map->size < vma->vm_end - vma->vm_start) {
1215		ret = -EINVAL;
1216		goto out_unlock;
1217	}
1218
1219	obj = map->handle;
1220
1221	gpriv = obj->driver_private;
1222
1223	/* VM_PFNMAP is only for memory that doesn't use struct page
1224	 * in other words, not "normal" memory.  If you try to use it
1225	 * with "normal" memory then the mappings don't get flushed. */
1226
1227	if (TYPE_IS_MEM(gpriv->type)) {
1228		vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
1229		vma->vm_ops = &kgsl_gem_kmem_vm_ops;
1230	} else {
1231		vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP |
1232			VM_DONTEXPAND;
1233		vma->vm_ops = &kgsl_gem_phys_vm_ops;
1234	}
1235
1236	vma->vm_private_data = map->handle;
1237
1238
1239	/* Take care of requested caching policy */
1240	if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM ||
1241	    gpriv->type & DRM_KGSL_GEM_CACHE_MASK) {
1242		if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACKWA)
1243			vma->vm_page_prot =
1244			pgprot_writebackwacache(vma->vm_page_prot);
1245		else if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACK)
1246				vma->vm_page_prot =
1247				pgprot_writebackcache(vma->vm_page_prot);
1248		else if (gpriv->type & DRM_KGSL_GEM_CACHE_WTHROUGH)
1249				vma->vm_page_prot =
1250				pgprot_writethroughcache(vma->vm_page_prot);
1251		else
1252			vma->vm_page_prot =
1253			pgprot_writecombine(vma->vm_page_prot);
1254	} else {
1255		if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE)
1256			vma->vm_page_prot =
1257			pgprot_noncached(vma->vm_page_prot);
1258		else
1259			/* default pmem is WC */
1260			vma->vm_page_prot =
1261			pgprot_writecombine(vma->vm_page_prot);
1262	}
1263
1264	/* flush out existing KMEM cached mappings if new ones are
1265	 * of uncached type */
1266	if (IS_MEM_UNCACHED(gpriv->type))
1267			kgsl_cache_range_op((unsigned long) gpriv->cpuaddr,
1268					    (obj->size * gpriv->bufcount),
1269					    KGSL_MEMFLAGS_CACHE_FLUSH |
1270					    KGSL_MEMFLAGS_VMALLOC_MEM);
1271
1272	/* Add the other memory types here */
1273
1274	/* Take a ref for this mapping of the object, so that the fault
1275	 * handler can dereference the mmap offset's pointer to the object.
1276	 * This reference is cleaned up by the corresponding vm_close
1277	 * (which should happen whether the vma was created by this call, or
1278	 * by a vm_open due to mremap or partial unmap or whatever).
1279	 */
1280	drm_gem_object_reference(obj);
1281
1282	vma->vm_file = filp;	/* Needed for drm_vm_open() */
1283	drm_vm_open_locked(vma);
1284
1285out_unlock:
1286	mutex_unlock(&dev->struct_mutex);
1287
1288	return ret;
1289}
1290
1291void
1292cleanup_fence(struct drm_kgsl_gem_object_fence *fence, int check_waiting)
1293{
1294	int j;
1295	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
1296	struct drm_kgsl_gem_object *unlock_obj;
1297	struct drm_gem_object *obj;
1298	struct drm_kgsl_gem_object_wait_list_entry *lock_next;
1299
1300	fence->ts_valid = 0;
1301	fence->timestamp = -1;
1302	fence->ts_device = -1;
1303
1304	/* Walk the list of buffers in this fence and clean up the */
1305	/* references. Note that this can cause memory allocations */
1306	/* to be freed */
1307	for (j = fence->num_buffers; j > 0; j--) {
1308		this_fence_entry =
1309				(struct drm_kgsl_gem_object_fence_list_entry *)
1310				fence->buffers_in_fence.prev;
1311
1312		this_fence_entry->in_use = 0;
1313		obj = this_fence_entry->gem_obj;
1314		unlock_obj = obj->driver_private;
1315
1316		/* Delete it from the list */
1317
1318		list_del(&this_fence_entry->list);
1319
1320		/* we are unlocking - see if there are other pids waiting */
1321		if (check_waiting) {
1322			if (!list_empty(&unlock_obj->wait_list)) {
1323				lock_next =
1324				(struct drm_kgsl_gem_object_wait_list_entry *)
1325					unlock_obj->wait_list.prev;
1326
1327				list_del((struct list_head *)&lock_next->list);
1328
1329				unlock_obj->lockpid = 0;
1330				wake_up_interruptible(
1331						&lock_next->process_wait_q);
1332				lock_next->pid = 0;
1333
1334			} else {
1335				/* List is empty so set pid to 0 */
1336				unlock_obj->lockpid = 0;
1337			}
1338		}
1339
1340		drm_gem_object_unreference(obj);
1341	}
1342	/* here all the buffers in the fence are released */
1343	/* clear the fence entry */
1344	fence->fence_id = ENTRY_EMPTY;
1345}
1346
1347int
1348find_empty_fence(void)
1349{
1350	int i;
1351
1352	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
1353		if (gem_buf_fence[i].fence_id == ENTRY_EMPTY) {
1354			gem_buf_fence[i].fence_id = fence_id++;
1355			gem_buf_fence[i].ts_valid = 0;
1356			INIT_LIST_HEAD(&(gem_buf_fence[i].buffers_in_fence));
1357			if (fence_id == 0xFFFFFFF0)
1358				fence_id = 1;
1359			return i;
1360		} else {
1361
1362			/* Look for entries to be cleaned up */
1363			if (gem_buf_fence[i].fence_id == ENTRY_NEEDS_CLEANUP)
1364				cleanup_fence(&gem_buf_fence[i], 0);
1365		}
1366	}
1367
1368	return ENTRY_EMPTY;
1369}
1370
1371int
1372find_fence(int index)
1373{
1374	int i;
1375
1376	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
1377		if (gem_buf_fence[i].fence_id == index)
1378			return i;
1379	}
1380
1381	return ENTRY_EMPTY;
1382}
1383
1384void
1385wakeup_fence_entries(struct drm_kgsl_gem_object_fence *fence)
1386{
 1387	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
1388	struct drm_kgsl_gem_object_wait_list_entry *lock_next;
1389	struct drm_kgsl_gem_object *unlock_obj;
1390	struct drm_gem_object *obj;
1391
1392	/* TS has expired when we get here */
1393	fence->ts_valid = 0;
1394	fence->timestamp = -1;
1395	fence->ts_device = -1;
1396
1397	list_for_each_entry(this_fence_entry, &fence->buffers_in_fence, list) {
1398		obj = this_fence_entry->gem_obj;
1399		unlock_obj = obj->driver_private;
1400
1401		if (!list_empty(&unlock_obj->wait_list)) {
1402			lock_next =
1403				(struct drm_kgsl_gem_object_wait_list_entry *)
1404					unlock_obj->wait_list.prev;
1405
1406			/* Unblock the pid */
1407			lock_next->pid = 0;
1408
1409			/* Delete it from the list */
1410			list_del((struct list_head *)&lock_next->list);
1411
1412			unlock_obj->lockpid = 0;
1413			wake_up_interruptible(&lock_next->process_wait_q);
1414
1415		} else {
1416			/* List is empty so set pid to 0 */
1417			unlock_obj->lockpid = 0;
1418		}
1419	}
1420	fence->fence_id = ENTRY_NEEDS_CLEANUP;  /* Mark it as needing cleanup */
1421}
1422
1423static int kgsl_ts_notifier_cb(struct notifier_block *blk,
1424			       unsigned long code, void *_param)
1425{
1426	struct drm_kgsl_gem_object_fence *fence;
1427	struct kgsl_device *device = kgsl_get_device(code);
1428	int i;
1429
1430	/* loop through the fences to see what things can be processed */
1431
1432	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
1433		fence = &gem_buf_fence[i];
1434		if (!fence->ts_valid || fence->ts_device != code)
1435			continue;
1436
1437		if (kgsl_check_timestamp(device, fence->timestamp))
1438			wakeup_fence_entries(fence);
1439	}
1440
1441	return 0;
1442}
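/*
 * Fence lifecycle: LOCK_HANDLE builds a fence over a set of handles and
 * records the locking pid; UNLOCK_ON_TS arms it with a GPU timestamp, and
 * the timestamp notifier above wakes any waiters once that timestamp
 * retires, marking the fence ENTRY_NEEDS_CLEANUP so find_empty_fence()
 * can reclaim it lazily.  UNLOCK_HANDLE tears a fence down immediately
 * through cleanup_fence().
 */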
1443
1444int
1445kgsl_gem_lock_handle_ioctl(struct drm_device *dev, void *data,
1446						   struct drm_file *file_priv)
1447{
1448	/* The purpose of this function is to lock a given set of handles. */
1449	/* The driver will maintain a list of locked handles. */
1450	/* If a request comes in for a handle that's locked the thread will */
1451	/* block until it's no longer in use. */
1452
1453	struct drm_kgsl_gem_lock_handles *args = data;
1454	struct drm_gem_object *obj;
1455	struct drm_kgsl_gem_object *priv;
1456	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
1457	struct drm_kgsl_gem_object_fence *fence;
1458	struct drm_kgsl_gem_object_wait_list_entry *lock_item;
1459	int i, j;
1460	int result = 0;
1461	uint32_t *lock_list;
1462	uint32_t *work_list = NULL;
1463	int32_t fence_index;
1464
1465	/* copy in the data from user space */
1466	lock_list = kzalloc(sizeof(uint32_t) * args->num_handles, GFP_KERNEL);
1467	if (!lock_list) {
 1468		DRM_ERROR("Unable to allocate memory for lock list\n");
1469		result = -ENOMEM;
1470		goto error;
1471	}
1472
1473	if (copy_from_user(lock_list, args->handle_list,
1474			   sizeof(uint32_t) * args->num_handles)) {
1475		DRM_ERROR("Unable to copy the lock list from the user\n");
1476		result = -EFAULT;
1477		goto free_handle_list;
1478	}
1479
1480
1481	work_list = lock_list;
1482	mutex_lock(&dev->struct_mutex);
1483
1484	/* build the fence for this group of handles */
1485	fence_index = find_empty_fence();
1486	if (fence_index == ENTRY_EMPTY) {
 1487		DRM_ERROR("Unable to find an empty fence\n");
1488		args->lock_id = 0xDEADBEEF;
1489		result = -EFAULT;
1490		goto out_unlock;
1491	}
1492
1493	fence = &gem_buf_fence[fence_index];
1494	gem_buf_fence[fence_index].num_buffers = args->num_handles;
1495	args->lock_id = gem_buf_fence[fence_index].fence_id;
1496
1497	for (j = args->num_handles; j > 0; j--, lock_list++) {
1498		obj = drm_gem_object_lookup(dev, file_priv, *lock_list);
1499
1500		if (obj == NULL) {
1501			DRM_ERROR("Invalid GEM handle %x\n", *lock_list);
1502			result = -EBADF;
1503			goto out_unlock;
1504		}
1505
1506		priv = obj->driver_private;
1507		this_fence_entry = NULL;
1508
1509		/* get a fence entry to hook into the fence */
1510		for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
1511			if (!priv->fence_entries[i].in_use) {
1512				this_fence_entry = &priv->fence_entries[i];
1513				this_fence_entry->in_use = 1;
1514				break;
1515			}
1516		}
1517
1518		if (this_fence_entry == NULL) {
1519			fence->num_buffers = 0;
1520			fence->fence_id = ENTRY_EMPTY;
1521			args->lock_id = 0xDEADBEAD;
1522			result = -EFAULT;
1523			drm_gem_object_unreference(obj);
1524			goto out_unlock;
1525		}
1526
1527		/* We're trying to lock - add to a fence */
1528		list_add((struct list_head *)this_fence_entry,
1529				 &gem_buf_fence[fence_index].buffers_in_fence);
1530		if (priv->lockpid) {
1531
1532			if (priv->lockpid == args->pid) {
1533				/* now that things are running async this  */
1534				/* happens when an op isn't done */
1535				/* so it's already locked by the calling pid */
1536					continue;
1537			}
1538
1539
1540			/* if a pid already had it locked */
1541			/* create and add to wait list */
1542			for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
1543				if (priv->wait_entries[i].in_use == 0) {
1544					/* this one is empty */
1545					lock_item = &priv->wait_entries[i];
 1546					lock_item->in_use = 1;
1547					lock_item->pid = args->pid;
1548					INIT_LIST_HEAD((struct list_head *)
1549						&priv->wait_entries[i]);
1550					break;
1551				}
1552			}
1553
1554			if (i == DRM_KGSL_HANDLE_WAIT_ENTRIES) {
1555
1556				result =  -EFAULT;
1557				drm_gem_object_unreference(obj);
1558				goto out_unlock;
1559			}
1560
1561			list_add_tail((struct list_head *)&lock_item->list,
1562							&priv->wait_list);
1563			mutex_unlock(&dev->struct_mutex);
1564			/* here we need to block */
1565			wait_event_interruptible_timeout(
1566					priv->wait_entries[i].process_wait_q,
1567					(priv->lockpid == 0),
1568					msecs_to_jiffies(64));
1569			mutex_lock(&dev->struct_mutex);
1570			lock_item->in_use = 0;
1571		}
1572
1573		/* Getting here means no one currently holds the lock */
1574		priv->lockpid = args->pid;
1575
1576		args->lock_id = gem_buf_fence[fence_index].fence_id;
1577	}
1578	fence->lockpid = args->pid;
1579
1580out_unlock:
1581	mutex_unlock(&dev->struct_mutex);
1582
1583free_handle_list:
1584	kfree(work_list);
1585
1586error:
1587	return result;
1588}
1589
1590int
1591kgsl_gem_unlock_handle_ioctl(struct drm_device *dev, void *data,
1592			 struct drm_file *file_priv)
1593{
1594	struct drm_kgsl_gem_unlock_handles *args = data;
1595	int result = 0;
1596	int32_t fence_index;
1597
1598	mutex_lock(&dev->struct_mutex);
1599	fence_index = find_fence(args->lock_id);
1600	if (fence_index == ENTRY_EMPTY) {
1601		DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
1602		result = -EFAULT;
1603		goto out_unlock;
1604	}
1605
1606	cleanup_fence(&gem_buf_fence[fence_index], 1);
1607
1608out_unlock:
1609	mutex_unlock(&dev->struct_mutex);
1610
1611	return result;
1612}
1613
1614
1615int
1616kgsl_gem_unlock_on_ts_ioctl(struct drm_device *dev, void *data,
1617			struct drm_file *file_priv)
1618{
1619	struct drm_kgsl_gem_unlock_on_ts *args = data;
1620	int result = 0;
1621	int ts_done = 0;
1622	int32_t fence_index, ts_device;
1623	struct drm_kgsl_gem_object_fence *fence;
1624	struct kgsl_device *device;
1625
1626	if (args->type == DRM_KGSL_GEM_TS_3D)
1627		ts_device = KGSL_DEVICE_YAMATO;
1628	else if (args->type == DRM_KGSL_GEM_TS_2D)
1629		ts_device = KGSL_DEVICE_2D0;
1630	else {
1631		result = -EINVAL;
1632		goto error;
1633	}
1634
1635	device = kgsl_get_device(ts_device);
1636	ts_done = kgsl_check_timestamp(device, args->timestamp);
1637
1638	mutex_lock(&dev->struct_mutex);
1639
1640	fence_index = find_fence(args->lock_id);
1641	if (fence_index == ENTRY_EMPTY) {
1642		DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
1643		result = -EFAULT;
1644		goto out_unlock;
1645	}
1646
1647	fence = &gem_buf_fence[fence_index];
1648	fence->ts_device = ts_device;
1649
1650	if (!ts_done)
1651		fence->ts_valid = 1;
1652	else
1653		cleanup_fence(fence, 1);
1654
1655
1656out_unlock:
1657	mutex_unlock(&dev->struct_mutex);
1658
1659error:
1660	return result;
1661}
1662
1663struct drm_ioctl_desc kgsl_drm_ioctls[] = {
1664	DRM_IOCTL_DEF(DRM_KGSL_GEM_CREATE, kgsl_gem_create_ioctl, 0),
1665	DRM_IOCTL_DEF(DRM_KGSL_GEM_PREP, kgsl_gem_prep_ioctl, 0),
1666	DRM_IOCTL_DEF(DRM_KGSL_GEM_SETMEMTYPE, kgsl_gem_setmemtype_ioctl, 0),
1667	DRM_IOCTL_DEF(DRM_KGSL_GEM_GETMEMTYPE, kgsl_gem_getmemtype_ioctl, 0),
1668	DRM_IOCTL_DEF(DRM_KGSL_GEM_BIND_GPU, kgsl_gem_bind_gpu_ioctl, 0),
1669	DRM_IOCTL_DEF(DRM_KGSL_GEM_UNBIND_GPU, kgsl_gem_unbind_gpu_ioctl, 0),
1670	DRM_IOCTL_DEF(DRM_KGSL_GEM_ALLOC, kgsl_gem_alloc_ioctl, 0),
1671	DRM_IOCTL_DEF(DRM_KGSL_GEM_MMAP, kgsl_gem_mmap_ioctl, 0),
1672	DRM_IOCTL_DEF(DRM_KGSL_GEM_GET_BUFINFO, kgsl_gem_get_bufinfo_ioctl, 0),
1673	DRM_IOCTL_DEF(DRM_KGSL_GEM_SET_BUFCOUNT,
1674		      kgsl_gem_set_bufcount_ioctl, 0),
1675	DRM_IOCTL_DEF(DRM_KGSL_GEM_SET_ACTIVE, kgsl_gem_set_active_ioctl, 0),
1676	DRM_IOCTL_DEF(DRM_KGSL_GEM_LOCK_HANDLE,
1677				  kgsl_gem_lock_handle_ioctl, 0),
1678	DRM_IOCTL_DEF(DRM_KGSL_GEM_UNLOCK_HANDLE,
1679				  kgsl_gem_unlock_handle_ioctl, 0),
1680	DRM_IOCTL_DEF(DRM_KGSL_GEM_UNLOCK_ON_TS,
1681				  kgsl_gem_unlock_on_ts_ioctl, 0),
1682	DRM_IOCTL_DEF(DRM_KGSL_GEM_CREATE_FD, kgsl_gem_create_fd_ioctl,
1683		      DRM_MASTER),
1684};
1685
1686static struct drm_driver driver = {
1687	.driver_features = DRIVER_USE_PLATFORM_DEVICE | DRIVER_GEM,
1688	.load = kgsl_drm_load,
1689	.unload = kgsl_drm_unload,
1690	.firstopen = kgsl_drm_firstopen,
1691	.lastclose = kgsl_drm_lastclose,
1692	.preclose = kgsl_drm_preclose,
1693	.suspend = kgsl_drm_suspend,
1694	.resume = kgsl_drm_resume,
1695	.reclaim_buffers = drm_core_reclaim_buffers,
1696	.get_map_ofs = drm_core_get_map_ofs,
1697	.get_reg_ofs = drm_core_get_reg_ofs,
1698	.gem_init_object = kgsl_gem_init_object,
1699	.gem_free_object = kgsl_gem_free_object,
1700	.ioctls = kgsl_drm_ioctls,
1701
1702	.fops = {
1703		 .owner = THIS_MODULE,
1704		 .open = drm_open,
1705		 .release = drm_release,
1706		 .unlocked_ioctl = drm_ioctl,
1707		 .mmap = msm_drm_gem_mmap,
1708		 .poll = drm_poll,
1709		 .fasync = drm_fasync,
1710		 },
1711
1712	.name = DRIVER_NAME,
1713	.desc = DRIVER_DESC,
1714	.date = DRIVER_DATE,
1715	.major = DRIVER_MAJOR,
1716	.minor = DRIVER_MINOR,
1717	.patchlevel = DRIVER_PATCHLEVEL,
1718};
1719
1720int kgsl_drm_init(struct platform_device *dev)
1721{
1722	int i;
1723
1724	driver.num_ioctls = DRM_ARRAY_SIZE(kgsl_drm_ioctls);
1725	driver.platform_device = dev;
1726
1727	INIT_LIST_HEAD(&kgsl_mem_list);
1728
1729	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
1730		gem_buf_fence[i].num_buffers = 0;
1731		gem_buf_fence[i].ts_valid = 0;
1732		gem_buf_fence[i].fence_id = ENTRY_EMPTY;
1733	}
1734
1735	return drm_init(&driver);
1736}
1737
1738void kgsl_drm_exit(void)
1739{
1740	drm_exit(&driver);
1741}