/drivers/gpu/msm/kgsl_drm.c

Source: https://gitlab.com/TeamTators/hp-kernel-tenderloin

  1. /* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. * You should have received a copy of the GNU General Public License
  13. * along with this program; if not, write to the Free Software
  14. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  15. * 02110-1301, USA.
  16. */
  17. /* Implements an interface between KGSL and the DRM subsystem. For now this
  18. * is pretty simple, but it will take on more of the workload as time goes
  19. * on
  20. */
  21. #include "drmP.h"
  22. #include "drm.h"
  23. #include <linux/android_pmem.h>
  24. #include <linux/notifier.h>
  25. #include "kgsl.h"
  26. #include "kgsl_device.h"
  27. #include "kgsl_drawctxt.h"
  28. #include "kgsl_drm.h"
  29. #include "kgsl_mmu.h"
  30. #include "kgsl_yamato.h"
  31. #include "kgsl_sharedmem.h"
  32. #define DRIVER_AUTHOR "Qualcomm"
  33. #define DRIVER_NAME "kgsl"
  34. #define DRIVER_DESC "KGSL DRM"
  35. #define DRIVER_DATE "20100127"
  36. #define DRIVER_MAJOR 2
  37. #define DRIVER_MINOR 1
  38. #define DRIVER_PATCHLEVEL 1
  39. #define DRM_KGSL_GEM_FLAG_MAPPED (1 << 0)
  40. #define ENTRY_EMPTY -1
  41. #define ENTRY_NEEDS_CLEANUP -2
  42. #define DRM_KGSL_NUM_FENCE_ENTRIES (DRM_KGSL_HANDLE_WAIT_ENTRIES << 2)
  43. #define DRM_KGSL_HANDLE_WAIT_ENTRIES 5
  44. /* Returns true if the memory type is in PMEM */
  45. #ifdef CONFIG_KERNEL_PMEM_SMI_REGION
  46. #define TYPE_IS_PMEM(_t) \
  47. (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
  48. ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_SMI) || \
  49. ((_t) & DRM_KGSL_GEM_TYPE_PMEM))
  50. #else
  51. #define TYPE_IS_PMEM(_t) \
  52. (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
  53. ((_t) & (DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI)))
  54. #endif
  55. /* Returns true if the memory type is regular */
  56. #define TYPE_IS_MEM(_t) \
  57. (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM) || \
  58. ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
  59. ((_t) & DRM_KGSL_GEM_TYPE_MEM))
  60. #define TYPE_IS_FD(_t) ((_t) & DRM_KGSL_GEM_TYPE_FD_MASK)
  61. /* Returns true if KMEM region is uncached */
  62. #define IS_MEM_UNCACHED(_t) \
  63. ((_t == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
  64. (_t == DRM_KGSL_GEM_TYPE_KMEM) || \
  65. (TYPE_IS_MEM(_t) && (_t & DRM_KGSL_GEM_CACHE_WCOMBINE)))
  66. struct drm_kgsl_gem_object_wait_list_entry {
  67. struct list_head list;
  68. int pid;
  69. int in_use;
  70. wait_queue_head_t process_wait_q;
  71. };
  72. struct drm_kgsl_gem_object_fence {
  73. int32_t fence_id;
  74. unsigned int num_buffers;
  75. int ts_valid;
  76. unsigned int timestamp;
  77. int ts_device;
  78. int lockpid;
  79. struct list_head buffers_in_fence;
  80. };
  81. struct drm_kgsl_gem_object_fence_list_entry {
  82. struct list_head list;
  83. int in_use;
  84. struct drm_gem_object *gem_obj;
  85. };
  86. static int32_t fence_id = 0x1;
  87. static struct drm_kgsl_gem_object_fence
  88. gem_buf_fence[DRM_KGSL_NUM_FENCE_ENTRIES];
  89. struct drm_kgsl_gem_object {
  90. struct drm_gem_object *obj;
  91. uint32_t cpuaddr;
  92. uint32_t type;
  93. uint32_t size;
  94. struct kgsl_pagetable *pagetable;
  95. uint64_t mmap_offset;
  96. int bufcount;
  97. int flags;
  98. struct list_head list;
  99. int active;
  100. struct {
  101. uint32_t offset;
  102. uint32_t gpuaddr;
  103. } bufs[DRM_KGSL_GEM_MAX_BUFFERS];
  104. int bound;
  105. int lockpid;
  106. /* Put these here to avoid allocing all the time */
  107. struct drm_kgsl_gem_object_wait_list_entry
  108. wait_entries[DRM_KGSL_HANDLE_WAIT_ENTRIES];
  109. /* Each object can only appear in a single fence */
  110. struct drm_kgsl_gem_object_fence_list_entry
  111. fence_entries[DRM_KGSL_NUM_FENCE_ENTRIES];
  112. struct list_head wait_list;
  113. };
  114. /* This is a global list of all the memory currently mapped in the MMU */
  115. static struct list_head kgsl_mem_list;
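/* Perform the cache maintenance implied by the buffer's caching type:
 * clean before handing memory to the device, invalidate when taking it
 * back. PMEM/FB memory is treated as contiguous physical memory (the
 * address is converted with __va), KMEM as vmalloc memory.
 */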
  116. static void kgsl_gem_mem_flush(void *addr,
  117. unsigned long size, uint32_t type, int op)
  118. {
  119. int flags = 0;
  120. switch (op) {
  121. case DRM_KGSL_GEM_CACHE_OP_TO_DEV:
  122. if (type & (DRM_KGSL_GEM_CACHE_WBACK |
  123. DRM_KGSL_GEM_CACHE_WBACKWA))
  124. flags |= KGSL_MEMFLAGS_CACHE_CLEAN;
  125. break;
  126. case DRM_KGSL_GEM_CACHE_OP_FROM_DEV:
  127. if (type & (DRM_KGSL_GEM_CACHE_WBACK |
  128. DRM_KGSL_GEM_CACHE_WBACKWA |
  129. DRM_KGSL_GEM_CACHE_WTHROUGH))
  130. flags |= KGSL_MEMFLAGS_CACHE_INV;
  131. }
  132. if (!flags)
  133. return;
  134. if (TYPE_IS_PMEM(type) || type == DRM_KGSL_GEM_TYPE_FD_FBMEM) {
  135. flags |= KGSL_MEMFLAGS_CONPHYS;
  136. addr = __va(addr);
  137. }
  138. else if (TYPE_IS_MEM(type))
  139. flags |= KGSL_MEMFLAGS_VMALLOC_MEM;
  140. else
  141. return;
  142. kgsl_cache_range_op((unsigned long) addr, size, flags);
  143. }
  144. /* Flush all the memory mapped in the MMU */
  145. void kgsl_gpu_mem_flush(int op)
  146. {
  147. struct drm_kgsl_gem_object *entry;
  148. int index;
  149. list_for_each_entry(entry, &kgsl_mem_list, list) {
  150. for (index = 0;
  151. entry->cpuaddr && (index < entry->bufcount); index++)
  152. kgsl_gem_mem_flush((void *)(entry->cpuaddr +
  153. entry->bufs[index].offset),
  154. entry->size, entry->type, op);
  155. }
  156. /* Takes care of WT/WC case.
  157. * More useful when we go barrierless
  158. */
  159. dmb();
  160. }
  161. /* TODO:
  162. * Add vsync wait */
  163. static int kgsl_drm_load(struct drm_device *dev, unsigned long flags)
  164. {
  165. return 0;
  166. }
  167. static int kgsl_drm_unload(struct drm_device *dev)
  168. {
  169. return 0;
  170. }
  171. struct kgsl_drm_device_priv {
  172. struct kgsl_device *device[KGSL_DEVICE_MAX];
  173. struct kgsl_device_private *devpriv[KGSL_DEVICE_MAX];
  174. };
  175. static int kgsl_ts_notifier_cb(struct notifier_block *blk,
  176. unsigned long code, void *_param);
  177. static struct notifier_block kgsl_ts_nb[KGSL_DEVICE_MAX];
  178. static int kgsl_drm_firstopen(struct drm_device *dev)
  179. {
  180. int i;
  181. for (i = 0; i < KGSL_DEVICE_MAX; i++) {
  182. struct kgsl_device *device = kgsl_get_device(i);
  183. if (device == NULL)
  184. continue;
  185. kgsl_ts_nb[i].notifier_call = kgsl_ts_notifier_cb;
  186. kgsl_register_ts_notifier(device, &kgsl_ts_nb[i]);
  187. }
  188. return 0;
  189. }
  190. void kgsl_drm_lastclose(struct drm_device *dev)
  191. {
  192. int i;
  193. for (i = 0; i < KGSL_DEVICE_MAX; i++) {
  194. struct kgsl_device *device = kgsl_get_device(i);
  195. if (device == NULL)
  196. continue;
  197. kgsl_unregister_ts_notifier(device, &kgsl_ts_nb[i]);
  198. }
  199. }
  200. void kgsl_drm_preclose(struct drm_device *dev, struct drm_file *file_priv)
  201. {
  202. }
  203. static int kgsl_drm_suspend(struct drm_device *dev, pm_message_t state)
  204. {
  205. return 0;
  206. }
  207. static int kgsl_drm_resume(struct drm_device *dev)
  208. {
  209. return 0;
  210. }
  211. static void
  212. kgsl_gem_free_mmap_offset(struct drm_gem_object *obj)
  213. {
  214. struct drm_device *dev = obj->dev;
  215. struct drm_gem_mm *mm = dev->mm_private;
  216. struct drm_kgsl_gem_object *priv = obj->driver_private;
  217. struct drm_map_list *list;
  218. list = &obj->map_list;
  219. drm_ht_remove_item(&mm->offset_hash, &list->hash);
  220. if (list->file_offset_node) {
  221. drm_mm_put_block(list->file_offset_node);
  222. list->file_offset_node = NULL;
  223. }
  224. kfree(list->map);
  225. list->map = NULL;
  226. priv->mmap_offset = 0;
  227. }
  228. static int
  229. kgsl_gem_memory_allocated(struct drm_gem_object *obj)
  230. {
  231. struct drm_kgsl_gem_object *priv = obj->driver_private;
  232. return priv->cpuaddr ? 1 : 0;
  233. }
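/* Allocate the backing store for a GEM object: physically contiguous
 * PMEM (EBI or SMI) via pmem_kalloc() for PMEM types, vmalloc memory
 * for KMEM types. A single allocation covers all bufcount buffers and
 * the offset of each sub-buffer is recorded.
 */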
  234. static int
  235. kgsl_gem_alloc_memory(struct drm_gem_object *obj)
  236. {
  237. struct drm_kgsl_gem_object *priv = obj->driver_private;
  238. int index;
  239. /* Return if the memory is already allocated */
  240. if (kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
  241. return 0;
  242. if (TYPE_IS_PMEM(priv->type)) {
  243. int type;
  244. if (priv->type == DRM_KGSL_GEM_TYPE_EBI ||
  245. priv->type & DRM_KGSL_GEM_PMEM_EBI)
  246. type = PMEM_MEMTYPE_EBI1;
  247. else
  248. type = PMEM_MEMTYPE_SMI;
  249. priv->cpuaddr = pmem_kalloc(obj->size * priv->bufcount,
  250. type | PMEM_ALIGNMENT_4K);
  251. if (IS_ERR((void *) priv->cpuaddr)) {
  252. DRM_ERROR("Unable to allocate PMEM memory\n");
  253. priv->cpuaddr = 0;
  254. return -ENOMEM;
  255. }
  256. } else if (TYPE_IS_MEM(priv->type)) {
  257. priv->cpuaddr = (uint32_t) vmalloc_user(obj->size *
  258. priv->bufcount);
  259. if (priv->cpuaddr == 0) {
  260. DRM_ERROR("Unable to allocate vmalloc memory\n");
  261. return -ENOMEM;
  262. }
  263. } else
  264. return -EINVAL;
  265. for (index = 0; index < priv->bufcount; index++)
  266. priv->bufs[index].offset = index * obj->size;
  267. return 0;
  268. }
  269. #ifdef CONFIG_MSM_KGSL_MMU
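/* Tear down the GPU MMU mappings for every buffer in the object and
 * drop the pagetable reference. KMEM and cached objects are also
 * removed from the global flush list.
 */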
  270. static void
  271. kgsl_gem_unmap(struct drm_gem_object *obj)
  272. {
  273. struct drm_kgsl_gem_object *priv = obj->driver_private;
  274. int index;
275. if (!(priv->flags & DRM_KGSL_GEM_FLAG_MAPPED))
  276. return;
  277. for (index = 0; index < DRM_KGSL_GEM_MAX_BUFFERS; index++) {
  278. if (!priv->bufs[index].gpuaddr)
  279. continue;
  280. kgsl_mmu_unmap(priv->pagetable,
  281. priv->bufs[index].gpuaddr,
  282. obj->size);
  283. priv->bufs[index].gpuaddr = 0;
  284. }
  285. kgsl_mmu_putpagetable(priv->pagetable);
  286. priv->pagetable = NULL;
  287. if ((priv->type == DRM_KGSL_GEM_TYPE_KMEM) ||
  288. (priv->type & DRM_KGSL_GEM_CACHE_MASK))
  289. list_del(&priv->list);
  290. priv->flags &= ~DRM_KGSL_GEM_FLAG_MAPPED;
  291. }
  292. #else
  293. static void
  294. kgsl_gem_unmap(struct drm_gem_object *obj)
  295. {
  296. }
  297. #endif
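/* Release the memory backing a GEM object: invalidate any cached
 * contents, unmap it from the GPU and free the PMEM or vmalloc
 * allocation.
 */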
  298. static void
  299. kgsl_gem_free_memory(struct drm_gem_object *obj)
  300. {
  301. struct drm_kgsl_gem_object *priv = obj->driver_private;
  302. int index;
  303. if (!kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
  304. return;
  305. /* invalidate cached region before releasing */
  306. kgsl_gem_mem_flush((void *)priv->cpuaddr, priv->size,
  307. priv->type, DRM_KGSL_GEM_CACHE_OP_FROM_DEV);
  308. kgsl_gem_unmap(obj);
  309. if (TYPE_IS_PMEM(priv->type))
  310. pmem_kfree(priv->cpuaddr);
  311. else if (TYPE_IS_MEM(priv->type))
  312. vfree((void *) priv->cpuaddr);
  313. priv->cpuaddr = 0;
  314. for (index = 0; index < DRM_KGSL_GEM_MAX_BUFFERS; index++)
  315. priv->bufs[index].offset = 0;
  316. }
  317. int
  318. kgsl_gem_init_object(struct drm_gem_object *obj)
  319. {
  320. struct drm_kgsl_gem_object *priv;
  321. priv = kzalloc(sizeof(*priv), GFP_KERNEL);
  322. if (priv == NULL) {
  323. DRM_ERROR("Unable to create GEM object\n");
  324. return -ENOMEM;
  325. }
  326. obj->driver_private = priv;
  327. priv->obj = obj;
  328. return 0;
  329. }
  330. void
  331. kgsl_gem_free_object(struct drm_gem_object *obj)
  332. {
  333. kgsl_gem_free_memory(obj);
  334. kgsl_gem_free_mmap_offset(obj);
  335. drm_gem_object_release(obj);
  336. kfree(obj->driver_private);
  337. }
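/* Reserve a fake mmap offset for the object in the DRM offset manager
 * so user space can map it through the DRM device node.
 */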
  338. static int
  339. kgsl_gem_create_mmap_offset(struct drm_gem_object *obj)
  340. {
  341. struct drm_device *dev = obj->dev;
  342. struct drm_gem_mm *mm = dev->mm_private;
  343. struct drm_kgsl_gem_object *priv = obj->driver_private;
  344. struct drm_map_list *list;
  345. int msize;
  346. list = &obj->map_list;
  347. list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
  348. if (list->map == NULL) {
  349. DRM_ERROR("Unable to allocate drm_map_list\n");
  350. return -ENOMEM;
  351. }
  352. msize = obj->size * priv->bufcount;
  353. list->map->type = _DRM_GEM;
  354. list->map->size = msize;
  355. list->map->handle = obj;
  356. /* Allocate a mmap offset */
  357. list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
  358. msize / PAGE_SIZE,
  359. 0, 0);
  360. if (!list->file_offset_node) {
  361. DRM_ERROR("Failed to allocate offset for %d\n", obj->name);
  362. kfree(list->map);
  363. return -ENOMEM;
  364. }
  365. list->file_offset_node = drm_mm_get_block(list->file_offset_node,
  366. msize / PAGE_SIZE, 0);
  367. if (!list->file_offset_node) {
  368. DRM_ERROR("Unable to create the file_offset_node\n");
  369. kfree(list->map);
  370. return -ENOMEM;
  371. }
  372. list->hash.key = list->file_offset_node->start;
  373. if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
  374. DRM_ERROR("Failed to add to map hash\n");
  375. drm_mm_put_block(list->file_offset_node);
  376. kfree(list->map);
  377. return -ENOMEM;
  378. }
  379. priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
  380. return 0;
  381. }
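/* Resolve a GEM handle on the given DRM file descriptor to the physical
 * address and length of its currently active buffer, flushing it to the
 * device first. Only PMEM-backed objects are supported, since only
 * those can be used by the MDP.
 */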
  382. int
  383. kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
  384. unsigned long *len)
  385. {
  386. struct file *filp;
  387. struct drm_device *dev;
  388. struct drm_file *file_priv;
  389. struct drm_gem_object *obj;
  390. struct drm_kgsl_gem_object *priv;
  391. int ret = 0;
  392. filp = fget(drm_fd);
  393. if (unlikely(filp == NULL)) {
  394. DRM_ERROR("Unable to ghet the DRM file descriptor\n");
  395. return -EINVAL;
  396. }
  397. file_priv = filp->private_data;
  398. if (unlikely(file_priv == NULL)) {
  399. DRM_ERROR("Unable to get the file private data\n");
  400. fput(filp);
  401. return -EINVAL;
  402. }
  403. dev = file_priv->minor->dev;
  404. if (unlikely(dev == NULL)) {
  405. DRM_ERROR("Unable to get the minor device\n");
  406. fput(filp);
  407. return -EINVAL;
  408. }
  409. obj = drm_gem_object_lookup(dev, file_priv, handle);
  410. if (unlikely(obj == NULL)) {
  411. DRM_ERROR("Invalid GEM handle %x\n", handle);
  412. fput(filp);
  413. return -EBADF;
  414. }
  415. mutex_lock(&dev->struct_mutex);
  416. priv = obj->driver_private;
  417. /* We can only use the MDP for PMEM regions */
  418. if (priv->cpuaddr && TYPE_IS_PMEM(priv->type)) {
  419. /* Return the address for the currently active buffer */
  420. *start = priv->cpuaddr + priv->bufs[priv->active].offset;
  421. /* priv->mmap_offset is used for virt addr */
  422. *len = obj->size;
  423. /* flush cached obj */
  424. kgsl_gem_mem_flush((void *)*start, *len, priv->type,
  425. DRM_KGSL_GEM_CACHE_OP_TO_DEV);
  426. } else {
  427. *start = 0;
  428. *len = 0;
  429. ret = -EINVAL;
  430. }
  431. drm_gem_object_unreference(obj);
  432. mutex_unlock(&dev->struct_mutex);
  433. fput(filp);
  434. return ret;
  435. }
  436. static int
  437. kgsl_gem_init_obj(struct drm_device *dev,
  438. struct drm_file *file_priv,
  439. struct drm_gem_object *obj,
  440. int *handle)
  441. {
  442. struct drm_kgsl_gem_object *priv;
  443. int ret, i;
  444. mutex_lock(&dev->struct_mutex);
  445. priv = obj->driver_private;
  446. priv->cpuaddr = 0;
  447. priv->size = obj->size;
  448. priv->bufcount = 1;
  449. priv->active = 0;
  450. priv->bound = 0;
451. /* To preserve backwards compatibility, the default memory source
  452. is EBI */
  453. priv->type = DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI;
  454. ret = drm_gem_handle_create(file_priv, obj, handle);
  455. drm_gem_object_handle_unreference(obj);
  456. INIT_LIST_HEAD(&priv->wait_list);
  457. for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
  458. INIT_LIST_HEAD((struct list_head *) &priv->wait_entries[i]);
  459. priv->wait_entries[i].pid = 0;
  460. init_waitqueue_head(&priv->wait_entries[i].process_wait_q);
  461. }
  462. for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
  463. INIT_LIST_HEAD((struct list_head *) &priv->fence_entries[i]);
  464. priv->fence_entries[i].in_use = 0;
  465. priv->fence_entries[i].gem_obj = obj;
  466. }
  467. mutex_unlock(&dev->struct_mutex);
  468. return ret;
  469. }
  470. int
  471. kgsl_gem_create_ioctl(struct drm_device *dev, void *data,
  472. struct drm_file *file_priv)
  473. {
  474. struct drm_kgsl_gem_create *create = data;
  475. struct drm_gem_object *obj;
  476. int ret, handle;
  477. /* Page align the size so we can allocate multiple buffers */
  478. create->size = ALIGN(create->size, 4096);
  479. obj = drm_gem_object_alloc(dev, create->size);
  480. if (obj == NULL) {
  481. DRM_ERROR("Unable to allocate the GEM object\n");
  482. return -ENOMEM;
  483. }
  484. ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
  485. if (ret)
  486. return ret;
  487. create->handle = handle;
  488. return 0;
  489. }
  490. int
  491. kgsl_gem_create_fd_ioctl(struct drm_device *dev, void *data,
  492. struct drm_file *file_priv)
  493. {
  494. struct drm_kgsl_gem_create_fd *args = data;
  495. struct file *file;
  496. dev_t rdev;
  497. struct fb_info *info;
  498. struct drm_gem_object *obj;
  499. struct drm_kgsl_gem_object *priv;
  500. int ret, put_needed, handle;
  501. file = fget_light(args->fd, &put_needed);
  502. if (file == NULL) {
  503. DRM_ERROR("Unable to get the file object\n");
  504. return -EBADF;
  505. }
  506. rdev = file->f_dentry->d_inode->i_rdev;
  507. /* Only framebuffer objects are supported ATM */
  508. if (MAJOR(rdev) != FB_MAJOR) {
  509. DRM_ERROR("File descriptor is not a framebuffer\n");
  510. ret = -EBADF;
  511. goto error_fput;
  512. }
  513. info = registered_fb[MINOR(rdev)];
  514. if (info == NULL) {
  515. DRM_ERROR("Framebuffer minor %d is not registered\n",
  516. MINOR(rdev));
  517. ret = -EBADF;
  518. goto error_fput;
  519. }
  520. obj = drm_gem_object_alloc(dev, info->fix.smem_len);
  521. if (obj == NULL) {
  522. DRM_ERROR("Unable to allocate GEM object\n");
  523. ret = -ENOMEM;
  524. goto error_fput;
  525. }
  526. ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
  527. if (ret)
  528. goto error_fput;
  529. mutex_lock(&dev->struct_mutex);
  530. priv = obj->driver_private;
  531. priv->cpuaddr = info->fix.smem_start;
  532. priv->type = DRM_KGSL_GEM_TYPE_FD_FBMEM;
  533. mutex_unlock(&dev->struct_mutex);
  534. args->handle = handle;
  535. error_fput:
  536. fput_light(file, put_needed);
  537. return ret;
  538. }
  539. int
  540. kgsl_gem_setmemtype_ioctl(struct drm_device *dev, void *data,
  541. struct drm_file *file_priv)
  542. {
  543. struct drm_kgsl_gem_memtype *args = data;
  544. struct drm_gem_object *obj;
  545. struct drm_kgsl_gem_object *priv;
  546. int ret = 0;
  547. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  548. if (obj == NULL) {
  549. DRM_ERROR("Invalid GEM handle %x\n", args->handle);
  550. return -EBADF;
  551. }
  552. mutex_lock(&dev->struct_mutex);
  553. priv = obj->driver_private;
  554. if (TYPE_IS_FD(priv->type))
  555. ret = -EINVAL;
  556. else {
  557. if (TYPE_IS_PMEM(args->type) || TYPE_IS_MEM(args->type))
  558. priv->type = args->type;
  559. else
  560. ret = -EINVAL;
  561. }
  562. drm_gem_object_unreference(obj);
  563. mutex_unlock(&dev->struct_mutex);
  564. return ret;
  565. }
  566. int
  567. kgsl_gem_getmemtype_ioctl(struct drm_device *dev, void *data,
  568. struct drm_file *file_priv)
  569. {
  570. struct drm_kgsl_gem_memtype *args = data;
  571. struct drm_gem_object *obj;
  572. struct drm_kgsl_gem_object *priv;
  573. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  574. if (obj == NULL) {
  575. DRM_ERROR("Invalid GEM handle %x\n", args->handle);
  576. return -EBADF;
  577. }
  578. mutex_lock(&dev->struct_mutex);
  579. priv = obj->driver_private;
  580. args->type = priv->type;
  581. drm_gem_object_unreference(obj);
  582. mutex_unlock(&dev->struct_mutex);
  583. return 0;
  584. }
  585. int
  586. kgsl_gem_unbind_gpu_ioctl(struct drm_device *dev, void *data,
  587. struct drm_file *file_priv)
  588. {
  589. struct drm_kgsl_gem_bind_gpu *args = data;
  590. struct drm_gem_object *obj;
  591. struct drm_kgsl_gem_object *priv;
  592. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  593. if (obj == NULL) {
  594. DRM_ERROR("Invalid GEM handle %x\n", args->handle);
  595. return -EBADF;
  596. }
  597. mutex_lock(&dev->struct_mutex);
  598. priv = obj->driver_private;
  599. if (--priv->bound == 0)
  600. kgsl_gem_unmap(obj);
  601. drm_gem_object_unreference(obj);
  602. mutex_unlock(&dev->struct_mutex);
  603. return 0;
  604. }
  605. #ifdef CONFIG_MSM_KGSL_MMU
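/* Map every buffer of the object into the global GPU pagetable and
 * record the resulting GPU addresses. KMEM and cached objects are
 * added to the global list so they can be flushed later.
 */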
  606. static int
  607. kgsl_gem_map(struct drm_gem_object *obj)
  608. {
  609. struct drm_kgsl_gem_object *priv = obj->driver_private;
  610. int index;
  611. int ret = -EINVAL;
  612. int flags = KGSL_MEMFLAGS_CONPHYS;
  613. if (priv->flags & DRM_KGSL_GEM_FLAG_MAPPED)
  614. return 0;
  615. if (TYPE_IS_PMEM(priv->type) ||
  616. priv->type == DRM_KGSL_GEM_TYPE_FD_FBMEM)
  617. flags = KGSL_MEMFLAGS_CONPHYS;
  618. else
  619. flags = KGSL_MEMFLAGS_VMALLOC_MEM;
  620. /* Get the global page table */
  621. if (priv->pagetable == NULL) {
  622. struct kgsl_device *kgsldev =
  623. kgsl_get_device(KGSL_DEVICE_YAMATO);
  624. struct kgsl_mmu *mmu = kgsl_get_mmu(kgsldev);
  625. if (mmu == NULL) {
  626. DRM_ERROR("The GPU MMU is not enabled\n");
  627. return -EINVAL;
  628. }
  629. priv->pagetable =
  630. kgsl_mmu_getpagetable(mmu, KGSL_MMU_GLOBAL_PT);
  631. if (priv->pagetable == NULL) {
  632. DRM_ERROR("Unable to get the GPU MMU pagetable\n");
  633. return -EINVAL;
  634. }
  635. }
  636. for (index = 0; index < priv->bufcount; index++) {
  637. ret = kgsl_mmu_map(priv->pagetable,
  638. (unsigned long) priv->cpuaddr +
  639. priv->bufs[index].offset,
  640. obj->size,
  641. GSL_PT_PAGE_RV | GSL_PT_PAGE_WV,
  642. &priv->bufs[index].gpuaddr,
  643. flags | KGSL_MEMFLAGS_ALIGN4K);
  644. }
  645. /* Add cached memory to the list to be cached */
  646. if (priv->type == DRM_KGSL_GEM_TYPE_KMEM ||
  647. priv->type & DRM_KGSL_GEM_CACHE_MASK)
  648. list_add(&priv->list, &kgsl_mem_list);
  649. priv->flags |= DRM_KGSL_GEM_FLAG_MAPPED;
  650. return ret;
  651. }
  652. #else
  653. static int
  654. kgsl_gem_map(struct drm_gem_object *obj)
655. {
struct drm_kgsl_gem_object *priv = obj->driver_private;
int index;
656. if (TYPE_IS_PMEM(priv->type)) {
  657. for (index = 0; index < priv->bufcount; index++)
  658. priv->bufs[index].gpuaddr =
  659. priv->cpuaddr + priv->bufs[index].offset;
  660. return 0;
  661. }
  662. return -EINVAL;
  663. }
  664. #endif
  665. int
  666. kgsl_gem_bind_gpu_ioctl(struct drm_device *dev, void *data,
  667. struct drm_file *file_priv)
  668. {
  669. struct drm_kgsl_gem_bind_gpu *args = data;
  670. struct drm_gem_object *obj;
  671. struct drm_kgsl_gem_object *priv;
  672. int ret = 0;
  673. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  674. if (obj == NULL) {
  675. DRM_ERROR("Invalid GEM handle %x\n", args->handle);
  676. return -EBADF;
  677. }
  678. mutex_lock(&dev->struct_mutex);
  679. priv = obj->driver_private;
  680. if (priv->bound++ == 0) {
  681. if (!kgsl_gem_memory_allocated(obj)) {
  682. DRM_ERROR("Memory not allocated for this object\n");
  683. ret = -ENOMEM;
  684. goto out;
  685. }
  686. ret = kgsl_gem_map(obj);
  687. /* This is legacy behavior - use GET_BUFFERINFO instead */
  688. args->gpuptr = priv->bufs[0].gpuaddr;
  689. }
  690. out:
  691. drm_gem_object_unreference(obj);
  692. mutex_unlock(&dev->struct_mutex);
  693. return ret;
  694. }
  695. /* Allocate the memory and prepare it for CPU mapping */
  696. int
  697. kgsl_gem_alloc_ioctl(struct drm_device *dev, void *data,
  698. struct drm_file *file_priv)
  699. {
  700. struct drm_kgsl_gem_alloc *args = data;
  701. struct drm_gem_object *obj;
  702. struct drm_kgsl_gem_object *priv;
  703. int ret;
  704. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  705. if (obj == NULL) {
  706. DRM_ERROR("Invalid GEM handle %x\n", args->handle);
  707. return -EBADF;
  708. }
  709. mutex_lock(&dev->struct_mutex);
  710. priv = obj->driver_private;
  711. ret = kgsl_gem_alloc_memory(obj);
  712. if (ret) {
  713. DRM_ERROR("Unable to allocate object memory\n");
  714. } else if (!priv->mmap_offset) {
  715. ret = kgsl_gem_create_mmap_offset(obj);
  716. if (ret)
  717. DRM_ERROR("Unable to create a mmap offset\n");
  718. }
  719. args->offset = priv->mmap_offset;
  720. drm_gem_object_unreference(obj);
  721. mutex_unlock(&dev->struct_mutex);
  722. return ret;
  723. }
  724. int
  725. kgsl_gem_mmap_ioctl(struct drm_device *dev, void *data,
  726. struct drm_file *file_priv)
  727. {
  728. struct drm_kgsl_gem_mmap *args = data;
  729. struct drm_gem_object *obj;
  730. unsigned long addr;
  731. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  732. if (obj == NULL) {
  733. DRM_ERROR("Invalid GEM handle %x\n", args->handle);
  734. return -EBADF;
  735. }
  736. down_write(&current->mm->mmap_sem);
  737. addr = do_mmap(obj->filp, 0, args->size,
  738. PROT_READ | PROT_WRITE, MAP_SHARED,
  739. args->offset);
  740. up_write(&current->mm->mmap_sem);
  741. mutex_lock(&dev->struct_mutex);
  742. drm_gem_object_unreference(obj);
  743. mutex_unlock(&dev->struct_mutex);
  744. if (IS_ERR((void *) addr))
  745. return addr;
  746. args->hostptr = (uint32_t) addr;
  747. return 0;
  748. }
  749. /* This function is deprecated */
  750. int
  751. kgsl_gem_prep_ioctl(struct drm_device *dev, void *data,
  752. struct drm_file *file_priv)
  753. {
  754. struct drm_kgsl_gem_prep *args = data;
  755. struct drm_gem_object *obj;
  756. struct drm_kgsl_gem_object *priv;
  757. int ret;
  758. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  759. if (obj == NULL) {
  760. DRM_ERROR("Invalid GEM handle %x\n", args->handle);
  761. return -EBADF;
  762. }
  763. mutex_lock(&dev->struct_mutex);
  764. priv = obj->driver_private;
  765. ret = kgsl_gem_alloc_memory(obj);
  766. if (ret) {
  767. DRM_ERROR("Unable to allocate object memory\n");
  768. drm_gem_object_unreference(obj);
  769. mutex_unlock(&dev->struct_mutex);
  770. return ret;
  771. }
  772. if (priv->mmap_offset == 0) {
  773. ret = kgsl_gem_create_mmap_offset(obj);
  774. if (ret) {
  775. drm_gem_object_unreference(obj);
  776. mutex_unlock(&dev->struct_mutex);
  777. return ret;
  778. }
  779. }
  780. args->offset = priv->mmap_offset;
  781. args->phys = priv->cpuaddr;
  782. drm_gem_object_unreference(obj);
  783. mutex_unlock(&dev->struct_mutex);
  784. return 0;
  785. }
  786. int
  787. kgsl_gem_get_bufinfo_ioctl(struct drm_device *dev, void *data,
  788. struct drm_file *file_priv)
  789. {
  790. struct drm_kgsl_gem_bufinfo *args = data;
  791. struct drm_gem_object *obj;
  792. struct drm_kgsl_gem_object *priv;
  793. int ret = -EINVAL;
  794. int index;
  795. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  796. if (obj == NULL) {
  797. DRM_ERROR("Invalid GEM handle %x\n", args->handle);
  798. return -EBADF;
  799. }
  800. mutex_lock(&dev->struct_mutex);
  801. priv = obj->driver_private;
  802. if (!kgsl_gem_memory_allocated(obj)) {
  803. DRM_ERROR("Memory not allocated for this object\n");
  804. goto out;
  805. }
  806. for (index = 0; index < priv->bufcount; index++) {
  807. args->offset[index] = priv->bufs[index].offset;
  808. args->gpuaddr[index] = priv->bufs[index].gpuaddr;
  809. }
  810. args->count = priv->bufcount;
  811. args->active = priv->active;
  812. ret = 0;
  813. out:
  814. drm_gem_object_unreference(obj);
  815. mutex_unlock(&dev->struct_mutex);
  816. return ret;
  817. }
  818. int
  819. kgsl_gem_set_bufcount_ioctl(struct drm_device *dev, void *data,
  820. struct drm_file *file_priv)
  821. {
  822. struct drm_kgsl_gem_bufcount *args = data;
  823. struct drm_gem_object *obj;
  824. struct drm_kgsl_gem_object *priv;
  825. int ret = -EINVAL;
  826. if (args->bufcount < 1 || args->bufcount > DRM_KGSL_GEM_MAX_BUFFERS)
  827. return -EINVAL;
  828. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  829. if (obj == NULL) {
  830. DRM_ERROR("Invalid GEM handle %x\n", args->handle);
  831. return -EBADF;
  832. }
  833. mutex_lock(&dev->struct_mutex);
  834. priv = obj->driver_private;
  835. /* It is too much math to worry about what happens if we are already
  836. allocated, so just bail if we are */
  837. if (kgsl_gem_memory_allocated(obj)) {
  838. DRM_ERROR("Memory already allocated - cannot change"
  839. "number of buffers\n");
  840. goto out;
  841. }
  842. priv->bufcount = args->bufcount;
  843. ret = 0;
  844. out:
  845. drm_gem_object_unreference(obj);
  846. mutex_unlock(&dev->struct_mutex);
  847. return ret;
  848. }
  849. int
  850. kgsl_gem_set_active_ioctl(struct drm_device *dev, void *data,
  851. struct drm_file *file_priv)
  852. {
  853. struct drm_kgsl_gem_active *args = data;
  854. struct drm_gem_object *obj;
  855. struct drm_kgsl_gem_object *priv;
  856. int ret = -EINVAL;
  857. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  858. if (obj == NULL) {
  859. DRM_ERROR("Invalid GEM handle %x\n", args->handle);
  860. return -EBADF;
  861. }
  862. mutex_lock(&dev->struct_mutex);
  863. priv = obj->driver_private;
  864. if (args->active < 0 || args->active >= priv->bufcount) {
  865. DRM_ERROR("Invalid active buffer %d\n", args->active);
  866. goto out;
  867. }
  868. priv->active = args->active;
  869. ret = 0;
  870. out:
  871. drm_gem_object_unreference(obj);
  872. mutex_unlock(&dev->struct_mutex);
  873. return ret;
  874. }
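/* Page fault handler for vmalloc (KMEM) backed objects: resolve the
 * faulting address to the underlying vmalloc page and hand it back to
 * the VM.
 */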
  875. int kgsl_gem_kmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  876. {
  877. struct drm_gem_object *obj = vma->vm_private_data;
  878. struct drm_device *dev = obj->dev;
  879. struct drm_kgsl_gem_object *priv;
  880. unsigned long offset, pg;
  881. struct page *page;
  882. mutex_lock(&dev->struct_mutex);
  883. priv = obj->driver_private;
  884. offset = (unsigned long) vmf->virtual_address - vma->vm_start;
  885. pg = (unsigned long) priv->cpuaddr + offset;
  886. page = vmalloc_to_page((void *) pg);
  887. if (!page) {
  888. mutex_unlock(&dev->struct_mutex);
  889. return VM_FAULT_SIGBUS;
  890. }
  891. get_page(page);
  892. vmf->page = page;
  893. mutex_unlock(&dev->struct_mutex);
  894. return 0;
  895. }
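/* Page fault handler for physically contiguous (PMEM/FB) objects:
 * insert the PFN for the faulting offset directly, since this memory
 * has no struct page.
 */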
  896. int kgsl_gem_phys_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  897. {
  898. struct drm_gem_object *obj = vma->vm_private_data;
  899. struct drm_device *dev = obj->dev;
  900. struct drm_kgsl_gem_object *priv;
  901. unsigned long offset, pfn;
  902. int ret = 0;
  903. offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
  904. PAGE_SHIFT;
  905. mutex_lock(&dev->struct_mutex);
  906. priv = obj->driver_private;
  907. pfn = (priv->cpuaddr >> PAGE_SHIFT) + offset;
  908. ret = vm_insert_pfn(vma,
  909. (unsigned long) vmf->virtual_address, pfn);
  910. mutex_unlock(&dev->struct_mutex);
  911. switch (ret) {
  912. case -ENOMEM:
  913. case -EAGAIN:
  914. return VM_FAULT_OOM;
  915. case -EFAULT:
  916. return VM_FAULT_SIGBUS;
  917. default:
  918. return VM_FAULT_NOPAGE;
  919. }
  920. }
  921. static struct vm_operations_struct kgsl_gem_kmem_vm_ops = {
  922. .fault = kgsl_gem_kmem_fault,
  923. .open = drm_gem_vm_open,
  924. .close = drm_gem_vm_close,
  925. };
  926. static struct vm_operations_struct kgsl_gem_phys_vm_ops = {
  927. .fault = kgsl_gem_phys_fault,
  928. .open = drm_gem_vm_open,
  929. .close = drm_gem_vm_close,
  930. };
  931. /* This is a clone of the standard drm_gem_mmap function modified to allow
  932. us to properly map KMEM regions as well as the PMEM regions */
  933. int msm_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
  934. {
  935. struct drm_file *priv = filp->private_data;
  936. struct drm_device *dev = priv->minor->dev;
  937. struct drm_gem_mm *mm = dev->mm_private;
  938. struct drm_local_map *map = NULL;
  939. struct drm_gem_object *obj;
  940. struct drm_hash_item *hash;
  941. struct drm_kgsl_gem_object *gpriv;
  942. int ret = 0;
  943. mutex_lock(&dev->struct_mutex);
  944. if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
  945. mutex_unlock(&dev->struct_mutex);
  946. return drm_mmap(filp, vma);
  947. }
  948. map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
  949. if (!map ||
  950. ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
  951. ret = -EPERM;
  952. goto out_unlock;
  953. }
  954. /* Check for valid size. */
  955. if (map->size < vma->vm_end - vma->vm_start) {
  956. ret = -EINVAL;
  957. goto out_unlock;
  958. }
  959. obj = map->handle;
  960. gpriv = obj->driver_private;
  961. /* VM_PFNMAP is only for memory that doesn't use struct page
  962. * in other words, not "normal" memory. If you try to use it
  963. * with "normal" memory then the mappings don't get flushed. */
  964. if (TYPE_IS_MEM(gpriv->type)) {
  965. vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
  966. vma->vm_ops = &kgsl_gem_kmem_vm_ops;
  967. } else {
  968. vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP |
  969. VM_DONTEXPAND;
  970. vma->vm_ops = &kgsl_gem_phys_vm_ops;
  971. }
  972. vma->vm_private_data = map->handle;
  973. /* Take care of requested caching policy */
  974. if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM ||
  975. gpriv->type & DRM_KGSL_GEM_CACHE_MASK) {
  976. if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACKWA)
  977. vma->vm_page_prot =
  978. pgprot_writebackwacache(vma->vm_page_prot);
  979. else if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACK)
  980. vma->vm_page_prot =
  981. pgprot_writebackcache(vma->vm_page_prot);
  982. else if (gpriv->type & DRM_KGSL_GEM_CACHE_WTHROUGH)
  983. vma->vm_page_prot =
  984. pgprot_writethroughcache(vma->vm_page_prot);
  985. else
  986. vma->vm_page_prot =
  987. pgprot_writecombine(vma->vm_page_prot);
  988. } else {
  989. if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE)
  990. vma->vm_page_prot =
  991. pgprot_noncached(vma->vm_page_prot);
  992. else
  993. /* default pmem is WC */
  994. vma->vm_page_prot =
  995. pgprot_writecombine(vma->vm_page_prot);
  996. }
  997. /* flush out existing KMEM cached mappings if new ones are
  998. * of uncached type */
  999. if (IS_MEM_UNCACHED(gpriv->type))
  1000. kgsl_cache_range_op((unsigned long) gpriv->cpuaddr,
  1001. (obj->size * gpriv->bufcount),
  1002. KGSL_MEMFLAGS_CACHE_FLUSH |
  1003. KGSL_MEMFLAGS_VMALLOC_MEM);
  1004. /* Add the other memory types here */
  1005. /* Take a ref for this mapping of the object, so that the fault
  1006. * handler can dereference the mmap offset's pointer to the object.
  1007. * This reference is cleaned up by the corresponding vm_close
  1008. * (which should happen whether the vma was created by this call, or
  1009. * by a vm_open due to mremap or partial unmap or whatever).
  1010. */
  1011. drm_gem_object_reference(obj);
  1012. vma->vm_file = filp; /* Needed for drm_vm_open() */
  1013. drm_vm_open_locked(vma);
  1014. out_unlock:
  1015. mutex_unlock(&dev->struct_mutex);
  1016. return ret;
  1017. }
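/* Release every buffer attached to a fence and mark the fence entry
 * empty; if check_waiting is set, wake the next process waiting to
 * lock each buffer.
 */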
  1018. void
  1019. cleanup_fence(struct drm_kgsl_gem_object_fence *fence, int check_waiting)
  1020. {
  1021. int j;
  1022. struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
  1023. struct drm_kgsl_gem_object *unlock_obj;
  1024. struct drm_gem_object *obj;
  1025. struct drm_kgsl_gem_object_wait_list_entry *lock_next;
  1026. fence->ts_valid = 0;
  1027. fence->timestamp = -1;
  1028. fence->ts_device = -1;
  1029. /* Walk the list of buffers in this fence and clean up the */
  1030. /* references. Note that this can cause memory allocations */
  1031. /* to be freed */
  1032. for (j = fence->num_buffers; j > 0; j--) {
  1033. this_fence_entry =
  1034. (struct drm_kgsl_gem_object_fence_list_entry *)
  1035. fence->buffers_in_fence.prev;
  1036. this_fence_entry->in_use = 0;
  1037. obj = this_fence_entry->gem_obj;
  1038. unlock_obj = obj->driver_private;
  1039. /* Delete it from the list */
  1040. list_del(&this_fence_entry->list);
  1041. /* we are unlocking - see if there are other pids waiting */
  1042. if (check_waiting) {
  1043. if (!list_empty(&unlock_obj->wait_list)) {
  1044. lock_next =
  1045. (struct drm_kgsl_gem_object_wait_list_entry *)
  1046. unlock_obj->wait_list.prev;
  1047. list_del((struct list_head *)&lock_next->list);
  1048. unlock_obj->lockpid = 0;
  1049. wake_up_interruptible(
  1050. &lock_next->process_wait_q);
  1051. lock_next->pid = 0;
  1052. } else {
  1053. /* List is empty so set pid to 0 */
  1054. unlock_obj->lockpid = 0;
  1055. }
  1056. }
  1057. drm_gem_object_unreference(obj);
  1058. }
  1059. /* here all the buffers in the fence are released */
  1060. /* clear the fence entry */
  1061. fence->fence_id = ENTRY_EMPTY;
  1062. }
  1063. int
  1064. find_empty_fence(void)
  1065. {
  1066. int i;
  1067. for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
  1068. if (gem_buf_fence[i].fence_id == ENTRY_EMPTY) {
  1069. gem_buf_fence[i].fence_id = fence_id++;
  1070. gem_buf_fence[i].ts_valid = 0;
  1071. INIT_LIST_HEAD(&(gem_buf_fence[i].buffers_in_fence));
  1072. if (fence_id == 0xFFFFFFF0)
  1073. fence_id = 1;
  1074. return i;
  1075. } else {
  1076. /* Look for entries to be cleaned up */
  1077. if (gem_buf_fence[i].fence_id == ENTRY_NEEDS_CLEANUP)
  1078. cleanup_fence(&gem_buf_fence[i], 0);
  1079. }
  1080. }
  1081. return ENTRY_EMPTY;
  1082. }
  1083. int
  1084. find_fence(int index)
  1085. {
  1086. int i;
  1087. for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
  1088. if (gem_buf_fence[i].fence_id == index)
  1089. return i;
  1090. }
  1091. return ENTRY_EMPTY;
  1092. }
  1093. void
  1094. wakeup_fence_entries(struct drm_kgsl_gem_object_fence *fence)
  1095. {
  1096. struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
  1097. struct drm_kgsl_gem_object_wait_list_entry *lock_next;
  1098. struct drm_kgsl_gem_object *unlock_obj;
  1099. struct drm_gem_object *obj;
  1100. /* TS has expired when we get here */
  1101. fence->ts_valid = 0;
  1102. fence->timestamp = -1;
  1103. fence->ts_device = -1;
  1104. list_for_each_entry(this_fence_entry, &fence->buffers_in_fence, list) {
  1105. obj = this_fence_entry->gem_obj;
  1106. unlock_obj = obj->driver_private;
  1107. if (!list_empty(&unlock_obj->wait_list)) {
  1108. lock_next =
  1109. (struct drm_kgsl_gem_object_wait_list_entry *)
  1110. unlock_obj->wait_list.prev;
  1111. /* Unblock the pid */
  1112. lock_next->pid = 0;
  1113. /* Delete it from the list */
  1114. list_del((struct list_head *)&lock_next->list);
  1115. unlock_obj->lockpid = 0;
  1116. wake_up_interruptible(&lock_next->process_wait_q);
  1117. } else {
  1118. /* List is empty so set pid to 0 */
  1119. unlock_obj->lockpid = 0;
  1120. }
  1121. }
  1122. fence->fence_id = ENTRY_NEEDS_CLEANUP; /* Mark it as needing cleanup */
  1123. }
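/* Per-device timestamp notifier: when a timestamp retires on a device,
 * wake the entries of any fence waiting on that timestamp.
 */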
  1124. static int kgsl_ts_notifier_cb(struct notifier_block *blk,
  1125. unsigned long code, void *_param)
  1126. {
  1127. struct drm_kgsl_gem_object_fence *fence;
  1128. struct kgsl_device *device = kgsl_get_device(code);
  1129. int i;
  1130. /* loop through the fences to see what things can be processed */
  1131. for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
  1132. fence = &gem_buf_fence[i];
  1133. if (!fence->ts_valid || fence->ts_device != code)
  1134. continue;
  1135. if (kgsl_check_timestamp(device, fence->timestamp))
  1136. wakeup_fence_entries(fence);
  1137. }
  1138. return 0;
  1139. }
  1140. int
  1141. kgsl_gem_lock_handle_ioctl(struct drm_device *dev, void *data,
  1142. struct drm_file *file_priv)
  1143. {
  1144. /* The purpose of this function is to lock a given set of handles. */
  1145. /* The driver will maintain a list of locked handles. */
  1146. /* If a request comes in for a handle that's locked the thread will */
  1147. /* block until it's no longer in use. */
  1148. struct drm_kgsl_gem_lock_handles *args = data;
  1149. struct drm_gem_object *obj;
  1150. struct drm_kgsl_gem_object *priv;
  1151. struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
  1152. struct drm_kgsl_gem_object_fence *fence;
  1153. struct drm_kgsl_gem_object_wait_list_entry *lock_item;
  1154. int i, j;
  1155. int result = 0;
  1156. uint32_t *lock_list;
  1157. uint32_t *work_list = NULL;
  1158. int32_t fence_index;
  1159. /* copy in the data from user space */
  1160. lock_list = kzalloc(sizeof(uint32_t) * args->num_handles, GFP_KERNEL);
  1161. if (!lock_list) {
  1162. DRM_ERROR("Unable allocate memory for lock list\n");
  1163. result = -ENOMEM;
  1164. goto error;
  1165. }
  1166. if (copy_from_user(lock_list, args->handle_list,
  1167. sizeof(uint32_t) * args->num_handles)) {
  1168. DRM_ERROR("Unable to copy the lock list from the user\n");
  1169. result = -EFAULT;
  1170. goto free_handle_list;
  1171. }
  1172. work_list = lock_list;
  1173. mutex_lock(&dev->struct_mutex);
  1174. /* build the fence for this group of handles */
  1175. fence_index = find_empty_fence();
  1176. if (fence_index == ENTRY_EMPTY) {
  1177. DRM_ERROR("Unable to find a empty fence\n");
  1178. args->lock_id = 0xDEADBEEF;
  1179. result = -EFAULT;
  1180. goto out_unlock;
  1181. }
  1182. fence = &gem_buf_fence[fence_index];
  1183. gem_buf_fence[fence_index].num_buffers = args->num_handles;
  1184. args->lock_id = gem_buf_fence[fence_index].fence_id;
  1185. for (j = args->num_handles; j > 0; j--, lock_list++) {
  1186. obj = drm_gem_object_lookup(dev, file_priv, *lock_list);
  1187. if (obj == NULL) {
  1188. DRM_ERROR("Invalid GEM handle %x\n", *lock_list);
  1189. result = -EBADF;
  1190. goto out_unlock;
  1191. }
  1192. priv = obj->driver_private;
  1193. this_fence_entry = NULL;
  1194. /* get a fence entry to hook into the fence */
  1195. for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
  1196. if (!priv->fence_entries[i].in_use) {
  1197. this_fence_entry = &priv->fence_entries[i];
  1198. this_fence_entry->in_use = 1;
  1199. break;
  1200. }
  1201. }
  1202. if (this_fence_entry == NULL) {
  1203. fence->num_buffers = 0;
  1204. fence->fence_id = ENTRY_EMPTY;
  1205. args->lock_id = 0xDEADBEAD;
  1206. result = -EFAULT;
  1207. drm_gem_object_unreference(obj);
  1208. goto out_unlock;
  1209. }
  1210. /* We're trying to lock - add to a fence */
  1211. list_add((struct list_head *)this_fence_entry,
  1212. &gem_buf_fence[fence_index].buffers_in_fence);
  1213. if (priv->lockpid) {
  1214. if (priv->lockpid == args->pid) {
  1215. /* now that things are running async this */
  1216. /* happens when an op isn't done */
  1217. /* so it's already locked by the calling pid */
  1218. continue;
  1219. }
  1220. /* if a pid already had it locked */
  1221. /* create and add to wait list */
  1222. for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
  1223. if (priv->wait_entries[i].in_use == 0) {
  1224. /* this one is empty */
  1225. lock_item = &priv->wait_entries[i];
  1226. lock_item->in_use = 1;
  1227. lock_item->pid = args->pid;
  1228. INIT_LIST_HEAD((struct list_head *)
  1229. &priv->wait_entries[i]);
  1230. break;
  1231. }
  1232. }
  1233. if (i == DRM_KGSL_HANDLE_WAIT_ENTRIES) {
  1234. result = -EFAULT;
  1235. drm_gem_object_unreference(obj);
  1236. goto out_unlock;
  1237. }
  1238. list_add_tail((struct list_head *)&lock_item->list,
  1239. &priv->wait_list);
  1240. mutex_unlock(&dev->struct_mutex);
  1241. /* here we need to block */
  1242. wait_event_interruptible_timeout(
  1243. priv->wait_entries[i].process_wait_q,
  1244. (priv->lockpid == 0),
  1245. msecs_to_jiffies(64));
  1246. mutex_lock(&dev->struct_mutex);
  1247. lock_item->in_use = 0;
  1248. }
  1249. /* Getting here means no one currently holds the lock */
  1250. priv->lockpid = args->pid;
  1251. args->lock_id = gem_buf_fence[fence_index].fence_id;
  1252. }
  1253. fence->lockpid = args->pid;
  1254. out_unlock:
  1255. mutex_unlock(&dev->struct_mutex);
  1256. free_handle_list:
  1257. kfree(work_list);
  1258. error:
  1259. return result;
  1260. }
  1261. int
  1262. kgsl_gem_unlock_handle_ioctl(struct drm_device *dev, void *data,
  1263. struct drm_file *file_priv)
  1264. {
  1265. struct drm_kgsl_gem_unlock_handles *args = data;
  1266. int result = 0;
  1267. int32_t fence_index;
  1268. mutex_lock(&dev->struct_mutex);
  1269. fence_index = find_fence(args->lock_id);
  1270. if (fence_index == ENTRY_EMPTY) {
  1271. DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
  1272. result = -EFAULT;
  1273. goto out_unlock;
  1274. }
  1275. cleanup_fence(&gem_buf_fence[fence_index], 1);
  1276. out_unlock:
  1277. mutex_unlock(&dev->struct_mutex);
  1278. return result;
  1279. }
  1280. int
  1281. kgsl_gem_unlock_on_ts_ioctl(struct drm_device *dev, void *data,
  1282. struct drm_file *file_priv)
  1283. {
  1284. struct drm_kgsl_gem_unlock_on_ts *args = data;
  1285. int result = 0;
  1286. int ts_done = 0;
  1287. int32_t fence_index, ts_device;
  1288. struct drm_kgsl_gem_object_fence *fence;
  1289. struct kgsl_device *device;
  1290. if (args->type == DRM_KGSL_GEM_TS_3D)
  1291. ts_device = KGSL_DEVICE_YAMATO;
  1292. else if (args->type == DRM_KGSL_GEM_TS_2D)
  1293. ts_device = KGSL_DEVICE_2D0;
  1294. else {
  1295. result = -EINVAL;
  1296. goto error;
  1297. }
  1298. device = kgsl_get_device(ts_device);
  1299. ts_done = kgsl_check_timestamp(device, args->timestamp);
  1300. mutex_lock(&dev->struct_mutex);
  1301. fence_index = find_fence(args->lock_id);
  1302. if (fence_index == ENTRY_EMPTY) {
  1303. DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
  1304. result = -EFAULT;
  1305. goto out_unlock;
  1306. }
  1307. fence = &gem_buf_fence[fence_index];
  1308. fence->ts_device = ts_device;
  1309. if (!ts_done)
  1310. fence->ts_valid = 1;
  1311. else
  1312. cleanup_fence(fence, 1);
  1313. out_unlock:
  1314. mutex_unlock(&dev->struct_mutex);
  1315. error:
  1316. return result;
  1317. }
  1318. struct drm_ioctl_desc kgsl_drm_ioctls[] = {
  1319. DRM_IOCTL_DEF(DRM_KGSL_GEM_CREATE, kgsl_gem_create_ioctl, 0),
  1320. DRM_IOCTL_DEF(DRM_KGSL_GEM_PREP, kgsl_gem_prep_ioctl, 0),
  1321. DRM_IOCTL_DEF(DRM_KGSL_GEM_SETMEMTYPE, kgsl_gem_setmemtype_ioctl, 0),
  1322. DRM_IOCTL_DEF(DRM_KGSL_GEM_GETMEMTYPE, kgsl_gem_getmemtype_ioctl, 0),
  1323. DRM_IOCTL_DEF(DRM_KGSL_GEM_BIND_GPU, kgsl_gem_bind_gpu_ioctl, 0),
  1324. DRM_IOCTL_DEF(DRM_KGSL_GEM_UNBIND_GPU, kgsl_gem_unbind_gpu_ioctl, 0),
  1325. DRM_IOCTL_DEF(DRM_KGSL_GEM_ALLOC, kgsl_gem_alloc_ioctl, 0),
  1326. DRM_IOCTL_DEF(DRM_KGSL_GEM_MMAP, kgsl_gem_mmap_ioctl, 0),
  1327. DRM_IOCTL_DEF(DRM_KGSL_GEM_GET_BUFINFO, kgsl_gem_get_bufinfo_ioctl, 0),
  1328. DRM_IOCTL_DEF(DRM_KGSL_GEM_SET_BUFCOUNT,
  1329. kgsl_gem_set_bufcount_ioctl, 0),
  1330. DRM_IOCTL_DEF(DRM_KGSL_GEM_SET_ACTIVE, kgsl_gem_set_active_ioctl, 0),
  1331. DRM_IOCTL_DEF(DRM_KGSL_GEM_LOCK_HANDLE,
  1332. kgsl_gem_lock_handle_ioctl, 0),
  1333. DRM_IOCTL_DEF(DRM_KGSL_GEM_UNLOCK_HANDLE,
  1334. kgsl_gem_unlock_handle_ioctl, 0),
  1335. DRM_IOCTL_DEF(DRM_KGSL_GEM_UNLOCK_ON_TS,
  1336. kgsl_gem_unlock_on_ts_ioctl, 0),
  1337. DRM_IOCTL_DEF(DRM_KGSL_GEM_CREATE_FD, kgsl_gem_create_fd_ioctl,
  1338. DRM_MASTER),
  1339. };
  1340. static struct drm_driver driver = {
  1341. .driver_features = DRIVER_USE_PLATFORM_DEVICE | DRIVER_GEM,
  1342. .load = kgsl_drm_load,
  1343. .unload = kgsl_drm_unload,
  1344. .firstopen = kgsl_drm_firstopen,
  1345. .lastclose = kgsl_drm_lastclose,
  1346. .preclose = kgsl_drm_preclose,
  1347. .suspend = kgsl_drm_suspend,
  1348. .resume = kgsl_drm_resume,
  1349. .reclaim_buffers = drm_core_reclaim_buffers,
  1350. .get_map_ofs = drm_core_get_map_ofs,
  1351. .get_reg_ofs = drm_core_get_reg_ofs,
  1352. .gem_init_object = kgsl_gem_init_object,
  1353. .gem_free_object = kgsl_gem_free_object,
  1354. .ioctls = kgsl_drm_ioctls,
  1355. .fops = {
  1356. .owner = THIS_MODULE,
  1357. .open = drm_open,
  1358. .release = drm_release,
  1359. .unlocked_ioctl = drm_ioctl,
  1360. .mmap = msm_drm_gem_mmap,
  1361. .poll = drm_poll,
  1362. .fasync = drm_fasync,
  1363. },
  1364. .name = DRIVER_NAME,
  1365. .desc = DRIVER_DESC,
  1366. .date = DRIVER_DATE,
  1367. .major = DRIVER_MAJOR,
  1368. .minor = DRIVER_MINOR,
  1369. .patchlevel = DRIVER_PATCHLEVEL,
  1370. };
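/* Initialize the global memory list and fence table, then register the
 * DRM driver for the KGSL platform device.
 */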
  1371. int kgsl_drm_init(struct platform_device *dev)
  1372. {
  1373. int i;
  1374. driver.num_ioctls = DRM_ARRAY_SIZE(kgsl_drm_ioctls);
  1375. driver.platform_device = dev;
  1376. INIT_LIST_HEAD(&kgsl_mem_list);
  1377. for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
  1378. gem_buf_fence[i].num_buffers = 0;
  1379. gem_buf_fence[i].ts_valid = 0;
  1380. gem_buf_fence[i].fence_id = ENTRY_EMPTY;
  1381. }
  1382. return drm_init(&driver);
  1383. }
  1384. void kgsl_drm_exit(void)
  1385. {
  1386. drm_exit(&driver);
  1387. }