
/drivers/gpu/ion/ion.c

https://bitbucket.org/wisechild/galaxy-nexus
License: GPL-2.0 (as stated in the file header)
  1. /*
  2. * drivers/gpu/ion/ion.c
  3. *
  4. * Copyright (C) 2011 Google, Inc.
  5. *
  6. * This software is licensed under the terms of the GNU General Public
  7. * License version 2, as published by the Free Software Foundation, and
  8. * may be copied, distributed, and modified under those terms.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. */
  16. #include <linux/device.h>
  17. #include <linux/file.h>
  18. #include <linux/fs.h>
  19. #include <linux/anon_inodes.h>
  20. #include <linux/ion.h>
  21. #include <linux/list.h>
  22. #include <linux/miscdevice.h>
  23. #include <linux/mm.h>
  24. #include <linux/mm_types.h>
  25. #include <linux/rbtree.h>
  26. #include <linux/sched.h>
  27. #include <linux/slab.h>
  28. #include <linux/seq_file.h>
  29. #include <linux/uaccess.h>
  30. #include <linux/debugfs.h>
  31. #include "ion_priv.h"
  32. #define DEBUG
  33. /**
  34. * struct ion_device - the metadata of the ion device node
  35. * @dev: the actual misc device
  36. * @buffers: an rb tree of all the existing buffers
  37. * @lock: lock protecting the buffers & heaps trees
  38. * @heaps: list of all the heaps in the system
  39. * @user_clients: list of all the clients created from userspace
  40. */
  41. struct ion_device {
  42. struct miscdevice dev;
  43. struct rb_root buffers;
  44. struct mutex lock;
  45. struct rb_root heaps;
  46. long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
  47. unsigned long arg);
  48. struct rb_root user_clients;
  49. struct rb_root kernel_clients;
  50. struct dentry *debug_root;
  51. };
  52. /**
  53. * struct ion_client - a process/hw block local address space
  54. * @ref: for reference counting the client
  55. * @node: node in the tree of all clients
  56. * @dev: backpointer to ion device
  57. * @handles: an rb tree of all the handles in this client
  58. * @lock: lock protecting the tree of handles
  59. * @heap_mask: mask of all supported heaps
  60. * @name: used for debugging
  61. * @task: used for debugging
  62. *
  63. * A client represents a list of buffers this client may access.
  65. * The mutex stored here is used to protect both the tree of handles
  66. * and the handles themselves, and should be held while modifying either.
  66. */
  67. struct ion_client {
  68. struct kref ref;
  69. struct rb_node node;
  70. struct ion_device *dev;
  71. struct rb_root handles;
  72. struct mutex lock;
  73. unsigned int heap_mask;
  74. const char *name;
  75. struct task_struct *task;
  76. pid_t pid;
  77. struct dentry *debug_root;
  78. };
  79. /**
  80. * ion_handle - a client local reference to a buffer
  81. * @ref: reference count
  82. * @client: back pointer to the client the buffer resides in
  83. * @buffer: pointer to the buffer
  84. * @node: node in the client's handle rbtree
  85. * @kmap_cnt: count of times this client has mapped to kernel
  86. * @dmap_cnt: count of times this client has mapped for dma
  87. * @usermap_cnt: count of times this client has mapped for userspace
  88. *
  89. * Modifications to node, map_cnt or mapping should be protected by the
  90. * lock in the client. Other fields are never changed after initialization.
  91. */
  92. struct ion_handle {
  93. struct kref ref;
  94. struct ion_client *client;
  95. struct ion_buffer *buffer;
  96. struct rb_node node;
  97. unsigned int kmap_cnt;
  98. unsigned int dmap_cnt;
  99. unsigned int usermap_cnt;
  100. };
  101. /* this function should only be called while dev->lock is held */
  102. static void ion_buffer_add(struct ion_device *dev,
  103. struct ion_buffer *buffer)
  104. {
  105. struct rb_node **p = &dev->buffers.rb_node;
  106. struct rb_node *parent = NULL;
  107. struct ion_buffer *entry;
  108. while (*p) {
  109. parent = *p;
  110. entry = rb_entry(parent, struct ion_buffer, node);
  111. if (buffer < entry) {
  112. p = &(*p)->rb_left;
  113. } else if (buffer > entry) {
  114. p = &(*p)->rb_right;
  115. } else {
  116. pr_err("%s: buffer already found.", __func__);
  117. BUG();
  118. }
  119. }
  120. rb_link_node(&buffer->node, parent, p);
  121. rb_insert_color(&buffer->node, &dev->buffers);
  122. }
  123. /* this function should only be called while dev->lock is held */
  124. static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
  125. struct ion_device *dev,
  126. unsigned long len,
  127. unsigned long align,
  128. unsigned long flags)
  129. {
  130. struct ion_buffer *buffer;
  131. int ret;
  132. buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
  133. if (!buffer)
  134. return ERR_PTR(-ENOMEM);
  135. buffer->heap = heap;
  136. kref_init(&buffer->ref);
  137. ret = heap->ops->allocate(heap, buffer, len, align, flags);
  138. if (ret) {
  139. kfree(buffer);
  140. return ERR_PTR(ret);
  141. }
  142. buffer->dev = dev;
  143. buffer->size = len;
  144. mutex_init(&buffer->lock);
  145. ion_buffer_add(dev, buffer);
  146. return buffer;
  147. }
  148. static void ion_buffer_destroy(struct kref *kref)
  149. {
  150. struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
  151. struct ion_device *dev = buffer->dev;
  152. buffer->heap->ops->free(buffer);
  153. mutex_lock(&dev->lock);
  154. rb_erase(&buffer->node, &dev->buffers);
  155. mutex_unlock(&dev->lock);
  156. kfree(buffer);
  157. }
  158. static void ion_buffer_get(struct ion_buffer *buffer)
  159. {
  160. kref_get(&buffer->ref);
  161. }
  162. static int ion_buffer_put(struct ion_buffer *buffer)
  163. {
  164. return kref_put(&buffer->ref, ion_buffer_destroy);
  165. }
  166. static struct ion_handle *ion_handle_create(struct ion_client *client,
  167. struct ion_buffer *buffer)
  168. {
  169. struct ion_handle *handle;
  170. handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
  171. if (!handle)
  172. return ERR_PTR(-ENOMEM);
  173. kref_init(&handle->ref);
  174. rb_init_node(&handle->node);
  175. handle->client = client;
  176. ion_buffer_get(buffer);
  177. handle->buffer = buffer;
  178. return handle;
  179. }
  180. static void ion_handle_destroy(struct kref *kref)
  181. {
  182. struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
  183. /* XXX Can a handle be destroyed while its map count is non-zero?:
  184. if (handle->map_cnt) unmap
  185. */
  186. ion_buffer_put(handle->buffer);
  187. mutex_lock(&handle->client->lock);
  188. if (!RB_EMPTY_NODE(&handle->node))
  189. rb_erase(&handle->node, &handle->client->handles);
  190. mutex_unlock(&handle->client->lock);
  191. kfree(handle);
  192. }
  193. struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
  194. {
  195. return handle->buffer;
  196. }
  197. static void ion_handle_get(struct ion_handle *handle)
  198. {
  199. kref_get(&handle->ref);
  200. }
  201. static int ion_handle_put(struct ion_handle *handle)
  202. {
  203. return kref_put(&handle->ref, ion_handle_destroy);
  204. }
  205. static struct ion_handle *ion_handle_lookup(struct ion_client *client,
  206. struct ion_buffer *buffer)
  207. {
  208. struct rb_node *n;
  209. for (n = rb_first(&client->handles); n; n = rb_next(n)) {
  210. struct ion_handle *handle = rb_entry(n, struct ion_handle,
  211. node);
  212. if (handle->buffer == buffer)
  213. return handle;
  214. }
  215. return NULL;
  216. }
  217. static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
  218. {
  219. struct rb_node *n = client->handles.rb_node;
  220. while (n) {
  221. struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
  222. node);
  223. if (handle < handle_node)
  224. n = n->rb_left;
  225. else if (handle > handle_node)
  226. n = n->rb_right;
  227. else
  228. return true;
  229. }
  230. return false;
  231. }
  232. static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
  233. {
  234. struct rb_node **p = &client->handles.rb_node;
  235. struct rb_node *parent = NULL;
  236. struct ion_handle *entry;
  237. while (*p) {
  238. parent = *p;
  239. entry = rb_entry(parent, struct ion_handle, node);
  240. if (handle < entry)
  241. p = &(*p)->rb_left;
  242. else if (handle > entry)
  243. p = &(*p)->rb_right;
  244. else
  245. WARN(1, "%s: handle already found.", __func__);
  246. }
  247. rb_link_node(&handle->node, parent, p);
  248. rb_insert_color(&handle->node, &client->handles);
  249. }
  250. struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
  251. size_t align, unsigned int flags)
  252. {
  253. struct rb_node *n;
  254. struct ion_handle *handle;
  255. struct ion_device *dev = client->dev;
  256. struct ion_buffer *buffer = NULL;
  257. /*
  258. * traverse the list of heaps available in this system in priority
  259. * order. If the heap type is supported by the client, and matches the
  260. * request of the caller allocate from it. Repeat until allocate has
  261. * succeeded or all heaps have been tried
  262. */
  263. mutex_lock(&dev->lock);
  264. for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
  265. struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
  266. /* if the client doesn't support this heap type */
  267. if (!((1 << heap->type) & client->heap_mask))
  268. continue;
  269. /* if the caller didn't specify this heap type */
  270. if (!((1 << heap->id) & flags))
  271. continue;
  272. buffer = ion_buffer_create(heap, dev, len, align, flags);
  273. if (!IS_ERR_OR_NULL(buffer))
  274. break;
  275. }
  276. mutex_unlock(&dev->lock);
  277. if (IS_ERR_OR_NULL(buffer))
  278. return ERR_PTR(PTR_ERR(buffer));
  279. handle = ion_handle_create(client, buffer);
  280. if (IS_ERR_OR_NULL(handle))
  281. goto end;
  282. /*
  283. * ion_buffer_create will create a buffer with a ref_cnt of 1,
  284. * and ion_handle_create will take a second reference, drop one here
  285. */
  286. ion_buffer_put(buffer);
  287. mutex_lock(&client->lock);
  288. ion_handle_add(client, handle);
  289. mutex_unlock(&client->lock);
  290. return handle;
  291. end:
  292. ion_buffer_put(buffer);
  293. return handle;
  294. }
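
ion_alloc() above walks the registered heaps in priority order (the heap rbtree is keyed by heap id) and allocates from the first heap whose type is in the client's heap_mask and whose id bit is set in flags. A minimal in-kernel usage sketch built only on functions defined in this file; the heap id, the client name "example" and the function name are assumptions for illustration, not part of the driver:

/*
 * Illustrative sketch (not part of this file): allocate one page from the
 * heap with id 0, touch it through a kernel mapping, then release it.
 */
static int ion_example_alloc(struct ion_device *idev)
{
	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;

	/* heap_mask of -1 accepts every heap type, as ion_open() does */
	client = ion_client_create(idev, -1, "example");
	if (IS_ERR_OR_NULL(client))
		return -ENOMEM;

	/* flags select heaps by id: bit n requests the heap with id n */
	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, 1 << 0);
	if (IS_ERR_OR_NULL(handle)) {
		ion_client_destroy(client);
		return -ENOMEM;
	}

	vaddr = ion_map_kernel(client, handle);
	if (!IS_ERR_OR_NULL(vaddr)) {
		memset(vaddr, 0, PAGE_SIZE);
		ion_unmap_kernel(client, handle);
	}

	ion_free(client, handle);
	ion_client_destroy(client);
	return 0;
}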
  295. void ion_free(struct ion_client *client, struct ion_handle *handle)
  296. {
  297. bool valid_handle;
  298. BUG_ON(client != handle->client);
  299. mutex_lock(&client->lock);
  300. valid_handle = ion_handle_validate(client, handle);
  301. mutex_unlock(&client->lock);
  302. if (!valid_handle) {
  303. WARN(1, "%s: invalid handle passed to free.\n", __func__);
  304. return;
  305. }
  306. ion_handle_put(handle);
  307. }
  308. static void ion_client_get(struct ion_client *client);
  309. static int ion_client_put(struct ion_client *client);
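/*
 * The two helpers below implement the two-level map counting used by
 * ion_map_kernel()/ion_map_dma(): handle_cnt counts how many times this
 * particular handle has mapped the buffer, while buffer_cnt counts how
 * many handles currently have it mapped.  The heap's map op only runs
 * when buffer_cnt goes from 0 to 1, and the matching unmap op only when
 * it drops back to 0.
 */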
  310. static bool _ion_map(int *buffer_cnt, int *handle_cnt)
  311. {
  312. bool map;
  313. BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);
  314. if (*buffer_cnt)
  315. map = false;
  316. else
  317. map = true;
  318. if (*handle_cnt == 0)
  319. (*buffer_cnt)++;
  320. (*handle_cnt)++;
  321. return map;
  322. }
  323. static bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
  324. {
  325. BUG_ON(*handle_cnt == 0);
  326. (*handle_cnt)--;
  327. if (*handle_cnt != 0)
  328. return false;
  329. BUG_ON(*buffer_cnt == 0);
  330. (*buffer_cnt)--;
  331. if (*buffer_cnt == 0)
  332. return true;
  333. return false;
  334. }
  335. int ion_phys(struct ion_client *client, struct ion_handle *handle,
  336. ion_phys_addr_t *addr, size_t *len)
  337. {
  338. struct ion_buffer *buffer;
  339. int ret;
  340. mutex_lock(&client->lock);
  341. if (!ion_handle_validate(client, handle)) {
  342. mutex_unlock(&client->lock);
  343. return -EINVAL;
  344. }
  345. buffer = handle->buffer;
  346. if (!buffer->heap->ops->phys) {
  347. pr_err("%s: ion_phys is not implemented by this heap.\n",
  348. __func__);
  349. mutex_unlock(&client->lock);
  350. return -ENODEV;
  351. }
  352. mutex_unlock(&client->lock);
  353. ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
  354. return ret;
  355. }
  356. void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
  357. {
  358. struct ion_buffer *buffer;
  359. void *vaddr;
  360. mutex_lock(&client->lock);
  361. if (!ion_handle_validate(client, handle)) {
  362. pr_err("%s: invalid handle passed to map_kernel.\n",
  363. __func__);
  364. mutex_unlock(&client->lock);
  365. return ERR_PTR(-EINVAL);
  366. }
  367. buffer = handle->buffer;
  368. mutex_lock(&buffer->lock);
  369. if (!handle->buffer->heap->ops->map_kernel) {
  370. pr_err("%s: map_kernel is not implemented by this heap.\n",
  371. __func__);
  372. mutex_unlock(&buffer->lock);
  373. mutex_unlock(&client->lock);
  374. return ERR_PTR(-ENODEV);
  375. }
  376. if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
  377. vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
  378. if (IS_ERR_OR_NULL(vaddr))
  379. _ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
  380. buffer->vaddr = vaddr;
  381. } else {
  382. vaddr = buffer->vaddr;
  383. }
  384. mutex_unlock(&buffer->lock);
  385. mutex_unlock(&client->lock);
  386. return vaddr;
  387. }
  388. struct scatterlist *ion_map_dma(struct ion_client *client,
  389. struct ion_handle *handle)
  390. {
  391. struct ion_buffer *buffer;
  392. struct scatterlist *sglist;
  393. mutex_lock(&client->lock);
  394. if (!ion_handle_validate(client, handle)) {
  395. pr_err("%s: invalid handle passed to map_dma.\n",
  396. __func__);
  397. mutex_unlock(&client->lock);
  398. return ERR_PTR(-EINVAL);
  399. }
  400. buffer = handle->buffer;
  401. mutex_lock(&buffer->lock);
  402. if (!handle->buffer->heap->ops->map_dma) {
  403. pr_err("%s: map_dma is not implemented by this heap.\n",
  404. __func__);
  405. mutex_unlock(&buffer->lock);
  406. mutex_unlock(&client->lock);
  407. return ERR_PTR(-ENODEV);
  408. }
  409. if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
  410. sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
  411. if (IS_ERR_OR_NULL(sglist))
  412. _ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
  413. buffer->sglist = sglist;
  414. } else {
  415. sglist = buffer->sglist;
  416. }
  417. mutex_unlock(&buffer->lock);
  418. mutex_unlock(&client->lock);
  419. return sglist;
  420. }
  421. void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
  422. {
  423. struct ion_buffer *buffer;
  424. mutex_lock(&client->lock);
  425. buffer = handle->buffer;
  426. mutex_lock(&buffer->lock);
  427. if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
  428. buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
  429. buffer->vaddr = NULL;
  430. }
  431. mutex_unlock(&buffer->lock);
  432. mutex_unlock(&client->lock);
  433. }
  434. void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
  435. {
  436. struct ion_buffer *buffer;
  437. mutex_lock(&client->lock);
  438. buffer = handle->buffer;
  439. mutex_lock(&buffer->lock);
  440. if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
  441. buffer->heap->ops->unmap_dma(buffer->heap, buffer);
  442. buffer->sglist = NULL;
  443. }
  444. mutex_unlock(&buffer->lock);
  445. mutex_unlock(&client->lock);
  446. }
  447. struct ion_buffer *ion_share(struct ion_client *client,
  448. struct ion_handle *handle)
  449. {
  450. bool valid_handle;
  451. mutex_lock(&client->lock);
  452. valid_handle = ion_handle_validate(client, handle);
  453. mutex_unlock(&client->lock);
  454. if (!valid_handle) {
  455. WARN(1, "%s: invalid handle passed to share.\n", __func__);
  456. return ERR_PTR(-EINVAL);
  457. }
  458. /* do not take an extra reference here, the burden is on the caller
  459. * to make sure the buffer doesn't go away while it's passing it
  460. * to another client -- ion_free should not be called on this handle
  461. * until the buffer has been imported into the other client
  462. */
  463. return handle->buffer;
  464. }
  465. struct ion_handle *ion_import(struct ion_client *client,
  466. struct ion_buffer *buffer)
  467. {
  468. struct ion_handle *handle = NULL;
  469. mutex_lock(&client->lock);
  470. /* if a handle exists for this buffer just take a reference to it */
  471. handle = ion_handle_lookup(client, buffer);
  472. if (!IS_ERR_OR_NULL(handle)) {
  473. ion_handle_get(handle);
  474. goto end;
  475. }
  476. handle = ion_handle_create(client, buffer);
  477. if (IS_ERR_OR_NULL(handle))
  478. goto end;
  479. ion_handle_add(client, handle);
  480. end:
  481. mutex_unlock(&client->lock);
  482. return handle;
  483. }
  484. static const struct file_operations ion_share_fops;
  485. struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
  486. {
  487. struct file *file = fget(fd);
  488. struct ion_handle *handle;
  489. if (!file) {
  490. pr_err("%s: imported fd not found in file table.\n", __func__);
  491. return ERR_PTR(-EINVAL);
  492. }
  493. if (file->f_op != &ion_share_fops) {
  494. pr_err("%s: imported file is not a shared ion file.\n",
  495. __func__);
  496. handle = ERR_PTR(-EINVAL);
  497. goto end;
  498. }
  499. handle = ion_import(client, file->private_data);
  500. end:
  501. fput(file);
  502. return handle;
  503. }
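
ion_share()/ion_import() above (and ion_import_fd() for buffers arriving as file descriptors) pass a buffer between clients without copying it. A hedged sketch of handing a buffer from one in-kernel client to another; the function and parameter names are illustrative only:

/*
 * Illustrative sketch (not part of this file): give client "dst" access
 * to a buffer currently held by client "src".  As the comment in
 * ion_share() notes, the source handle must not be freed until the
 * destination client has imported the buffer.
 */
static int ion_example_pass(struct ion_client *src, struct ion_handle *h,
			    struct ion_client *dst,
			    struct ion_handle **out)
{
	struct ion_buffer *buffer;
	struct ion_handle *imported;

	buffer = ion_share(src, h);	/* no extra buffer reference taken */
	if (IS_ERR_OR_NULL(buffer))
		return -EINVAL;

	imported = ion_import(dst, buffer);	/* takes its own references */
	if (IS_ERR_OR_NULL(imported))
		return -EINVAL;

	/* only now is it safe for "src" to drop its handle if it wants to */
	*out = imported;
	return 0;
}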
  504. static int ion_debug_client_show(struct seq_file *s, void *unused)
  505. {
  506. struct ion_client *client = s->private;
  507. struct rb_node *n;
  508. size_t sizes[ION_NUM_HEAPS] = {0};
  509. const char *names[ION_NUM_HEAPS] = {0};
  510. int i;
  511. mutex_lock(&client->lock);
  512. for (n = rb_first(&client->handles); n; n = rb_next(n)) {
  513. struct ion_handle *handle = rb_entry(n, struct ion_handle,
  514. node);
  515. enum ion_heap_type type = handle->buffer->heap->type;
  516. if (!names[type])
  517. names[type] = handle->buffer->heap->name;
  518. sizes[type] += handle->buffer->size;
  519. }
  520. mutex_unlock(&client->lock);
  521. seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
  522. for (i = 0; i < ION_NUM_HEAPS; i++) {
  523. if (!names[i])
  524. continue;
  525. seq_printf(s, "%16.16s: %16zu %d\n", names[i], sizes[i],
  526. atomic_read(&client->ref.refcount));
  527. }
  528. return 0;
  529. }
  530. static int ion_debug_client_open(struct inode *inode, struct file *file)
  531. {
  532. return single_open(file, ion_debug_client_show, inode->i_private);
  533. }
  534. static const struct file_operations debug_client_fops = {
  535. .open = ion_debug_client_open,
  536. .read = seq_read,
  537. .llseek = seq_lseek,
  538. .release = single_release,
  539. };
  540. static struct ion_client *ion_client_lookup(struct ion_device *dev,
  541. struct task_struct *task)
  542. {
  543. struct rb_node *n = dev->user_clients.rb_node;
  544. struct ion_client *client;
  545. mutex_lock(&dev->lock);
  546. while (n) {
  547. client = rb_entry(n, struct ion_client, node);
  548. if (task == client->task) {
  549. ion_client_get(client);
  550. mutex_unlock(&dev->lock);
  551. return client;
  552. } else if (task < client->task) {
  553. n = n->rb_left;
  554. } else if (task > client->task) {
  555. n = n->rb_right;
  556. }
  557. }
  558. mutex_unlock(&dev->lock);
  559. return NULL;
  560. }
  561. struct ion_client *ion_client_create(struct ion_device *dev,
  562. unsigned int heap_mask,
  563. const char *name)
  564. {
  565. struct ion_client *client;
  566. struct task_struct *task;
  567. struct rb_node **p;
  568. struct rb_node *parent = NULL;
  569. struct ion_client *entry;
  570. char debug_name[64];
  571. pid_t pid;
  572. get_task_struct(current->group_leader);
  573. task_lock(current->group_leader);
  574. pid = task_pid_nr(current->group_leader);
  575. /* don't bother to store task struct for kernel threads,
  576. they can't be killed anyway */
  577. if (current->group_leader->flags & PF_KTHREAD) {
  578. put_task_struct(current->group_leader);
  579. task = NULL;
  580. } else {
  581. task = current->group_leader;
  582. }
  583. task_unlock(current->group_leader);
  584. /* if this isn't a kernel thread, see if a client already
  585. exists */
  586. if (task) {
  587. client = ion_client_lookup(dev, task);
  588. if (!IS_ERR_OR_NULL(client)) {
  589. put_task_struct(current->group_leader);
  590. return client;
  591. }
  592. }
  593. client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
  594. if (!client) {
  595. put_task_struct(current->group_leader);
  596. return ERR_PTR(-ENOMEM);
  597. }
  598. client->dev = dev;
  599. client->handles = RB_ROOT;
  600. mutex_init(&client->lock);
  601. client->name = name;
  602. client->heap_mask = heap_mask;
  603. client->task = task;
  604. client->pid = pid;
  605. kref_init(&client->ref);
  606. mutex_lock(&dev->lock);
  607. if (task) {
  608. p = &dev->user_clients.rb_node;
  609. while (*p) {
  610. parent = *p;
  611. entry = rb_entry(parent, struct ion_client, node);
  612. if (task < entry->task)
  613. p = &(*p)->rb_left;
  614. else if (task > entry->task)
  615. p = &(*p)->rb_right;
  616. }
  617. rb_link_node(&client->node, parent, p);
  618. rb_insert_color(&client->node, &dev->user_clients);
  619. } else {
  620. p = &dev->kernel_clients.rb_node;
  621. while (*p) {
  622. parent = *p;
  623. entry = rb_entry(parent, struct ion_client, node);
  624. if (client < entry)
  625. p = &(*p)->rb_left;
  626. else if (client > entry)
  627. p = &(*p)->rb_right;
  628. }
  629. rb_link_node(&client->node, parent, p);
  630. rb_insert_color(&client->node, &dev->kernel_clients);
  631. }
  632. snprintf(debug_name, 64, "%u", client->pid);
  633. client->debug_root = debugfs_create_file(debug_name, 0664,
  634. dev->debug_root, client,
  635. &debug_client_fops);
  636. mutex_unlock(&dev->lock);
  637. return client;
  638. }
  639. static void _ion_client_destroy(struct kref *kref)
  640. {
  641. struct ion_client *client = container_of(kref, struct ion_client, ref);
  642. struct ion_device *dev = client->dev;
  643. struct rb_node *n;
  644. pr_debug("%s: %d\n", __func__, __LINE__);
  645. while ((n = rb_first(&client->handles))) {
  646. struct ion_handle *handle = rb_entry(n, struct ion_handle,
  647. node);
  648. ion_handle_destroy(&handle->ref);
  649. }
  650. mutex_lock(&dev->lock);
  651. if (client->task) {
  652. rb_erase(&client->node, &dev->user_clients);
  653. put_task_struct(client->task);
  654. } else {
  655. rb_erase(&client->node, &dev->kernel_clients);
  656. }
  657. debugfs_remove_recursive(client->debug_root);
  658. mutex_unlock(&dev->lock);
  659. kfree(client);
  660. }
  661. static void ion_client_get(struct ion_client *client)
  662. {
  663. kref_get(&client->ref);
  664. }
  665. static int ion_client_put(struct ion_client *client)
  666. {
  667. return kref_put(&client->ref, _ion_client_destroy);
  668. }
  669. void ion_client_destroy(struct ion_client *client)
  670. {
  671. ion_client_put(client);
  672. }
  673. static int ion_share_release(struct inode *inode, struct file *file)
  674. {
  675. struct ion_buffer *buffer = file->private_data;
  676. pr_debug("%s: %d\n", __func__, __LINE__);
  677. /* drop the reference to the buffer -- this prevents the
  678. buffer from going away because the client holding it exited
  679. while it was being passed */
  680. ion_buffer_put(buffer);
  681. return 0;
  682. }
  683. static void ion_vma_open(struct vm_area_struct *vma)
  684. {
  685. struct ion_buffer *buffer = vma->vm_file->private_data;
  686. struct ion_handle *handle = vma->vm_private_data;
  687. struct ion_client *client;
  688. pr_debug("%s: %d\n", __func__, __LINE__);
  689. /* check that the client still exists and take a reference so
  690. it can't go away until this vma is closed */
  691. client = ion_client_lookup(buffer->dev, current->group_leader);
  692. if (IS_ERR_OR_NULL(client)) {
  693. vma->vm_private_data = NULL;
  694. return;
  695. }
  696. pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
  697. __func__, __LINE__,
  698. atomic_read(&client->ref.refcount),
  699. atomic_read(&handle->ref.refcount),
  700. atomic_read(&buffer->ref.refcount));
  701. }
  702. static void ion_vma_close(struct vm_area_struct *vma)
  703. {
  704. struct ion_handle *handle = vma->vm_private_data;
  705. struct ion_buffer *buffer = vma->vm_file->private_data;
  706. struct ion_client *client;
  707. pr_debug("%s: %d\n", __func__, __LINE__);
  708. /* this indicates the client is gone, nothing to do here */
  709. if (!handle)
  710. return;
  711. client = handle->client;
  712. pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
  713. __func__, __LINE__,
  714. atomic_read(&client->ref.refcount),
  715. atomic_read(&handle->ref.refcount),
  716. atomic_read(&buffer->ref.refcount));
  717. ion_handle_put(handle);
  718. ion_client_put(client);
  719. pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
  720. __func__, __LINE__,
  721. atomic_read(&client->ref.refcount),
  722. atomic_read(&handle->ref.refcount),
  723. atomic_read(&buffer->ref.refcount));
  724. }
  725. static struct vm_operations_struct ion_vm_ops = {
  726. .open = ion_vma_open,
  727. .close = ion_vma_close,
  728. };
  729. static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
  730. {
  731. struct ion_buffer *buffer = file->private_data;
  732. unsigned long size = vma->vm_end - vma->vm_start;
  733. struct ion_client *client;
  734. struct ion_handle *handle;
  735. int ret;
  736. pr_debug("%s: %d\n", __func__, __LINE__);
  737. /* make sure the client still exists, it's possible for the client to
  738. have gone away but the map/share fd still to be around, take
  739. a reference to it so it can't go away while this mapping exists */
  740. client = ion_client_lookup(buffer->dev, current->group_leader);
  741. if (IS_ERR_OR_NULL(client)) {
  742. pr_err("%s: trying to mmap an ion handle in a process with no "
  743. "ion client\n", __func__);
  744. return -EINVAL;
  745. }
  746. if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) >
  747. buffer->size)) {
  748. pr_err("%s: trying to map larger area than handle has available"
  749. "\n", __func__);
  750. ret = -EINVAL;
  751. goto err;
  752. }
  753. /* find the handle and take a reference to it */
  754. handle = ion_import(client, buffer);
  755. if (IS_ERR_OR_NULL(handle)) {
  756. ret = -EINVAL;
  757. goto err;
  758. }
  759. if (!handle->buffer->heap->ops->map_user) {
  760. pr_err("%s: this heap does not define a method for mapping "
  761. "to userspace\n", __func__);
  762. ret = -EINVAL;
  763. goto err1;
  764. }
  765. mutex_lock(&buffer->lock);
  766. /* now map it to userspace */
  767. ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
  768. mutex_unlock(&buffer->lock);
  769. if (ret) {
  770. pr_err("%s: failure mapping buffer to userspace\n",
  771. __func__);
  772. goto err1;
  773. }
  774. vma->vm_ops = &ion_vm_ops;
  775. /* move the handle into the vm_private_data so we can access it from
  776. vma_open/close */
  777. vma->vm_private_data = handle;
  778. pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
  779. __func__, __LINE__,
  780. atomic_read(&client->ref.refcount),
  781. atomic_read(&handle->ref.refcount),
  782. atomic_read(&buffer->ref.refcount));
  783. return 0;
  784. err1:
  785. /* drop the reference to the handle */
  786. ion_handle_put(handle);
  787. err:
  788. /* drop the reference to the client */
  789. ion_client_put(client);
  790. return ret;
  791. }
  792. static const struct file_operations ion_share_fops = {
  793. .owner = THIS_MODULE,
  794. .release = ion_share_release,
  795. .mmap = ion_share_mmap,
  796. };
  797. static int ion_ioctl_share(struct file *parent, struct ion_client *client,
  798. struct ion_handle *handle)
  799. {
  800. int fd = get_unused_fd();
  801. struct file *file;
  802. if (fd < 0)
  803. return -ENFILE;
  804. file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
  805. handle->buffer, O_RDWR);
  806. if (IS_ERR_OR_NULL(file))
  807. goto err;
  808. ion_buffer_get(handle->buffer);
  809. fd_install(fd, file);
  810. return fd;
  811. err:
  812. put_unused_fd(fd);
  813. return -ENFILE;
  814. }
  815. static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  816. {
  817. struct ion_client *client = filp->private_data;
  818. switch (cmd) {
  819. case ION_IOC_ALLOC:
  820. {
  821. struct ion_allocation_data data;
  822. if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
  823. return -EFAULT;
  824. data.handle = ion_alloc(client, data.len, data.align,
  825. data.flags);
  826. if (copy_to_user((void __user *)arg, &data, sizeof(data)))
  827. return -EFAULT;
  828. break;
  829. }
  830. case ION_IOC_FREE:
  831. {
  832. struct ion_handle_data data;
  833. bool valid;
  834. if (copy_from_user(&data, (void __user *)arg,
  835. sizeof(struct ion_handle_data)))
  836. return -EFAULT;
  837. mutex_lock(&client->lock);
  838. valid = ion_handle_validate(client, data.handle);
  839. mutex_unlock(&client->lock);
  840. if (!valid)
  841. return -EINVAL;
  842. ion_free(client, data.handle);
  843. break;
  844. }
  845. case ION_IOC_MAP:
  846. case ION_IOC_SHARE:
  847. {
  848. struct ion_fd_data data;
  849. if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
  850. return -EFAULT;
  851. mutex_lock(&client->lock);
  852. if (!ion_handle_validate(client, data.handle)) {
  853. pr_err("%s: invalid handle passed to share ioctl.\n",
  854. __func__);
  855. mutex_unlock(&client->lock);
  856. return -EINVAL;
  857. }
  858. data.fd = ion_ioctl_share(filp, client, data.handle);
  859. mutex_unlock(&client->lock);
  860. if (copy_to_user((void __user *)arg, &data, sizeof(data)))
  861. return -EFAULT;
  862. break;
  863. }
  864. case ION_IOC_IMPORT:
  865. {
  866. struct ion_fd_data data;
  867. if (copy_from_user(&data, (void __user *)arg,
  868. sizeof(struct ion_fd_data)))
  869. return -EFAULT;
  870. data.handle = ion_import_fd(client, data.fd);
  871. if (IS_ERR(data.handle))
  872. data.handle = NULL;
  873. if (copy_to_user((void __user *)arg, &data,
  874. sizeof(struct ion_fd_data)))
  875. return -EFAULT;
  876. break;
  877. }
  878. case ION_IOC_CUSTOM:
  879. {
  880. struct ion_device *dev = client->dev;
  881. struct ion_custom_data data;
  882. if (!dev->custom_ioctl)
  883. return -ENOTTY;
  884. if (copy_from_user(&data, (void __user *)arg,
  885. sizeof(struct ion_custom_data)))
  886. return -EFAULT;
  887. return dev->custom_ioctl(client, data.cmd, data.arg);
  888. }
  889. default:
  890. return -ENOTTY;
  891. }
  892. return 0;
  893. }
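
The same operations are reachable from userspace through the misc device registered below (named "ion", so typically /dev/ion) and the ioctls dispatched above. A sketch of ION_IOC_ALLOC followed by ION_IOC_SHARE and mmap(); it assumes the ion uapi definitions from this tree are visible to userspace as <linux/ion.h> and that a heap with id 0 is registered:

/*
 * Userspace sketch (illustrative, not part of this driver): allocate a
 * buffer through /dev/ion and turn it into a shareable, mmap()able fd.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ion.h>

int ion_example(void)
{
	struct ion_allocation_data alloc = {
		.len = 4096,
		.align = 4096,
		.flags = 1 << 0,	/* heap id 0, illustrative */
	};
	struct ion_fd_data share;
	struct ion_handle_data free_data;
	void *p;
	int ionfd, ret;

	ionfd = open("/dev/ion", O_RDWR);
	if (ionfd < 0)
		return -1;

	ret = ioctl(ionfd, ION_IOC_ALLOC, &alloc);
	if (ret < 0)
		goto out;

	share.handle = alloc.handle;
	ret = ioctl(ionfd, ION_IOC_SHARE, &share);	/* fd returned in share.fd */
	if (ret < 0)
		goto out_free;

	/* the new fd is backed by ion_share_fops above, so it supports mmap */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, share.fd, 0);
	if (p != MAP_FAILED)
		munmap(p, 4096);
	close(share.fd);

out_free:
	free_data.handle = alloc.handle;
	ioctl(ionfd, ION_IOC_FREE, &free_data);
out:
	close(ionfd);
	return ret;
}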
  894. static int ion_release(struct inode *inode, struct file *file)
  895. {
  896. struct ion_client *client = file->private_data;
  897. pr_debug("%s: %d\n", __func__, __LINE__);
  898. ion_client_put(client);
  899. return 0;
  900. }
  901. static int ion_open(struct inode *inode, struct file *file)
  902. {
  903. struct miscdevice *miscdev = file->private_data;
  904. struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
  905. struct ion_client *client;
  906. pr_debug("%s: %d\n", __func__, __LINE__);
  907. client = ion_client_create(dev, -1, "user");
  908. if (IS_ERR_OR_NULL(client))
  909. return PTR_ERR(client);
  910. file->private_data = client;
  911. return 0;
  912. }
  913. static const struct file_operations ion_fops = {
  914. .owner = THIS_MODULE,
  915. .open = ion_open,
  916. .release = ion_release,
  917. .unlocked_ioctl = ion_ioctl,
  918. };
  919. static size_t ion_debug_heap_total(struct ion_client *client,
  920. enum ion_heap_type type)
  921. {
  922. size_t size = 0;
  923. struct rb_node *n;
  924. mutex_lock(&client->lock);
  925. for (n = rb_first(&client->handles); n; n = rb_next(n)) {
  926. struct ion_handle *handle = rb_entry(n,
  927. struct ion_handle,
  928. node);
  929. if (handle->buffer->heap->type == type)
  930. size += handle->buffer->size;
  931. }
  932. mutex_unlock(&client->lock);
  933. return size;
  934. }
  935. static int ion_debug_heap_show(struct seq_file *s, void *unused)
  936. {
  937. struct ion_heap *heap = s->private;
  938. struct ion_device *dev = heap->dev;
  939. struct rb_node *n;
  940. seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
  941. for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
  942. struct ion_client *client = rb_entry(n, struct ion_client,
  943. node);
  944. char task_comm[TASK_COMM_LEN];
  945. size_t size = ion_debug_heap_total(client, heap->type);
  946. if (!size)
  947. continue;
  948. get_task_comm(task_comm, client->task);
  949. seq_printf(s, "%16s %16u %16zu\n", task_comm, client->pid,
  950. size);
  951. }
  952. for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
  953. struct ion_client *client = rb_entry(n, struct ion_client,
  954. node);
  955. size_t size = ion_debug_heap_total(client, heap->type);
  956. if (!size)
  957. continue;
  958. seq_printf(s, "%16s %16u %16zu\n", client->name, client->pid,
  959. size);
  960. }
  961. return 0;
  962. }
  963. static int ion_debug_heap_open(struct inode *inode, struct file *file)
  964. {
  965. return single_open(file, ion_debug_heap_show, inode->i_private);
  966. }
  967. static const struct file_operations debug_heap_fops = {
  968. .open = ion_debug_heap_open,
  969. .read = seq_read,
  970. .llseek = seq_lseek,
  971. .release = single_release,
  972. };
  973. void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
  974. {
  975. struct rb_node **p = &dev->heaps.rb_node;
  976. struct rb_node *parent = NULL;
  977. struct ion_heap *entry;
  978. heap->dev = dev;
  979. mutex_lock(&dev->lock);
  980. while (*p) {
  981. parent = *p;
  982. entry = rb_entry(parent, struct ion_heap, node);
  983. if (heap->id < entry->id) {
  984. p = &(*p)->rb_left;
  985. } else if (heap->id > entry->id) {
  986. p = &(*p)->rb_right;
  987. } else {
  988. pr_err("%s: can not insert multiple heaps with "
  989. "id %d\n", __func__, heap->id);
  990. goto end;
  991. }
  992. }
  993. rb_link_node(&heap->node, parent, p);
  994. rb_insert_color(&heap->node, &dev->heaps);
  995. debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
  996. &debug_heap_fops);
  997. end:
  998. mutex_unlock(&dev->lock);
  999. }
  1000. struct ion_device *ion_device_create(long (*custom_ioctl)
  1001. (struct ion_client *client,
  1002. unsigned int cmd,
  1003. unsigned long arg))
  1004. {
  1005. struct ion_device *idev;
  1006. int ret;
  1007. idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
  1008. if (!idev)
  1009. return ERR_PTR(-ENOMEM);
  1010. idev->dev.minor = MISC_DYNAMIC_MINOR;
  1011. idev->dev.name = "ion";
  1012. idev->dev.fops = &ion_fops;
  1013. idev->dev.parent = NULL;
  1014. ret = misc_register(&idev->dev);
  1015. if (ret) {
  1016. pr_err("ion: failed to register misc device.\n");
  1017. return ERR_PTR(ret);
  1018. }
  1019. idev->debug_root = debugfs_create_dir("ion", NULL);
  1020. if (IS_ERR_OR_NULL(idev->debug_root))
  1021. pr_err("ion: failed to create debug files.\n");
  1022. idev->custom_ioctl = custom_ioctl;
  1023. idev->buffers = RB_ROOT;
  1024. mutex_init(&idev->lock);
  1025. idev->heaps = RB_ROOT;
  1026. idev->user_clients = RB_ROOT;
  1027. idev->kernel_clients = RB_ROOT;
  1028. return idev;
  1029. }
  1030. void ion_device_destroy(struct ion_device *dev)
  1031. {
  1032. misc_deregister(&dev->dev);
  1033. /* XXX need to free the heaps and clients ? */
  1034. kfree(dev);
  1035. }
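
For completeness, ion_device_create()/ion_device_add_heap() are what platform code calls at initialization, and ion_device_destroy() undoes it. A hedged registration sketch, assuming the struct ion_heap instances are constructed by heap-specific code outside this file and that no custom ioctl is needed; all "example_" names are illustrative:

/*
 * Platform-side sketch (illustrative): create the ion device at probe
 * time and register previously constructed heaps with it.
 */
static struct ion_device *example_idev;

static int example_ion_probe(struct ion_heap **heaps, int nr_heaps)
{
	int i;

	example_idev = ion_device_create(NULL /* no custom ioctl */);
	if (IS_ERR(example_idev))
		return PTR_ERR(example_idev);

	for (i = 0; i < nr_heaps; i++)
		ion_device_add_heap(example_idev, heaps[i]);

	return 0;
}

static void example_ion_remove(void)
{
	ion_device_destroy(example_idev);
}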