/drivers/media/video/videobuf-core.c

https://gitlab.com/TeamCarbonXtreme/android_kernel_samsung_msm7x27 · C · 1197 lines · 955 code · 182 blank · 60 comment · 211 complexity · f007daf5d537529b8443e695462646de MD5 · raw file

  1. /*
  2. * generic helper functions for handling video4linux capture buffers
  3. *
  4. * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
  5. *
  6. * Highly based on video-buf written originally by:
  7. * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
  8. * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
  9. * (c) 2006 Ted Walther and John Sokol
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License as published by
  13. * the Free Software Foundation; either version 2
  14. */
  15. #include <linux/init.h>
  16. #include <linux/module.h>
  17. #include <linux/moduleparam.h>
  18. #include <linux/mm.h>
  19. #include <linux/sched.h>
  20. #include <linux/slab.h>
  21. #include <linux/interrupt.h>
  22. #include <media/videobuf-core.h>
/* Magic value stamped into every videobuf_buffer at allocation time;
 * used to detect use of uninitialized or corrupted buffers. */
#define MAGIC_BUFFER 0x20070728
/* Panic (BUG) when a magic field does not match the expected value. */
#define MAGIC_CHECK(is, should) \
do { \
	if (unlikely((is) != (should))) { \
		printk(KERN_ERR \
			"magic mismatch: %x (expected %x)\n", \
			is, should); \
		BUG(); \
	} \
} while (0)

/* Module debug level; higher values make dprintk() more verbose. */
static int debug;
module_param(debug, int, 0644);

MODULE_DESCRIPTION("helper module to manage video4linux buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");

/* Debug printk gated on the module's "debug" parameter. */
#define dprintk(level, fmt, arg...) \
do { \
	if (debug >= level) \
		printk(KERN_DEBUG "vbuf: " fmt, ## arg); \
} while (0)

/* --------------------------------------------------------------------- */

/* Invoke an optional backend (int_ops) operation; evaluates to 0 when
 * the backend does not implement it. */
#define CALL(q, f, arg...) \
	((q->int_ops->f) ? q->int_ops->f(arg) : 0)
  46. struct videobuf_buffer *videobuf_alloc(struct videobuf_queue *q)
  47. {
  48. struct videobuf_buffer *vb;
  49. BUG_ON(q->msize < sizeof(*vb));
  50. if (!q->int_ops || !q->int_ops->alloc) {
  51. printk(KERN_ERR "No specific ops defined!\n");
  52. BUG();
  53. }
  54. vb = q->int_ops->alloc(q->msize);
  55. if (NULL != vb) {
  56. init_waitqueue_head(&vb->done);
  57. vb->magic = MAGIC_BUFFER;
  58. }
  59. return vb;
  60. }
  61. EXPORT_SYMBOL_GPL(videobuf_alloc);
/* A buffer is "done" once the driver no longer owns it, i.e. it is
 * neither ACTIVE (in the hardware) nor QUEUED (waiting for it). */
#define WAITON_CONDITION (vb->state != VIDEOBUF_ACTIVE &&\
	vb->state != VIDEOBUF_QUEUED)

/*
 * videobuf_waiton - wait until the driver hands a buffer back
 * @vb:           buffer to wait on
 * @non_blocking: when set, never sleep; return -EAGAIN if still busy
 * @intr:         when set, sleep interruptibly (signal may abort the wait)
 *
 * Returns 0 when the buffer is done, -EAGAIN for a busy non-blocking
 * check, or the (negative) result of the interruptible wait.
 */
int videobuf_waiton(struct videobuf_buffer *vb, int non_blocking, int intr)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);

	if (non_blocking) {
		if (WAITON_CONDITION)
			return 0;
		else
			return -EAGAIN;
	}

	if (intr)
		return wait_event_interruptible(vb->done, WAITON_CONDITION);
	else
		wait_event(vb->done, WAITON_CONDITION);

	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_waiton);
/*
 * videobuf_iolock - lock the buffer's memory for I/O
 * @q:    owning queue
 * @vb:   buffer whose backing memory should be pinned/prepared
 * @fbuf: framebuffer info for overlay buffers (may be NULL otherwise)
 *
 * Delegates to the memory backend's iolock op; returns its result,
 * or 0 when the backend does not implement it.
 */
int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
		struct v4l2_framebuffer *fbuf)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	return CALL(q, iolock, q, vb, fbuf);
}
EXPORT_SYMBOL_GPL(videobuf_iolock);
  88. void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
  89. struct videobuf_buffer *buf)
  90. {
  91. if (q->int_ops->vaddr)
  92. return q->int_ops->vaddr(buf);
  93. return NULL;
  94. }
  95. EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);
  96. /* --------------------------------------------------------------------- */
/*
 * videobuf_queue_core_init - initialize a videobuf queue
 * @q:       queue to initialize; fully overwritten
 * @ops:     driver buffer operations (setup/prepare/queue/release)
 * @dev:     device this queue belongs to
 * @irqlock: driver spinlock protecting the active-buffer list
 * @type:    V4L2 buffer type served by this queue
 * @field:   default field order
 * @msize:   size of the driver's buffer struct (>= videobuf_buffer)
 * @priv:    driver private data
 * @int_ops: memory-type backend operations
 */
void videobuf_queue_core_init(struct videobuf_queue *q,
			 const struct videobuf_queue_ops *ops,
			 struct device *dev,
			 spinlock_t *irqlock,
			 enum v4l2_buf_type type,
			 enum v4l2_field field,
			 unsigned int msize,
			 void *priv,
			 struct videobuf_qtype_ops *int_ops)
{
	BUG_ON(!q);
	memset(q, 0, sizeof(*q));
	q->irqlock   = irqlock;
	q->dev       = dev;
	q->type      = type;
	q->field     = field;
	q->msize     = msize;
	q->ops       = ops;
	q->priv_data = priv;
	q->int_ops   = int_ops;

	/* All buffer operations are mandatory */
	BUG_ON(!q->ops->buf_setup);
	BUG_ON(!q->ops->buf_prepare);
	BUG_ON(!q->ops->buf_queue);
	BUG_ON(!q->ops->buf_release);

	/* Lock is mandatory for queue_cancel to work */
	BUG_ON(!irqlock);

	/* Having implementations for abstract methods are mandatory */
	BUG_ON(!q->int_ops);

	mutex_init(&q->vb_lock);
	init_waitqueue_head(&q->wait);
	INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
  131. /* Locking: Only usage in bttv unsafe find way to remove */
  132. int videobuf_queue_is_busy(struct videobuf_queue *q)
  133. {
  134. int i;
  135. MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
  136. if (q->streaming) {
  137. dprintk(1, "busy: streaming active\n");
  138. return 1;
  139. }
  140. if (q->reading) {
  141. dprintk(1, "busy: pending read #1\n");
  142. return 1;
  143. }
  144. if (q->read_buf) {
  145. dprintk(1, "busy: pending read #2\n");
  146. return 1;
  147. }
  148. for (i = 0; i < VIDEO_MAX_FRAME; i++) {
  149. if (NULL == q->bufs[i])
  150. continue;
  151. if (q->bufs[i]->map) {
  152. dprintk(1, "busy: buffer #%d mapped\n", i);
  153. return 1;
  154. }
  155. if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
  156. dprintk(1, "busy: buffer #%d queued\n", i);
  157. return 1;
  158. }
  159. if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
  160. dprintk(1, "busy: buffer #%d avtive\n", i);
  161. return 1;
  162. }
  163. }
  164. return 0;
  165. }
  166. EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
/* Locking: Caller holds q->vb_lock */
/*
 * videobuf_queue_cancel - abort all I/O on a queue
 *
 * Stops streaming and read() I/O, wakes queue waiters, marks every
 * QUEUED buffer as ERROR (waking its waiters), releases all buffers
 * via the driver and empties the stream list.
 */
void videobuf_queue_cancel(struct videobuf_queue *q)
{
	unsigned long flags = 0;
	int i;

	q->streaming = 0;
	q->reading  = 0;
	wake_up_interruptible_sync(&q->wait);

	/* remove queued buffers from list */
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
			list_del(&q->bufs[i]->queue);
			/* mark failed so waiters see an error,
			 * not a completed frame */
			q->bufs[i]->state = VIDEOBUF_ERROR;
			wake_up_all(&q->bufs[i]->done);
		}
	}
	spin_unlock_irqrestore(q->irqlock, flags);

	/* free all buffers + clear queue */
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
	}
	INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
  196. /* --------------------------------------------------------------------- */
  197. /* Locking: Caller holds q->vb_lock */
  198. enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
  199. {
  200. enum v4l2_field field = q->field;
  201. BUG_ON(V4L2_FIELD_ANY == field);
  202. if (V4L2_FIELD_ALTERNATE == field) {
  203. if (V4L2_FIELD_TOP == q->last) {
  204. field = V4L2_FIELD_BOTTOM;
  205. q->last = V4L2_FIELD_BOTTOM;
  206. } else {
  207. field = V4L2_FIELD_TOP;
  208. q->last = V4L2_FIELD_TOP;
  209. }
  210. }
  211. return field;
  212. }
  213. EXPORT_SYMBOL_GPL(videobuf_next_field);
/* Locking: Caller holds q->vb_lock */
/*
 * Fill @b with the current status of @vb for reporting back to
 * userspace (used by QUERYBUF and DQBUF).
 */
static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
			    struct videobuf_buffer *vb, enum v4l2_buf_type type)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	b->index  = vb->i;
	b->type   = type;

	b->memory = vb->memory;
	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		b->m.offset = vb->boff;
		b->length   = vb->bsize;
		break;
	case V4L2_MEMORY_USERPTR:
		b->m.userptr = vb->baddr;
		b->length    = vb->bsize;
		break;
	case V4L2_MEMORY_OVERLAY:
		b->m.offset = vb->boff;
		break;
	}

	b->flags = 0;
	if (vb->map)
		b->flags |= V4L2_BUF_FLAG_MAPPED;

	switch (vb->state) {
	case VIDEOBUF_PREPARED:
	case VIDEOBUF_QUEUED:
	case VIDEOBUF_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VIDEOBUF_ERROR:
		/* error buffers report both ERROR and DONE */
		b->flags |= V4L2_BUF_FLAG_ERROR;
		/* fall through */
	case VIDEOBUF_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VIDEOBUF_NEEDS_INIT:
	case VIDEOBUF_IDLE:
		/* nothing */
		break;
	}

	if (vb->input != UNSET) {
		b->flags |= V4L2_BUF_FLAG_INPUT;
		b->input = vb->input;
	}

	b->field     = vb->field;
	b->timestamp = vb->ts;
	b->bytesused = vb->size;
	b->sequence  = vb->field_count >> 1;
}
/* Locking: Caller holds q->vb_lock */
/*
 * Free all buffers of @q.  First pass refuses (-EBUSY) if any buffer
 * is still mmap()ed; only when none are mapped does the second pass
 * release and free them.  Returns 0 on success.
 */
static int __videobuf_mmap_free(struct videobuf_queue *q)
{
	int i;

	if (!q)
		return 0;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	for (i = 0; i < VIDEO_MAX_FRAME; i++)
		if (q->bufs[i] && q->bufs[i]->map)
			return -EBUSY;

	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}

	return 0;
}
  284. int videobuf_mmap_free(struct videobuf_queue *q)
  285. {
  286. int ret;
  287. mutex_lock(&q->vb_lock);
  288. ret = __videobuf_mmap_free(q);
  289. mutex_unlock(&q->vb_lock);
  290. return ret;
  291. }
  292. EXPORT_SYMBOL_GPL(videobuf_mmap_free);
/* Locking: Caller holds q->vb_lock */
/*
 * Free any existing buffers, then allocate and initialize up to
 * @bcount buffers of @bsize bytes for the given @memory type.
 * MMAP buffers get page-aligned fake offsets used as mmap cookies.
 * Returns the number of buffers allocated (may be fewer than
 * requested), -ENOMEM when none could be allocated, or the error
 * from __videobuf_mmap_free().
 */
int __videobuf_mmap_setup(struct videobuf_queue *q,
			unsigned int bcount, unsigned int bsize,
			enum v4l2_memory memory)
{
	unsigned int i;
	int err;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	err = __videobuf_mmap_free(q);
	if (0 != err)
		return err;

	/* Allocate and initialize buffers */
	for (i = 0; i < bcount; i++) {
		q->bufs[i] = videobuf_alloc(q);

		if (NULL == q->bufs[i])
			break;

		q->bufs[i]->i      = i;
		q->bufs[i]->input  = UNSET;
		q->bufs[i]->memory = memory;
		q->bufs[i]->bsize  = bsize;
		switch (memory) {
		case V4L2_MEMORY_MMAP:
			q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
			break;
		case V4L2_MEMORY_USERPTR:
		case V4L2_MEMORY_OVERLAY:
			/* nothing */
			break;
		}
	}

	if (!i)
		return -ENOMEM;

	dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);

	return i;
}
EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
  329. int videobuf_mmap_setup(struct videobuf_queue *q,
  330. unsigned int bcount, unsigned int bsize,
  331. enum v4l2_memory memory)
  332. {
  333. int ret;
  334. mutex_lock(&q->vb_lock);
  335. ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
  336. mutex_unlock(&q->vb_lock);
  337. return ret;
  338. }
  339. EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
/*
 * videobuf_reqbufs - implement VIDIOC_REQBUFS
 * @q:   queue to (re)allocate buffers for
 * @req: userspace request; req->count is updated to the number of
 *       buffers actually allocated
 *
 * Validates the request, asks the driver for count/size via
 * buf_setup, then allocates the buffers.  Fails with -EBUSY while
 * streaming or while buffers are still enqueued.
 */
int videobuf_reqbufs(struct videobuf_queue *q,
		 struct v4l2_requestbuffers *req)
{
	unsigned int size, count;
	int retval;

	if (req->count < 1) {
		dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
		return -EINVAL;
	}

	if (req->memory != V4L2_MEMORY_MMAP &&
	    req->memory != V4L2_MEMORY_USERPTR &&
	    req->memory != V4L2_MEMORY_OVERLAY) {
		dprintk(1, "reqbufs: memory type invalid\n");
		return -EINVAL;
	}

	mutex_lock(&q->vb_lock);
	if (req->type != q->type) {
		dprintk(1, "reqbufs: queue type invalid\n");
		retval = -EINVAL;
		goto done;
	}

	if (q->streaming) {
		dprintk(1, "reqbufs: streaming already exists\n");
		retval = -EBUSY;
		goto done;
	}
	if (!list_empty(&q->stream)) {
		dprintk(1, "reqbufs: stream running\n");
		retval = -EBUSY;
		goto done;
	}

	count = req->count;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = 0;
	/* let the driver decide buffer count and size */
	q->ops->buf_setup(q, &count, &size);
	dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
		count, size,
		(unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));

	retval = __videobuf_mmap_setup(q, count, size, req->memory);
	if (retval < 0) {
		dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
		goto done;
	}

	req->count = retval;
	retval = 0;

 done:
	mutex_unlock(&q->vb_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_reqbufs);
/*
 * videobuf_querybuf - implement VIDIOC_QUERYBUF
 *
 * Validates @b's type and index and fills it with the status of the
 * corresponding buffer.  Returns 0 on success, -EINVAL otherwise.
 */
int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	int ret = -EINVAL;

	mutex_lock(&q->vb_lock);
	if (unlikely(b->type != q->type)) {
		dprintk(1, "querybuf: Wrong type.\n");
		goto done;
	}
	if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
		dprintk(1, "querybuf: index out of range.\n");
		goto done;
	}
	if (unlikely(NULL == q->bufs[b->index])) {
		dprintk(1, "querybuf: buffer is null.\n");
		goto done;
	}

	videobuf_status(q, b, q->bufs[b->index], q->type);

	ret = 0;
done:
	mutex_unlock(&q->vb_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_querybuf);
/*
 * videobuf_qbuf - implement VIDIOC_QBUF
 * @q: queue the buffer belongs to
 * @b: userspace buffer description
 *
 * Validates the request, copies the userspace parameters into the
 * matching videobuf_buffer, prepares it via the driver and appends it
 * to the stream list (queueing it to the hardware immediately when
 * streaming is already on).
 *
 * Lock ordering: mmap_sem is taken before vb_lock because buffer
 * preparation for MMAP buffers may touch the process address space.
 */
int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	struct videobuf_buffer *buf;
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (b->memory == V4L2_MEMORY_MMAP)
		down_read(&current->mm->mmap_sem);

	mutex_lock(&q->vb_lock);
	retval = -EBUSY;
	if (q->reading) {
		dprintk(1, "qbuf: Reading running...\n");
		goto done;
	}
	retval = -EINVAL;
	if (b->type != q->type) {
		dprintk(1, "qbuf: Wrong type.\n");
		goto done;
	}
	if (b->index >= VIDEO_MAX_FRAME) {
		dprintk(1, "qbuf: index out of range.\n");
		goto done;
	}
	buf = q->bufs[b->index];
	if (NULL == buf) {
		dprintk(1, "qbuf: buffer is null.\n");
		goto done;
	}
	MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
	if (buf->memory != b->memory) {
		dprintk(1, "qbuf: memory type is wrong.\n");
		goto done;
	}
	if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
		dprintk(1, "qbuf: buffer is already queued or active.\n");
		goto done;
	}

	if (b->flags & V4L2_BUF_FLAG_INPUT) {
		if (b->input >= q->inputs) {
			dprintk(1, "qbuf: wrong input.\n");
			goto done;
		}
		buf->input = b->input;
	} else {
		buf->input = UNSET;
	}

	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		if (0 == buf->baddr) {
			dprintk(1, "qbuf: mmap requested "
				   "but buffer addr is zero!\n");
			goto done;
		}
		/* output buffers carry payload info from userspace */
		if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
		    || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
		    || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
			buf->size = b->bytesused;
			buf->field = b->field;
			buf->ts = b->timestamp;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		if (b->length < buf->bsize) {
			dprintk(1, "qbuf: buffer length is not enough\n");
			goto done;
		}
		/* release old mapping if userspace moved the buffer */
		if (VIDEOBUF_NEEDS_INIT != buf->state &&
		    buf->baddr != b->m.userptr)
			q->ops->buf_release(q, buf);
		buf->baddr = b->m.userptr;
		break;
	case V4L2_MEMORY_OVERLAY:
		buf->boff = b->m.offset;
		break;
	default:
		dprintk(1, "qbuf: wrong memory type\n");
		goto done;
	}

	dprintk(1, "qbuf: requesting next field\n");
	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, buf, field);
	if (0 != retval) {
		dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
		goto done;
	}

	list_add_tail(&buf->stream, &q->stream);
	if (q->streaming) {
		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, buf);
		spin_unlock_irqrestore(q->irqlock, flags);
	}
	dprintk(1, "qbuf: succeeded\n");
	retval = 0;
	wake_up_interruptible_sync(&q->wait);

done:
	mutex_unlock(&q->vb_lock);

	if (b->memory == V4L2_MEMORY_MMAP)
		up_read(&current->mm->mmap_sem);

	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_qbuf);
/* Locking: Caller holds q->vb_lock */
/*
 * Wait until the stream list holds a dequeueable buffer (or fail
 * immediately with -EAGAIN when @noblock).  The queue lock is
 * temporarily dropped while sleeping; the conditions are re-checked
 * under the lock (goto checks) before returning success.
 */
static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
{
	int retval;

checks:
	if (!q->streaming) {
		dprintk(1, "next_buffer: Not streaming\n");
		retval = -EINVAL;
		goto done;
	}

	if (list_empty(&q->stream)) {
		if (noblock) {
			retval = -EAGAIN;
			dprintk(2, "next_buffer: no buffers to dequeue\n");
			goto done;
		} else {
			dprintk(2, "next_buffer: waiting on buffer\n");

			/* Drop lock to avoid deadlock with qbuf */
			mutex_unlock(&q->vb_lock);

			/* Checking list_empty and streaming is safe without
			 * locks because we goto checks to validate while
			 * holding locks before proceeding */
			retval = wait_event_interruptible(q->wait,
				!list_empty(&q->stream) || !q->streaming);
			mutex_lock(&q->vb_lock);

			if (retval)
				goto done;

			goto checks;
		}
	}

	retval = 0;

done:
	return retval;
}
/* Locking: Caller holds q->vb_lock */
/*
 * Fetch the next completed buffer from the stream list into *@vb.
 * Waits for queue activity and then for the buffer itself unless
 * @nonblocking is set.  Returns 0 on success, negative errno on
 * failure (in which case *@vb is left untouched).
 */
static int stream_next_buffer(struct videobuf_queue *q,
			struct videobuf_buffer **vb, int nonblocking)
{
	int retval;
	struct videobuf_buffer *buf = NULL;

	retval = stream_next_buffer_check_queue(q, nonblocking);
	if (retval)
		goto done;

	buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
	retval = videobuf_waiton(buf, nonblocking, 1);
	if (retval < 0)
		goto done;

	*vb = buf;
done:
	return retval;
}
/*
 * videobuf_dqbuf - implement VIDIOC_DQBUF
 * @q:           queue to dequeue from
 * @b:           zeroed and filled with the dequeued buffer's status
 * @nonblocking: fail with -EAGAIN instead of sleeping
 *
 * Waits for the next DONE/ERROR buffer, syncs it with the backend,
 * reports its status, removes it from the stream list and marks it
 * IDLE again.
 */
int videobuf_dqbuf(struct videobuf_queue *q,
	       struct v4l2_buffer *b, int nonblocking)
{
	struct videobuf_buffer *buf = NULL;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	memset(b, 0, sizeof(*b));
	mutex_lock(&q->vb_lock);

	retval = stream_next_buffer(q, &buf, nonblocking);
	if (retval < 0) {
		dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
		goto done;
	}

	switch (buf->state) {
	case VIDEOBUF_ERROR:
		dprintk(1, "dqbuf: state is error\n");
		break;
	case VIDEOBUF_DONE:
		dprintk(1, "dqbuf: state is done\n");
		break;
	default:
		dprintk(1, "dqbuf: state invalid\n");
		retval = -EINVAL;
		goto done;
	}
	CALL(q, sync, q, buf);
	videobuf_status(q, b, buf, q->type);
	list_del(&buf->stream);
	buf->state = VIDEOBUF_IDLE;
	/* buffer returns to userspace ownership, so it is not "done" */
	b->flags &= ~V4L2_BUF_FLAG_DONE;
done:
	mutex_unlock(&q->vb_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_dqbuf);
/*
 * videobuf_streamon - implement VIDIOC_STREAMON
 *
 * Turns on streaming and hands every already-PREPARED buffer on the
 * stream list to the driver.  Returns -EBUSY while read() I/O is
 * active; turning streaming on twice is a successful no-op.
 */
int videobuf_streamon(struct videobuf_queue *q)
{
	struct videobuf_buffer *buf;
	unsigned long flags = 0;
	int retval;

	mutex_lock(&q->vb_lock);
	retval = -EBUSY;
	if (q->reading)
		goto done;
	retval = 0;
	if (q->streaming)
		goto done;
	q->streaming = 1;
	spin_lock_irqsave(q->irqlock, flags);
	list_for_each_entry(buf, &q->stream, stream)
		if (buf->state == VIDEOBUF_PREPARED)
			q->ops->buf_queue(q, buf);
	spin_unlock_irqrestore(q->irqlock, flags);

	wake_up_interruptible_sync(&q->wait);
done:
	mutex_unlock(&q->vb_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamon);
  626. /* Locking: Caller holds q->vb_lock */
  627. static int __videobuf_streamoff(struct videobuf_queue *q)
  628. {
  629. if (!q->streaming)
  630. return -EINVAL;
  631. videobuf_queue_cancel(q);
  632. return 0;
  633. }
  634. int videobuf_streamoff(struct videobuf_queue *q)
  635. {
  636. int retval;
  637. mutex_lock(&q->vb_lock);
  638. retval = __videobuf_streamoff(q);
  639. mutex_unlock(&q->vb_lock);
  640. return retval;
  641. }
  642. EXPORT_SYMBOL_GPL(videobuf_streamoff);
/* Locking: Caller holds q->vb_lock */
/*
 * Capture one frame directly into the user buffer (USERPTR path of
 * read()): allocate a temporary buffer wrapping @data, queue it, wait
 * for completion and return the number of bytes captured, -EIO on a
 * failed capture, or a negative errno.  The temporary buffer is
 * always released and freed before returning (even when buf_prepare
 * fails).
 */
static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
				  char __user *data,
				  size_t count, loff_t *ppos)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	/* setup stuff */
	q->read_buf = videobuf_alloc(q);
	if (NULL == q->read_buf)
		return -ENOMEM;

	q->read_buf->memory = V4L2_MEMORY_USERPTR;
	q->read_buf->baddr  = (unsigned long)data;
	q->read_buf->bsize  = count;

	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, q->read_buf, field);
	if (0 != retval)
		goto done;

	/* start capture & wait */
	spin_lock_irqsave(q->irqlock, flags);
	q->ops->buf_queue(q, q->read_buf);
	spin_unlock_irqrestore(q->irqlock, flags);

	retval = videobuf_waiton(q->read_buf, 0, 0);
	if (0 == retval) {
		CALL(q, sync, q, q->read_buf);
		if (VIDEOBUF_ERROR == q->read_buf->state)
			retval = -EIO;
		else
			retval = q->read_buf->size;
	}

done:
	/* cleanup */
	q->ops->buf_release(q, q->read_buf);
	kfree(q->read_buf);
	q->read_buf = NULL;
	return retval;
}
/*
 * Copy up to @count bytes of @buf's payload, starting at q->read_off,
 * to userspace.  Returns the number of bytes copied or -EFAULT.
 * NOTE(review): return type is int while @count is size_t; callers
 * pass bounded frame sizes here — confirm before reusing elsewhere.
 */
static int __videobuf_copy_to_user(struct videobuf_queue *q,
			       struct videobuf_buffer *buf,
			       char __user *data, size_t count,
			       int nonblocking)
{
	void *vaddr = CALL(q, vaddr, buf);

	/* copy to userspace */
	if (count > buf->size - q->read_off)
		count = buf->size - q->read_off;

	if (copy_to_user(data, vaddr + q->read_off, count))
		return -EFAULT;

	return count;
}
/*
 * Copy one buffer's payload for the streaming read() path.  When
 * @vbihack is set, the frame counter is stored in the last 32-bit
 * word of the buffer for legacy VBI decoders.  Returns bytes copied;
 * -EFAULT is propagated only when nothing has been copied yet
 * (@pos == 0), otherwise the partial count is returned.
 */
static int __videobuf_copy_stream(struct videobuf_queue *q,
			      struct videobuf_buffer *buf,
			      char __user *data, size_t count, size_t pos,
			      int vbihack, int nonblocking)
{
	unsigned int *fc = CALL(q, vaddr, buf);

	if (vbihack) {
		/* dirty, undocumented hack -- pass the frame counter
			* within the last four bytes of each vbi data block.
			* We need that one to maintain backward compatibility
			* to all vbi decoding software out there ... */
		fc += (buf->size >> 2) - 1;
		*fc = buf->field_count >> 1;
		dprintk(1, "vbihack: %d\n", *fc);
	}

	/* copy stuff using the common method */
	count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);

	if ((count == -EFAULT) && (pos == 0))
		return -EFAULT;

	return count;
}
/*
 * videobuf_read_one - read() helper returning at most one frame
 *
 * Tries a zero-copy capture straight into the user buffer when it is
 * big enough and the caller can block; otherwise captures into a
 * kernel-side bounce buffer (kept in q->read_buf across calls so a
 * large frame can be consumed by several short reads).  Returns bytes
 * copied or a negative errno.
 */
ssize_t videobuf_read_one(struct videobuf_queue *q,
		      char __user *data, size_t count, loff_t *ppos,
		      int nonblocking)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned size = 0, nbufs = 1;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	mutex_lock(&q->vb_lock);

	q->ops->buf_setup(q, &nbufs, &size);

	if (NULL == q->read_buf &&
	    count >= size &&
	    !nonblocking) {
		retval = videobuf_read_zerocopy(q, data, count, ppos);
		if (retval >= 0 || retval == -EIO)
			/* ok, all done */
			goto done;
		/* fallback to kernel bounce buffer on failures */
	}

	if (NULL == q->read_buf) {
		/* need to capture a new frame */
		retval = -ENOMEM;
		q->read_buf = videobuf_alloc(q);

		dprintk(1, "video alloc=0x%p\n", q->read_buf);
		if (NULL == q->read_buf)
			goto done;
		q->read_buf->memory = V4L2_MEMORY_USERPTR;
		q->read_buf->bsize = count; /* preferred size */
		field = videobuf_next_field(q);
		retval = q->ops->buf_prepare(q, q->read_buf, field);

		if (0 != retval) {
			kfree(q->read_buf);
			q->read_buf = NULL;
			goto done;
		}

		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, q->read_buf);
		spin_unlock_irqrestore(q->irqlock, flags);

		q->read_off = 0;
	}

	/* wait until capture is done */
	retval = videobuf_waiton(q->read_buf, nonblocking, 1);
	if (0 != retval)
		goto done;

	CALL(q, sync, q, q->read_buf);

	if (VIDEOBUF_ERROR == q->read_buf->state) {
		/* catch I/O errors */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
		retval = -EIO;
		goto done;
	}

	/* Copy to userspace */
	retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
	if (retval < 0)
		goto done;

	q->read_off += retval;
	if (q->read_off == q->read_buf->size) {
		/* all data copied, cleanup */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
	}

done:
	mutex_unlock(&q->vb_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_one);
/* Locking: Caller holds q->vb_lock */
/*
 * Start read() I/O: allocate 2..VIDEO_MAX_FRAME USERPTR buffers
 * (count/size negotiated with the driver via buf_setup), prepare
 * them, put them on the stream list and queue them all to the
 * hardware.  Returns 0 on success or a negative errno.
 */
static int __videobuf_read_start(struct videobuf_queue *q)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned int count = 0, size = 0;
	int err, i;

	q->ops->buf_setup(q, &count, &size);
	if (count < 2)
		count = 2;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = PAGE_ALIGN(size);

	err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
	if (err < 0)
		return err;

	/* mmap_setup may allocate fewer buffers than requested */
	count = err;

	for (i = 0; i < count; i++) {
		field = videobuf_next_field(q);
		err = q->ops->buf_prepare(q, q->bufs[i], field);
		if (err)
			return err;
		list_add_tail(&q->bufs[i]->stream, &q->stream);
	}
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < count; i++)
		q->ops->buf_queue(q, q->bufs[i]);
	spin_unlock_irqrestore(q->irqlock, flags);
	q->reading = 1;
	return 0;
}
/*
 * Stop read() I/O: cancel pending captures and free the buffer set.
 * The extra kfree loop releases any buffers __videobuf_mmap_free left
 * in place (it refuses to free while a buffer is still mapped).
 * Caller holds q->vb_lock.
 */
static void __videobuf_read_stop(struct videobuf_queue *q)
{
	int i;

	videobuf_queue_cancel(q);
	__videobuf_mmap_free(q);
	INIT_LIST_HEAD(&q->stream);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}
	q->read_buf = NULL;
}
  831. int videobuf_read_start(struct videobuf_queue *q)
  832. {
  833. int rc;
  834. mutex_lock(&q->vb_lock);
  835. rc = __videobuf_read_start(q);
  836. mutex_unlock(&q->vb_lock);
  837. return rc;
  838. }
  839. EXPORT_SYMBOL_GPL(videobuf_read_start);
/*
 * videobuf_read_stop - locked wrapper stopping read() I/O.
 */
void videobuf_read_stop(struct videobuf_queue *q)
{
	mutex_lock(&q->vb_lock);
	__videobuf_read_stop(q);
	mutex_unlock(&q->vb_lock);
}
EXPORT_SYMBOL_GPL(videobuf_read_stop);
/*
 * videobuf_stop - stop whichever I/O mode is active
 *
 * Turns off streaming and/or read() I/O under the queue lock; safe
 * to call when neither is active.
 */
void videobuf_stop(struct videobuf_queue *q)
{
	mutex_lock(&q->vb_lock);

	if (q->streaming)
		__videobuf_streamoff(q);

	if (q->reading)
		__videobuf_read_stop(q);

	mutex_unlock(&q->vb_lock);
}
EXPORT_SYMBOL_GPL(videobuf_stop);
/*
 * videobuf_read_stream - read() helper spanning multiple frames
 * @q:           queue to read from
 * @data:        user buffer
 * @count:       bytes requested
 * @ppos:        file position (unused here)
 * @vbihack:     store the frame counter in each block (VBI legacy)
 * @nonblocking: fail with -EAGAIN instead of sleeping
 *
 * Starts read() I/O if needed, then loops copying completed buffers
 * to userspace and requeueing them until @count is satisfied or an
 * error occurs.  Returns bytes copied or a negative errno; -EBUSY
 * while streaming is active.
 */
ssize_t videobuf_read_stream(struct videobuf_queue *q,
			 char __user *data, size_t count, loff_t *ppos,
			 int vbihack, int nonblocking)
{
	int rc, retval;
	unsigned long flags = 0;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	dprintk(2, "%s\n", __func__);
	mutex_lock(&q->vb_lock);
	retval = -EBUSY;
	if (q->streaming)
		goto done;
	if (!q->reading) {
		retval = __videobuf_read_start(q);
		if (retval < 0)
			goto done;
	}

	retval = 0;
	while (count > 0) {
		/* get / wait for data */
		if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		rc = videobuf_waiton(q->read_buf, nonblocking, 1);
		if (rc < 0) {
			/* report the error only if nothing was copied */
			if (0 == retval)
				retval = rc;
			break;
		}

		if (q->read_buf->state == VIDEOBUF_DONE) {
			rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
					retval, vbihack, nonblocking);
			if (rc < 0) {
				retval = rc;
				break;
			}
			retval      += rc;
			count       -= rc;
			q->read_off += rc;
		} else {
			/* some error */
			q->read_off = q->read_buf->size;
			if (0 == retval)
				retval = -EIO;
		}

		/* requeue buffer when done with copying */
		if (q->read_off == q->read_buf->size) {
			list_add_tail(&q->read_buf->stream,
				      &q->stream);
			spin_lock_irqsave(q->irqlock, flags);
			q->ops->buf_queue(q, q->read_buf);
			spin_unlock_irqrestore(q->irqlock, flags);
			q->read_buf = NULL;
		}
		if (retval < 0)
			break;
	}

done:
	mutex_unlock(&q->vb_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_stream);
/*
 * videobuf_poll_stream - poll() helper for videobuf drivers
 *
 * Picks the buffer to watch (head of the stream list while streaming,
 * otherwise the read() buffer, starting read I/O on demand) and
 * reports POLLIN/POLLOUT when it is DONE or ERROR, POLLERR when no
 * buffer can be obtained.
 */
unsigned int videobuf_poll_stream(struct file *file,
			      struct videobuf_queue *q,
			      poll_table *wait)
{
	struct videobuf_buffer *buf = NULL;
	unsigned int rc = 0;

	mutex_lock(&q->vb_lock);
	if (q->streaming) {
		if (!list_empty(&q->stream))
			buf = list_entry(q->stream.next,
					 struct videobuf_buffer, stream);
	} else {
		if (!q->reading)
			__videobuf_read_start(q);
		if (!q->reading) {
			rc = POLLERR;
		} else if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		buf = q->read_buf;
	}
	if (!buf)
		rc = POLLERR;

	if (0 == rc) {
		poll_wait(file, &buf->done, wait);
		if (buf->state == VIDEOBUF_DONE ||
		    buf->state == VIDEOBUF_ERROR) {
			/* output queues are writable, capture readable */
			switch (q->type) {
			case V4L2_BUF_TYPE_VIDEO_OUTPUT:
			case V4L2_BUF_TYPE_VBI_OUTPUT:
			case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
				rc = POLLOUT | POLLWRNORM;
				break;
			default:
				rc = POLLIN | POLLRDNORM;
				break;
			}
		}
	}
	mutex_unlock(&q->vb_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_poll_stream);
/*
 * videobuf_mmap_mapper - mmap() handler for videobuf queues
 *
 * Requires PROT_WRITE and MAP_SHARED.  Finds the MMAP buffer whose
 * fake offset (boff) matches the mmap offset and delegates the actual
 * mapping to the memory backend.  Returns -EINVAL when no buffer
 * matches.
 */
int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
{
	int rc = -EINVAL;
	int i;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
		dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
		return -EINVAL;
	}

	mutex_lock(&q->vb_lock);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		struct videobuf_buffer *buf = q->bufs[i];

		if (buf && buf->memory == V4L2_MEMORY_MMAP &&
				buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
			rc = CALL(q, mmap_mapper, q, buf, vma);
			break;
		}
	}
	mutex_unlock(&q->vb_lock);

	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);
#ifdef CONFIG_VIDEO_V4L1_COMPAT
/*
 * videobuf_cgmbuf - V4L1 VIDIOCGMBUF compatibility helper
 *
 * Allocates @count MMAP buffers via videobuf_reqbufs() and fills
 * @mbuf with the frame count, per-frame offsets and total size.
 * Returns 0 on success or the error from videobuf_reqbufs().
 */
int videobuf_cgmbuf(struct videobuf_queue *q,
			struct video_mbuf *mbuf, int count)
{
	struct v4l2_requestbuffers req;
	int rc, i;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	memset(&req, 0, sizeof(req));
	req.type   = q->type;
	req.count  = count;
	req.memory = V4L2_MEMORY_MMAP;
	rc = videobuf_reqbufs(q, &req);
	if (rc < 0)
		return rc;

	mbuf->frames = req.count;
	mbuf->size   = 0;
	for (i = 0; i < mbuf->frames; i++) {
		mbuf->offsets[i] = q->bufs[i]->boff;
		mbuf->size      += PAGE_ALIGN(q->bufs[i]->bsize);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_cgmbuf);
#endif