/drivers/media/video/omap3isp/ispvideo.c

https://bitbucket.org/ndreys/linux-sunxi · C · 1335 lines · 894 code · 235 blank · 206 comment · 184 complexity · 37cbfc13a0e8ad43c32c6192811eda54 MD5 · raw file

  1. /*
  2. * ispvideo.c
  3. *
  4. * TI OMAP3 ISP - Generic video node
  5. *
  6. * Copyright (C) 2009-2010 Nokia Corporation
  7. *
  8. * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  9. * Sakari Ailus <sakari.ailus@iki.fi>
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License version 2 as
  13. * published by the Free Software Foundation.
  14. *
  15. * This program is distributed in the hope that it will be useful, but
  16. * WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License
  21. * along with this program; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
  23. * 02110-1301 USA
  24. */
  25. #include <asm/cacheflush.h>
  26. #include <linux/clk.h>
  27. #include <linux/mm.h>
  28. #include <linux/pagemap.h>
  29. #include <linux/scatterlist.h>
  30. #include <linux/sched.h>
  31. #include <linux/slab.h>
  32. #include <linux/vmalloc.h>
  33. #include <media/v4l2-dev.h>
  34. #include <media/v4l2-ioctl.h>
  35. #include <plat/iommu.h>
  36. #include <plat/iovmm.h>
  37. #include <plat/omap-pm.h>
  38. #include "ispvideo.h"
  39. #include "isp.h"
  40. /* -----------------------------------------------------------------------------
  41. * Helper functions
  42. */
  43. static struct isp_format_info formats[] = {
  44. { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
  45. V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
  46. V4L2_PIX_FMT_GREY, 8, },
  47. { V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10,
  48. V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8,
  49. V4L2_PIX_FMT_Y10, 10, },
  50. { V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10,
  51. V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8,
  52. V4L2_PIX_FMT_Y12, 12, },
  53. { V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
  54. V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
  55. V4L2_PIX_FMT_SBGGR8, 8, },
  56. { V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
  57. V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
  58. V4L2_PIX_FMT_SGBRG8, 8, },
  59. { V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
  60. V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
  61. V4L2_PIX_FMT_SGRBG8, 8, },
  62. { V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
  63. V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
  64. V4L2_PIX_FMT_SRGGB8, 8, },
  65. { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
  66. V4L2_MBUS_FMT_SGRBG10_1X10, 0,
  67. V4L2_PIX_FMT_SGRBG10DPCM8, 8, },
  68. { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
  69. V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
  70. V4L2_PIX_FMT_SBGGR10, 10, },
  71. { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
  72. V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8,
  73. V4L2_PIX_FMT_SGBRG10, 10, },
  74. { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
  75. V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8,
  76. V4L2_PIX_FMT_SGRBG10, 10, },
  77. { V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
  78. V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8,
  79. V4L2_PIX_FMT_SRGGB10, 10, },
  80. { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
  81. V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8,
  82. V4L2_PIX_FMT_SBGGR12, 12, },
  83. { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
  84. V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8,
  85. V4L2_PIX_FMT_SGBRG12, 12, },
  86. { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
  87. V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8,
  88. V4L2_PIX_FMT_SGRBG12, 12, },
  89. { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
  90. V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8,
  91. V4L2_PIX_FMT_SRGGB12, 12, },
  92. { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
  93. V4L2_MBUS_FMT_UYVY8_1X16, 0,
  94. V4L2_PIX_FMT_UYVY, 16, },
  95. { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
  96. V4L2_MBUS_FMT_YUYV8_1X16, 0,
  97. V4L2_PIX_FMT_YUYV, 16, },
  98. };
  99. const struct isp_format_info *
  100. omap3isp_video_format_info(enum v4l2_mbus_pixelcode code)
  101. {
  102. unsigned int i;
  103. for (i = 0; i < ARRAY_SIZE(formats); ++i) {
  104. if (formats[i].code == code)
  105. return &formats[i];
  106. }
  107. return NULL;
  108. }
  109. /*
  110. * Decide whether desired output pixel code can be obtained with
  111. * the lane shifter by shifting the input pixel code.
  112. * @in: input pixelcode to shifter
  113. * @out: output pixelcode from shifter
  114. * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0]
  115. *
  116. * return true if the combination is possible
  117. * return false otherwise
  118. */
  119. static bool isp_video_is_shiftable(enum v4l2_mbus_pixelcode in,
  120. enum v4l2_mbus_pixelcode out,
  121. unsigned int additional_shift)
  122. {
  123. const struct isp_format_info *in_info, *out_info;
  124. if (in == out)
  125. return true;
  126. in_info = omap3isp_video_format_info(in);
  127. out_info = omap3isp_video_format_info(out);
  128. if ((in_info->flavor == 0) || (out_info->flavor == 0))
  129. return false;
  130. if (in_info->flavor != out_info->flavor)
  131. return false;
  132. return in_info->bpp - out_info->bpp + additional_shift <= 6;
  133. }
/*
 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
 * @video: ISP video instance
 * @mbus: v4l2_mbus_framefmt format (input)
 * @pix: v4l2_pix_format format (output)
 *
 * Fill the output pix structure with information from the input mbus format.
 * The bytesperline and sizeimage fields are computed from the requested bytes
 * per line value in the pix format and information from the video instance.
 *
 * Return the number of padding bytes at end of line.
 */
static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
					  const struct v4l2_mbus_framefmt *mbus,
					  struct v4l2_pix_format *pix)
{
	/* Save the requested bytes per line before pix is cleared below. */
	unsigned int bpl = pix->bytesperline;
	unsigned int min_bpl;
	unsigned int i;

	memset(pix, 0, sizeof(*pix));
	pix->width = mbus->width;
	pix->height = mbus->height;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == mbus->code)
			break;
	}

	/* An unknown media bus code means the formats table and the subdevs
	 * are out of sync; report zero padding and leave pix mostly cleared.
	 */
	if (WARN_ON(i == ARRAY_SIZE(formats)))
		return 0;

	/* Minimum line size: width samples, each sample rounded up to a
	 * whole number of bytes.
	 */
	min_bpl = pix->width * ALIGN(formats[i].bpp, 8) / 8;

	/* Clamp the requested bytes per line value. If the maximum bytes per
	 * line value is zero, the module doesn't support user configurable line
	 * sizes. Override the requested value with the minimum in that case.
	 */
	if (video->bpl_max)
		bpl = clamp(bpl, min_bpl, video->bpl_max);
	else
		bpl = min_bpl;

	/* Enforce the hardware line alignment, except when the module
	 * supports zero padding and the line is already at its minimum size.
	 */
	if (!video->bpl_zero_padding || bpl != min_bpl)
		bpl = ALIGN(bpl, video->bpl_alignment);

	pix->pixelformat = formats[i].pixelformat;
	pix->bytesperline = bpl;
	pix->sizeimage = pix->bytesperline * pix->height;
	pix->colorspace = mbus->colorspace;
	pix->field = mbus->field;

	return bpl - min_bpl;
}
  180. static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
  181. struct v4l2_mbus_framefmt *mbus)
  182. {
  183. unsigned int i;
  184. memset(mbus, 0, sizeof(*mbus));
  185. mbus->width = pix->width;
  186. mbus->height = pix->height;
  187. for (i = 0; i < ARRAY_SIZE(formats); ++i) {
  188. if (formats[i].pixelformat == pix->pixelformat)
  189. break;
  190. }
  191. if (WARN_ON(i == ARRAY_SIZE(formats)))
  192. return;
  193. mbus->code = formats[i].code;
  194. mbus->colorspace = pix->colorspace;
  195. mbus->field = pix->field;
  196. }
  197. static struct v4l2_subdev *
  198. isp_video_remote_subdev(struct isp_video *video, u32 *pad)
  199. {
  200. struct media_pad *remote;
  201. remote = media_entity_remote_source(&video->pad);
  202. if (remote == NULL ||
  203. media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
  204. return NULL;
  205. if (pad)
  206. *pad = remote->index;
  207. return media_entity_to_v4l2_subdev(remote->entity);
  208. }
/* Return a pointer to the ISP video instance at the far end of the pipeline. */
static struct isp_video *
isp_video_far_end(struct isp_video *video)
{
	struct media_entity_graph graph;
	struct media_entity *entity = &video->video.entity;
	struct media_device *mdev = entity->parent;
	struct isp_video *far_end = NULL;

	/* Walk the media graph under the graph mutex so links can't change
	 * while we look for the other video device node.
	 */
	mutex_lock(&mdev->graph_mutex);
	media_entity_graph_walk_start(&graph, entity);

	while ((entity = media_entity_graph_walk_next(&graph))) {
		if (entity == &video->video.entity)
			continue;

		if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
			continue;

		/* A device node of the opposite type (capture vs. output) is
		 * the far end. A node of the same type is not; reset far_end
		 * and keep walking.
		 */
		far_end = to_isp_video(media_entity_to_video_device(entity));
		if (far_end->type != video->type)
			break;

		far_end = NULL;
	}

	mutex_unlock(&mdev->graph_mutex);
	return far_end;
}
/*
 * Validate a pipeline by checking both ends of all links for format
 * discrepancies.
 *
 * Compute the minimum time per frame value as the maximum of time per frame
 * limits reported by every block in the pipeline.
 *
 * Return 0 if all formats match, or -EPIPE if at least one link is found with
 * different formats on its two ends.
 */
static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
{
	struct isp_device *isp = pipe->output->isp;
	struct v4l2_subdev_format fmt_source;
	struct v4l2_subdev_format fmt_sink;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;
	int ret;

	/* Start from the L3 interconnect clock rate; blocks below can only
	 * lower pipe->max_rate.
	 */
	pipe->max_rate = pipe->l3_ick;

	subdev = isp_video_remote_subdev(pipe->output, NULL);
	if (subdev == NULL)
		return -EPIPE;

	/* Walk the pipeline upstream one subdev at a time, following each
	 * subdev's sink pad (pad 0) to the connected source. The loop exits
	 * when an entity has no sink pad or the remote isn't a subdev.
	 */
	while (1) {
		unsigned int shifter_link;

		/* Retrieve the sink format */
		pad = &subdev->entity.pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		fmt_sink.pad = pad->index;
		fmt_sink.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_sink);
		if (ret < 0 && ret != -ENOIOCTLCMD)
			return -EPIPE;

		/* Update the maximum frame rate */
		if (subdev == &isp->isp_res.subdev)
			omap3isp_resizer_max_rate(&isp->isp_res,
						  &pipe->max_rate);

		/* Check ccdc maximum data rate when data comes from sensor
		 * TODO: Include ccdc rate in pipe->max_rate and compare the
		 * total pipe rate with the input data rate from sensor.
		 */
		if (subdev == &isp->isp_ccdc.subdev && pipe->input == NULL) {
			unsigned int rate = UINT_MAX;

			omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
			if (isp->isp_ccdc.vpcfg.pixelclk > rate)
				return -ENOSPC;
		}

		/* If sink pad is on CCDC, the link has the lane shifter
		 * in the middle of it. */
		shifter_link = subdev == &isp->isp_ccdc.subdev;

		/* Retrieve the source format */
		pad = media_entity_remote_source(pad);
		if (pad == NULL ||
		    media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
			break;

		subdev = media_entity_to_v4l2_subdev(pad->entity);

		fmt_source.pad = pad->index;
		fmt_source.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_source);
		if (ret < 0 && ret != -ENOIOCTLCMD)
			return -EPIPE;

		/* Check if the two ends match */
		if (fmt_source.format.width != fmt_sink.format.width ||
		    fmt_source.format.height != fmt_sink.format.height)
			return -EPIPE;

		if (shifter_link) {
			/* On a lane shifter link the bus codes need not be
			 * identical, only shiftable into one another.
			 */
			unsigned int parallel_shift = 0;

			if (isp->isp_ccdc.input == CCDC_INPUT_PARALLEL) {
				struct isp_parallel_platform_data *pdata =
					&((struct isp_v4l2_subdevs_group *)
					  subdev->host_priv)->bus.parallel;
				parallel_shift = pdata->data_lane_shift * 2;
			}
			if (!isp_video_is_shiftable(fmt_source.format.code,
						    fmt_sink.format.code,
						    parallel_shift))
				return -EPIPE;
		} else if (fmt_source.format.code != fmt_sink.format.code)
			return -EPIPE;
	}

	return 0;
}
  314. static int
  315. __isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
  316. {
  317. struct v4l2_subdev_format fmt;
  318. struct v4l2_subdev *subdev;
  319. u32 pad;
  320. int ret;
  321. subdev = isp_video_remote_subdev(video, &pad);
  322. if (subdev == NULL)
  323. return -EINVAL;
  324. mutex_lock(&video->mutex);
  325. fmt.pad = pad;
  326. fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
  327. ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
  328. if (ret == -ENOIOCTLCMD)
  329. ret = -EINVAL;
  330. mutex_unlock(&video->mutex);
  331. if (ret)
  332. return ret;
  333. format->type = video->type;
  334. return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
  335. }
  336. static int
  337. isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
  338. {
  339. struct v4l2_format format;
  340. int ret;
  341. memcpy(&format, &vfh->format, sizeof(format));
  342. ret = __isp_video_get_format(video, &format);
  343. if (ret < 0)
  344. return ret;
  345. if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
  346. vfh->format.fmt.pix.height != format.fmt.pix.height ||
  347. vfh->format.fmt.pix.width != format.fmt.pix.width ||
  348. vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
  349. vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage)
  350. return -EINVAL;
  351. return ret;
  352. }
/* -----------------------------------------------------------------------------
 * IOMMU management
 */

#define IOMMU_FLAG (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)

/*
 * ispmmu_vmap - Wrapper for Virtual memory mapping of a scatter gather list
 * @isp: Device pointer specific to the OMAP3 ISP.
 * @sglist: Pointer to source Scatter gather list to allocate.
 * @sglen: Number of elements of the scatter-gather list.
 *
 * Returns a resulting mapped device address by the ISP MMU, or -ENOMEM if
 * we ran out of memory. Error values travel inside the dma_addr_t; callers
 * test the result with IS_ERR_VALUE().
 */
static dma_addr_t
ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
{
	struct sg_table *sgt;
	u32 da;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (sgt == NULL)
		return -ENOMEM;

	/* The caller keeps ownership of sglist; the const is cast away only
	 * to fit the sg_table layout, the list is not modified here.
	 */
	sgt->sgl = (struct scatterlist *)sglist;
	sgt->nents = sglen;
	sgt->orig_nents = sglen;

	da = iommu_vmap(isp->iommu, 0, sgt, IOMMU_FLAG);
	/* On success the sg_table stays alive until ispmmu_vunmap() frees
	 * it; on failure free it here to avoid a leak.
	 */
	if (IS_ERR_VALUE(da))
		kfree(sgt);

	return da;
}
  382. /*
  383. * ispmmu_vunmap - Unmap a device address from the ISP MMU
  384. * @dev: Device pointer specific to the OMAP3 ISP.
  385. * @da: Device address generated from a ispmmu_vmap call.
  386. */
  387. static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
  388. {
  389. struct sg_table *sgt;
  390. sgt = iommu_vunmap(isp->iommu, (u32)da);
  391. kfree(sgt);
  392. }
  393. /* -----------------------------------------------------------------------------
  394. * Video queue operations
  395. */
  396. static void isp_video_queue_prepare(struct isp_video_queue *queue,
  397. unsigned int *nbuffers, unsigned int *size)
  398. {
  399. struct isp_video_fh *vfh =
  400. container_of(queue, struct isp_video_fh, queue);
  401. struct isp_video *video = vfh->video;
  402. *size = vfh->format.fmt.pix.sizeimage;
  403. if (*size == 0)
  404. return;
  405. *nbuffers = min(*nbuffers, video->capture_mem / PAGE_ALIGN(*size));
  406. }
  407. static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
  408. {
  409. struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
  410. struct isp_buffer *buffer = to_isp_buffer(buf);
  411. struct isp_video *video = vfh->video;
  412. if (buffer->isp_addr) {
  413. ispmmu_vunmap(video->isp, buffer->isp_addr);
  414. buffer->isp_addr = 0;
  415. }
  416. }
  417. static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
  418. {
  419. struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
  420. struct isp_buffer *buffer = to_isp_buffer(buf);
  421. struct isp_video *video = vfh->video;
  422. unsigned long addr;
  423. addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen);
  424. if (IS_ERR_VALUE(addr))
  425. return -EIO;
  426. if (!IS_ALIGNED(addr, 32)) {
  427. dev_dbg(video->isp->dev, "Buffer address must be "
  428. "aligned to 32 bytes boundary.\n");
  429. ispmmu_vunmap(video->isp, buffer->isp_addr);
  430. return -EINVAL;
  431. }
  432. buf->vbuf.bytesused = vfh->format.fmt.pix.sizeimage;
  433. buffer->isp_addr = addr;
  434. return 0;
  435. }
/*
 * isp_video_buffer_queue - Add buffer to streaming queue
 * @buf: Video buffer
 *
 * In memory-to-memory mode, start streaming on the pipeline if buffers are
 * queued on both the input and the output, if the pipeline isn't already busy.
 * If the pipeline is busy, it will be restarted in the output module interrupt
 * handler.
 */
static void isp_video_buffer_queue(struct isp_video_buffer *buf)
{
	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
	struct isp_buffer *buffer = to_isp_buffer(buf);
	struct isp_video *video = vfh->video;
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned long flags;
	unsigned int empty;
	unsigned int start;

	/* Record whether the DMA queue was empty before adding the buffer;
	 * only the first buffer on an empty queue needs to update the
	 * pipeline state and possibly start streaming.
	 */
	empty = list_empty(&video->dmaqueue);
	list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue);

	if (empty) {
		/* Flag the direction (capture node feeds the pipeline
		 * output, output node feeds its input) as having buffers.
		 */
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT;
		else
			state = ISP_PIPELINE_QUEUE_INPUT;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state |= state;
		video->ops->queue(video, buffer);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;

		/* Decide under the lock whether the pipeline is ready, but
		 * start the single-shot run after releasing it.
		 */
		start = isp_pipeline_ready(pipe);
		if (start)
			pipe->state |= ISP_PIPELINE_STREAM;
		spin_unlock_irqrestore(&pipe->lock, flags);

		if (start)
			omap3isp_pipeline_set_stream(pipe,
					ISP_PIPELINE_STREAM_SINGLESHOT);
	}
}
  475. static const struct isp_video_queue_operations isp_video_queue_ops = {
  476. .queue_prepare = &isp_video_queue_prepare,
  477. .buffer_prepare = &isp_video_buffer_prepare,
  478. .buffer_queue = &isp_video_buffer_queue,
  479. .buffer_cleanup = &isp_video_buffer_cleanup,
  480. };
/*
 * omap3isp_video_buffer_next - Complete the current buffer and return the next
 * @video: ISP video object
 * @error: Whether an error occurred during capture
 *
 * Remove the current video buffer from the DMA queue and fill its timestamp,
 * field count and state fields before waking up its completion handler.
 *
 * The buffer state is set to VIDEOBUF_DONE if no error occurred (@error is 0)
 * or VIDEOBUF_ERROR otherwise (@error is non-zero).
 *
 * The DMA queue is expected to contain at least one buffer.
 *
 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
 * empty.
 */
struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video,
					      unsigned int error)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	struct isp_video_queue *queue = video->queue;
	enum isp_pipeline_state state;
	struct isp_video_buffer *buf;
	unsigned long flags;
	struct timespec ts;

	/* Pop the completed buffer off the DMA queue under the queue lock. */
	spin_lock_irqsave(&queue->irqlock, flags);
	if (WARN_ON(list_empty(&video->dmaqueue))) {
		spin_unlock_irqrestore(&queue->irqlock, flags);
		return NULL;
	}
	buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
			       irqlist);
	list_del(&buf->irqlist);
	spin_unlock_irqrestore(&queue->irqlock, flags);

	/* Timestamp with the monotonic clock, converted to a timeval. */
	ktime_get_ts(&ts);
	buf->vbuf.timestamp.tv_sec = ts.tv_sec;
	buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;

	/* Do frame number propagation only if this is the output video node.
	 * Frame number either comes from the CSI receivers or it gets
	 * incremented here if H3A is not active.
	 * Note: There is no guarantee that the output buffer will finish
	 * first, so the input number might lag behind by 1 in some cases.
	 */
	if (video == pipe->output && !pipe->do_propagation)
		buf->vbuf.sequence = atomic_inc_return(&pipe->frame_number);
	else
		buf->vbuf.sequence = atomic_read(&pipe->frame_number);

	buf->state = error ? ISP_BUF_STATE_ERROR : ISP_BUF_STATE_DONE;

	/* Wake any task blocked in DQBUF waiting for this buffer. */
	wake_up(&buf->wait);

	if (list_empty(&video->dmaqueue)) {
		/* Queue ran dry: clear both the queued and stream bits for
		 * this direction, and remember the underrun in continuous
		 * mode so streaming can be restarted on the next QBUF.
		 */
		if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT
			      | ISP_PIPELINE_STREAM;
		else
			state = ISP_PIPELINE_QUEUE_INPUT
			      | ISP_PIPELINE_STREAM;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~state;
		if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&pipe->lock, flags);
		return NULL;
	}

	/* Capture node with a video input (memory-to-memory): each frame is
	 * processed in single-shot mode, so drop the stream bit until the
	 * next shot is started.
	 */
	if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~ISP_PIPELINE_STREAM;
		spin_unlock_irqrestore(&pipe->lock, flags);
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
			       irqlist);
	buf->state = ISP_BUF_STATE_ACTIVE;
	return to_isp_buffer(buf);
}
  554. /*
  555. * omap3isp_video_resume - Perform resume operation on the buffers
  556. * @video: ISP video object
  557. * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
  558. *
  559. * This function is intended to be used on suspend/resume scenario. It
  560. * requests video queue layer to discard buffers marked as DONE if it's in
  561. * continuous mode and requests ISP modules to queue again the ACTIVE buffer
  562. * if there's any.
  563. */
  564. void omap3isp_video_resume(struct isp_video *video, int continuous)
  565. {
  566. struct isp_buffer *buf = NULL;
  567. if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
  568. omap3isp_video_queue_discard_done(video->queue);
  569. if (!list_empty(&video->dmaqueue)) {
  570. buf = list_first_entry(&video->dmaqueue,
  571. struct isp_buffer, buffer.irqlist);
  572. video->ops->queue(video, buf);
  573. video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
  574. } else {
  575. if (continuous)
  576. video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
  577. }
  578. }
  579. /* -----------------------------------------------------------------------------
  580. * V4L2 ioctls
  581. */
  582. static int
  583. isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
  584. {
  585. struct isp_video *video = video_drvdata(file);
  586. strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
  587. strlcpy(cap->card, video->video.name, sizeof(cap->card));
  588. strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
  589. cap->version = ISP_VIDEO_DRIVER_VERSION;
  590. if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
  591. cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
  592. else
  593. cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
  594. return 0;
  595. }
  596. static int
  597. isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
  598. {
  599. struct isp_video_fh *vfh = to_isp_video_fh(fh);
  600. struct isp_video *video = video_drvdata(file);
  601. if (format->type != video->type)
  602. return -EINVAL;
  603. mutex_lock(&video->mutex);
  604. *format = vfh->format;
  605. mutex_unlock(&video->mutex);
  606. return 0;
  607. }
  608. static int
  609. isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
  610. {
  611. struct isp_video_fh *vfh = to_isp_video_fh(fh);
  612. struct isp_video *video = video_drvdata(file);
  613. struct v4l2_mbus_framefmt fmt;
  614. if (format->type != video->type)
  615. return -EINVAL;
  616. mutex_lock(&video->mutex);
  617. /* Fill the bytesperline and sizeimage fields by converting to media bus
  618. * format and back to pixel format.
  619. */
  620. isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
  621. isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);
  622. vfh->format = *format;
  623. mutex_unlock(&video->mutex);
  624. return 0;
  625. }
  626. static int
  627. isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
  628. {
  629. struct isp_video *video = video_drvdata(file);
  630. struct v4l2_subdev_format fmt;
  631. struct v4l2_subdev *subdev;
  632. u32 pad;
  633. int ret;
  634. if (format->type != video->type)
  635. return -EINVAL;
  636. subdev = isp_video_remote_subdev(video, &pad);
  637. if (subdev == NULL)
  638. return -EINVAL;
  639. isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);
  640. fmt.pad = pad;
  641. fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
  642. ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
  643. if (ret)
  644. return ret == -ENOIOCTLCMD ? -EINVAL : ret;
  645. isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
  646. return 0;
  647. }
  648. static int
  649. isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
  650. {
  651. struct isp_video *video = video_drvdata(file);
  652. struct v4l2_subdev *subdev;
  653. int ret;
  654. subdev = isp_video_remote_subdev(video, NULL);
  655. if (subdev == NULL)
  656. return -EINVAL;
  657. mutex_lock(&video->mutex);
  658. ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
  659. mutex_unlock(&video->mutex);
  660. return ret == -ENOIOCTLCMD ? -EINVAL : ret;
  661. }
  662. static int
  663. isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
  664. {
  665. struct isp_video *video = video_drvdata(file);
  666. struct v4l2_subdev_format format;
  667. struct v4l2_subdev *subdev;
  668. u32 pad;
  669. int ret;
  670. subdev = isp_video_remote_subdev(video, &pad);
  671. if (subdev == NULL)
  672. return -EINVAL;
  673. /* Try the get crop operation first and fallback to get format if not
  674. * implemented.
  675. */
  676. ret = v4l2_subdev_call(subdev, video, g_crop, crop);
  677. if (ret != -ENOIOCTLCMD)
  678. return ret;
  679. format.pad = pad;
  680. format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
  681. ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
  682. if (ret < 0)
  683. return ret == -ENOIOCTLCMD ? -EINVAL : ret;
  684. crop->c.left = 0;
  685. crop->c.top = 0;
  686. crop->c.width = format.format.width;
  687. crop->c.height = format.format.height;
  688. return 0;
  689. }
  690. static int
  691. isp_video_set_crop(struct file *file, void *fh, struct v4l2_crop *crop)
  692. {
  693. struct isp_video *video = video_drvdata(file);
  694. struct v4l2_subdev *subdev;
  695. int ret;
  696. subdev = isp_video_remote_subdev(video, NULL);
  697. if (subdev == NULL)
  698. return -EINVAL;
  699. mutex_lock(&video->mutex);
  700. ret = v4l2_subdev_call(subdev, video, s_crop, crop);
  701. mutex_unlock(&video->mutex);
  702. return ret == -ENOIOCTLCMD ? -EINVAL : ret;
  703. }
  704. static int
  705. isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
  706. {
  707. struct isp_video_fh *vfh = to_isp_video_fh(fh);
  708. struct isp_video *video = video_drvdata(file);
  709. if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
  710. video->type != a->type)
  711. return -EINVAL;
  712. memset(a, 0, sizeof(*a));
  713. a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
  714. a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
  715. a->parm.output.timeperframe = vfh->timeperframe;
  716. return 0;
  717. }
  718. static int
  719. isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
  720. {
  721. struct isp_video_fh *vfh = to_isp_video_fh(fh);
  722. struct isp_video *video = video_drvdata(file);
  723. if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
  724. video->type != a->type)
  725. return -EINVAL;
  726. if (a->parm.output.timeperframe.denominator == 0)
  727. a->parm.output.timeperframe.denominator = 1;
  728. vfh->timeperframe = a->parm.output.timeperframe;
  729. return 0;
  730. }
  731. static int
  732. isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
  733. {
  734. struct isp_video_fh *vfh = to_isp_video_fh(fh);
  735. return omap3isp_video_queue_reqbufs(&vfh->queue, rb);
  736. }
  737. static int
  738. isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
  739. {
  740. struct isp_video_fh *vfh = to_isp_video_fh(fh);
  741. return omap3isp_video_queue_querybuf(&vfh->queue, b);
  742. }
  743. static int
  744. isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
  745. {
  746. struct isp_video_fh *vfh = to_isp_video_fh(fh);
  747. return omap3isp_video_queue_qbuf(&vfh->queue, b);
  748. }
  749. static int
  750. isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
  751. {
  752. struct isp_video_fh *vfh = to_isp_video_fh(fh);
  753. return omap3isp_video_queue_dqbuf(&vfh->queue, b,
  754. file->f_flags & O_NONBLOCK);
  755. }
  756. /*
  757. * Stream management
  758. *
  759. * Every ISP pipeline has a single input and a single output. The input can be
  760. * either a sensor or a video node. The output is always a video node.
  761. *
  762. * As every pipeline has an output video node, the ISP video objects at the
  763. * pipeline output stores the pipeline state. It tracks the streaming state of
  764. * both the input and output, as well as the availability of buffers.
  765. *
  766. * In sensor-to-memory mode, frames are always available at the pipeline input.
  767. * Starting the sensor usually requires I2C transfers and must be done in
  768. * interruptible context. The pipeline is started and stopped synchronously
  769. * to the stream on/off commands. All modules in the pipeline will get their
  770. * subdev set stream handler called. The module at the end of the pipeline must
  771. * delay starting the hardware until buffers are available at its output.
  772. *
  773. * In memory-to-memory mode, starting/stopping the stream requires
  774. * synchronization between the input and output. ISP modules can't be stopped
  775. * in the middle of a frame, and at least some of the modules seem to become
  776. * busy as soon as they're started, even if they don't receive a frame start
  777. * event. For that reason frames need to be processed in single-shot mode. The
  778. * driver needs to wait until a frame is completely processed and written to
  779. * memory before restarting the pipeline for the next frame. Pipelined
  780. * processing might be possible but requires more testing.
  781. *
  782. * Stream start must be delayed until buffers are available at both the input
  783. * and output. The pipeline must be started in the videobuf queue callback with
  784. * the buffers queue spinlock held. The modules subdev set stream operation must
  785. * not sleep.
  786. */
/*
 * isp_video_streamon - VIDIOC_STREAMON handler
 *
 * Lock the pipeline against link changes, validate the configured format and
 * the whole pipeline, record the pipeline input/output video nodes and start
 * streaming. In sensor-to-memory mode (no video node at the pipeline input)
 * the hardware is started here; in memory-to-memory mode it is started later,
 * when buffers have been queued on both ends (see the comment block above).
 *
 * Returns 0 on success or a negative error code. On error every step taken so
 * far is rolled back before returning.
 */
static int
isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	enum isp_pipeline_state state;
	struct isp_pipeline *pipe;
	struct isp_video *far_end;
	unsigned long flags;
	int ret;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Only one stream per video node. */
	if (video->streaming) {
		mutex_unlock(&video->stream_lock);
		return -EBUSY;
	}

	/* Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 */
	pipe = video->video.entity.pipe
	     ? to_isp_pipeline(&video->video.entity) : &video->pipe;
	media_entity_pipeline_start(&video->video.entity, &pipe->pipe);

	/* Verify that the currently configured format matches the output of
	 * the connected subdev. On success the return value is the number of
	 * padding bytes per line (see isp_video_check_format()).
	 */
	ret = isp_video_check_format(video, vfh);
	if (ret < 0)
		goto error;

	video->bpl_padding = ret;
	video->bpl_value = vfh->format.fmt.pix.bytesperline;

	/* Find the ISP video node connected at the far end of the pipeline and
	 * update the pipeline.
	 */
	far_end = isp_video_far_end(video);

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		/* Capture node: this node is the pipeline output; a NULL
		 * far_end means the input is a sensor, not a video node.
		 */
		state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
		pipe->input = far_end;
		pipe->output = video;
	} else {
		/* Output node: memory-to-memory, a capture node at the far
		 * end is mandatory.
		 */
		if (far_end == NULL) {
			ret = -EPIPE;
			goto error;
		}

		state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;
		pipe->input = video;
		pipe->output = far_end;
	}

	if (video->isp->pdata->set_constraints)
		video->isp->pdata->set_constraints(video->isp, true);
	pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);

	/* Validate the pipeline and update its state. */
	ret = isp_video_validate_pipeline(pipe);
	if (ret < 0)
		goto error;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~ISP_PIPELINE_STREAM;
	pipe->state |= state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Set the maximum time per frame as the value requested by userspace.
	 * This is a soft limit that can be overridden if the hardware doesn't
	 * support the requested limit.
	 */
	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		pipe->max_timeperframe = vfh->timeperframe;

	video->queue = &vfh->queue;
	INIT_LIST_HEAD(&video->dmaqueue);
	/* -1 so the first completed frame gets number 0. */
	atomic_set(&pipe->frame_number, -1);

	ret = omap3isp_video_queue_streamon(&vfh->queue);
	if (ret < 0)
		goto error;

	/* In sensor-to-memory mode, the stream can be started synchronously
	 * to the stream on command. In memory-to-memory mode, it will be
	 * started when buffers are queued on both the input and output.
	 */
	if (pipe->input == NULL) {
		ret = omap3isp_pipeline_set_stream(pipe,
					ISP_PIPELINE_STREAM_CONTINUOUS);
		if (ret < 0)
			goto error;
		/* Flag an underrun if no buffer was queued before streamon so
		 * the DMA engine restarts when one arrives.
		 */
		spin_lock_irqsave(&video->queue->irqlock, flags);
		if (list_empty(&video->dmaqueue))
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&video->queue->irqlock, flags);
	}

error:
	/* Fall-through label: ret >= 0 on the success path, so the rollback
	 * below only runs after an actual failure.
	 */
	if (ret < 0) {
		omap3isp_video_queue_streamoff(&vfh->queue);
		if (video->isp->pdata->set_constraints)
			video->isp->pdata->set_constraints(video->isp, false);
		media_entity_pipeline_stop(&video->video.entity);
		video->queue = NULL;
	}

	if (!ret)
		video->streaming = 1;

	mutex_unlock(&video->stream_lock);
	return ret;
}
/*
 * isp_video_streamoff - VIDIOC_STREAMOFF handler
 *
 * Stop the hardware pipeline, release the buffer queue and unlock the
 * pipeline so links can be reconfigured again. Safe to call when not
 * streaming; in that case the function is a no-op.
 *
 * Always returns 0 (or -EINVAL on a buffer type mismatch).
 */
static int
isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned int streaming;
	unsigned long flags;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Bail out early if this file handle's queue isn't streaming. The
	 * queue lock guards the streaming flag against concurrent updates.
	 */
	mutex_lock(&vfh->queue.lock);
	streaming = vfh->queue.streaming;
	mutex_unlock(&vfh->queue.lock);

	if (!streaming)
		goto done;

	/* Update the pipeline state: clear both the stream and buffer-queue
	 * bits for the direction this video node drives.
	 */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISP_PIPELINE_STREAM_OUTPUT
		      | ISP_PIPELINE_QUEUE_OUTPUT;
	else
		state = ISP_PIPELINE_STREAM_INPUT
		      | ISP_PIPELINE_QUEUE_INPUT;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Stop the stream. */
	omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
	omap3isp_video_queue_streamoff(&vfh->queue);
	video->queue = NULL;
	video->streaming = 0;

	if (video->isp->pdata->set_constraints)
		video->isp->pdata->set_constraints(video->isp, false);
	/* Release the pipeline lock taken at streamon time. */
	media_entity_pipeline_stop(&video->video.entity);

done:
	mutex_unlock(&video->stream_lock);
	return 0;
}
  925. static int
  926. isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
  927. {
  928. if (input->index > 0)
  929. return -EINVAL;
  930. strlcpy(input->name, "camera", sizeof(input->name));
  931. input->type = V4L2_INPUT_TYPE_CAMERA;
  932. return 0;
  933. }
  934. static int
  935. isp_video_g_input(struct file *file, void *fh, unsigned int *input)
  936. {
  937. *input = 0;
  938. return 0;
  939. }
  940. static int
  941. isp_video_s_input(struct file *file, void *fh, unsigned int input)
  942. {
  943. return input == 0 ? 0 : -EINVAL;
  944. }
/* V4L2 ioctl operations shared by all ISP video nodes. The format, crop and
 * parm handlers serve both capture and output buffer types; the input
 * enumeration handlers expose a single immutable "camera" input.
 */
static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
	.vidioc_querycap = isp_video_querycap,
	.vidioc_g_fmt_vid_cap = isp_video_get_format,
	.vidioc_s_fmt_vid_cap = isp_video_set_format,
	.vidioc_try_fmt_vid_cap = isp_video_try_format,
	.vidioc_g_fmt_vid_out = isp_video_get_format,
	.vidioc_s_fmt_vid_out = isp_video_set_format,
	.vidioc_try_fmt_vid_out = isp_video_try_format,
	.vidioc_cropcap = isp_video_cropcap,
	.vidioc_g_crop = isp_video_get_crop,
	.vidioc_s_crop = isp_video_set_crop,
	.vidioc_g_parm = isp_video_get_param,
	.vidioc_s_parm = isp_video_set_param,
	.vidioc_reqbufs = isp_video_reqbufs,
	.vidioc_querybuf = isp_video_querybuf,
	.vidioc_qbuf = isp_video_qbuf,
	.vidioc_dqbuf = isp_video_dqbuf,
	.vidioc_streamon = isp_video_streamon,
	.vidioc_streamoff = isp_video_streamoff,
	.vidioc_enum_input = isp_video_enum_input,
	.vidioc_g_input = isp_video_g_input,
	.vidioc_s_input = isp_video_s_input,
};
  968. /* -----------------------------------------------------------------------------
  969. * V4L2 file operations
  970. */
/*
 * isp_video_open - V4L2 file open handler
 *
 * Allocate a per-open file handle, take a reference on the ISP device and a
 * power management use count on the entity, and initialise the buffer queue
 * and default format for this handle.
 *
 * Returns 0 on success, -ENOMEM if the handle can't be allocated, -EBUSY if
 * the ISP device can't be acquired, or the omap3isp_pipeline_pm_use() error.
 */
static int isp_video_open(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct isp_video_fh *handle;
	int ret = 0;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	v4l2_fh_init(&handle->vfh, &video->video);
	v4l2_fh_add(&handle->vfh);

	/* If this is the first user, initialise the pipeline. */
	if (omap3isp_get(video->isp) == NULL) {
		ret = -EBUSY;
		goto done;
	}

	ret = omap3isp_pipeline_pm_use(&video->video.entity, 1);
	if (ret < 0) {
		/* Balance the omap3isp_get() above before bailing out. */
		omap3isp_put(video->isp);
		goto done;
	}

	omap3isp_video_queue_init(&handle->queue, video->type,
				  &isp_video_queue_ops, video->isp->dev,
				  sizeof(struct isp_buffer));

	/* Start from an all-zero format of the node's buffer type; userspace
	 * is expected to configure it through S_FMT before streaming.
	 */
	memset(&handle->format, 0, sizeof(handle->format));
	handle->format.type = video->type;
	handle->timeperframe.denominator = 1;

	handle->video = video;
	file->private_data = &handle->vfh;

done:
	if (ret < 0) {
		v4l2_fh_del(&handle->vfh);
		kfree(handle);
	}

	return ret;
}
/*
 * isp_video_release - V4L2 file release handler
 *
 * Undo everything isp_video_open() set up, in reverse order: stop streaming,
 * free the buffer queue, drop the entity power management use count, release
 * the file handle and finally release the ISP device reference.
 */
static int isp_video_release(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_fh *vfh = file->private_data;
	struct isp_video_fh *handle = to_isp_video_fh(vfh);

	/* Disable streaming and free the buffers queue resources.
	 * isp_video_streamoff() is a no-op if we weren't streaming.
	 */
	isp_video_streamoff(file, vfh, video->type);

	mutex_lock(&handle->queue.lock);
	omap3isp_video_queue_cleanup(&handle->queue);
	mutex_unlock(&handle->queue.lock);

	/* Drop the use count taken with omap3isp_pipeline_pm_use(..., 1). */
	omap3isp_pipeline_pm_use(&video->video.entity, 0);

	/* Release the file handle. */
	v4l2_fh_del(vfh);
	kfree(handle);
	file->private_data = NULL;

	/* Balance the omap3isp_get() from open. */
	omap3isp_put(video->isp);

	return 0;
}
  1024. static unsigned int isp_video_poll(struct file *file, poll_table *wait)
  1025. {
  1026. struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
  1027. struct isp_video_queue *queue = &vfh->queue;
  1028. return omap3isp_video_queue_poll(queue, file, wait);
  1029. }
  1030. static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
  1031. {
  1032. struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
  1033. return omap3isp_video_queue_mmap(&vfh->queue, vma);
  1034. }
/* File operations for ISP video device nodes. All ioctls are dispatched
 * through video_ioctl2() to the isp_video_ioctl_ops table above.
 */
static struct v4l2_file_operations isp_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = isp_video_open,
	.release = isp_video_release,
	.poll = isp_video_poll,
	.mmap = isp_video_mmap,
};
  1043. /* -----------------------------------------------------------------------------
  1044. * ISP video core
  1045. */
/* Empty placeholder assigned by omap3isp_video_init() when the caller
 * provides no video operations, so video->ops is never NULL.
 */
static const struct isp_video_operations isp_video_dummy_ops = {
};
  1048. int omap3isp_video_init(struct isp_video *video, const char *name)
  1049. {
  1050. const char *direction;
  1051. int ret;
  1052. switch (video->type) {
  1053. case V4L2_BUF_TYPE_VIDEO_CAPTURE:
  1054. direction = "output";
  1055. video->pad.flags = MEDIA_PAD_FL_SINK;
  1056. break;
  1057. case V4L2_BUF_TYPE_VIDEO_OUTPUT:
  1058. direction = "input";
  1059. video->pad.flags = MEDIA_PAD_FL_SOURCE;
  1060. break;
  1061. default:
  1062. return -EINVAL;
  1063. }
  1064. ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
  1065. if (ret < 0)
  1066. return ret;
  1067. mutex_init(&video->mutex);
  1068. atomic_set(&video->active, 0);
  1069. spin_lock_init(&video->pipe.lock);
  1070. mutex_init(&video->stream_lock);
  1071. /* Initialize the video device. */
  1072. if (video->ops == NULL)
  1073. video->ops = &isp_video_dummy_ops;
  1074. video->video.fops = &isp_video_fops;
  1075. snprintf(video->video.name, sizeof(video->video.name),
  1076. "OMAP3 ISP %s %s", name, direction);
  1077. video->video.vfl_type = VFL_TYPE_GRABBER;
  1078. video->video.release = video_device_release_empty;
  1079. video->video.ioctl_ops = &isp_video_ioctl_ops;
  1080. video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;
  1081. video_set_drvdata(&video->video, video);
  1082. return 0;
  1083. }
  1084. int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
  1085. {
  1086. int ret;
  1087. video->video.v4l2_dev = vdev;
  1088. ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
  1089. if (ret < 0)
  1090. printk(KERN_ERR "%s: could not register video device (%d)\n",
  1091. __func__, ret);
  1092. return ret;
  1093. }
  1094. void omap3isp_video_unregister(struct isp_video *video)
  1095. {
  1096. if (video_is_registered(&video->video)) {
  1097. media_entity_cleanup(&video->video.entity);
  1098. video_unregister_device(&video->video);
  1099. }
  1100. }