/drivers/media/video/omap3isp/ispvideo.c
/*
 * ispvideo.c
 *
 * TI OMAP3 ISP - Generic video node
 *
 * Copyright (C) 2009-2010 Nokia Corporation
 *
 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <plat/iommu.h>
#include <plat/iovmm.h>
#include <plat/omap-pm.h>

#include "ispvideo.h"
#include "isp.h"


/* -----------------------------------------------------------------------------
 * Helper functions
 */

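/* Each entry below is a positional initializer for struct isp_format_info
 * (defined in ispvideo.h): a media bus code, the related codes used along
 * the pipeline, and the corresponding V4L2 pixel format with its bit depth
 * (bpp). Only the code, flavor, pixelformat and bpp fields are referenced in
 * this file.
 */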
static struct isp_format_info formats[] = {
	{ V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
	  V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_GREY, 8, },
	{ V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10,
	  V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_Y10, 10, },
	{ V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10,
	  V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_Y12, 12, },
	{ V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
	  V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR8, 8, },
	{ V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
	  V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG8, 8, },
	{ V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
	  V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG8, 8, },
	{ V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
	  V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB8, 8, },
	{ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
	  V4L2_MBUS_FMT_SGRBG10_1X10, 0,
	  V4L2_PIX_FMT_SGRBG10DPCM8, 8, },
	{ V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
	  V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR10, 10, },
	{ V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
	  V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG10, 10, },
	{ V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
	  V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG10, 10, },
	{ V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
	  V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB10, 10, },
	{ V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
	  V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR12, 12, },
	{ V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
	  V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG12, 12, },
	{ V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
	  V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG12, 12, },
	{ V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
	  V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB12, 12, },
	{ V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
	  V4L2_MBUS_FMT_UYVY8_1X16, 0,
	  V4L2_PIX_FMT_UYVY, 16, },
	{ V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
	  V4L2_MBUS_FMT_YUYV8_1X16, 0,
	  V4L2_PIX_FMT_YUYV, 16, },
};

const struct isp_format_info *
omap3isp_video_format_info(enum v4l2_mbus_pixelcode code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == code)
			return &formats[i];
	}

	return NULL;
}

/*
 * Decide whether desired output pixel code can be obtained with
 * the lane shifter by shifting the input pixel code.
 * @in: input pixelcode to shifter
 * @out: output pixelcode from shifter
 * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0]
 *
 * return true if the combination is possible
 * return false otherwise
 */
static bool isp_video_is_shiftable(enum v4l2_mbus_pixelcode in,
				   enum v4l2_mbus_pixelcode out,
				   unsigned int additional_shift)
{
	const struct isp_format_info *in_info, *out_info;

	if (in == out)
		return true;

	in_info = omap3isp_video_format_info(in);
	out_info = omap3isp_video_format_info(out);

	if ((in_info->flavor == 0) || (out_info->flavor == 0))
		return false;

	if (in_info->flavor != out_info->flavor)
		return false;

	return in_info->bpp - out_info->bpp + additional_shift <= 6;
}

/*
 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
 * @video: ISP video instance
 * @mbus: v4l2_mbus_framefmt format (input)
 * @pix: v4l2_pix_format format (output)
 *
 * Fill the output pix structure with information from the input mbus format.
 * The bytesperline and sizeimage fields are computed from the requested bytes
 * per line value in the pix format and information from the video instance.
 *
 * Return the number of padding bytes at end of line.
 */
static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
					  const struct v4l2_mbus_framefmt *mbus,
					  struct v4l2_pix_format *pix)
{
	unsigned int bpl = pix->bytesperline;
	unsigned int min_bpl;
	unsigned int i;

	memset(pix, 0, sizeof(*pix));
	pix->width = mbus->width;
	pix->height = mbus->height;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == mbus->code)
			break;
	}

	if (WARN_ON(i == ARRAY_SIZE(formats)))
		return 0;

	min_bpl = pix->width * ALIGN(formats[i].bpp, 8) / 8;

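	/* Illustrative example: for a 10-bit Bayer format such as
	 * V4L2_PIX_FMT_SGRBG10 at 2048 pixels per line, ALIGN(10, 8) = 16,
	 * so min_bpl = 2048 * 16 / 8 = 4096 bytes.
	 */
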
	/* Clamp the requested bytes per line value. If the maximum bytes per
	 * line value is zero, the module doesn't support user configurable line
	 * sizes. Override the requested value with the minimum in that case.
	 */
	if (video->bpl_max)
		bpl = clamp(bpl, min_bpl, video->bpl_max);
	else
		bpl = min_bpl;

	if (!video->bpl_zero_padding || bpl != min_bpl)
		bpl = ALIGN(bpl, video->bpl_alignment);

	pix->pixelformat = formats[i].pixelformat;
	pix->bytesperline = bpl;
	pix->sizeimage = pix->bytesperline * pix->height;
	pix->colorspace = mbus->colorspace;
	pix->field = mbus->field;

	return bpl - min_bpl;
}

static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
				  struct v4l2_mbus_framefmt *mbus)
{
	unsigned int i;

	memset(mbus, 0, sizeof(*mbus));
	mbus->width = pix->width;
	mbus->height = pix->height;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].pixelformat == pix->pixelformat)
			break;
	}

	if (WARN_ON(i == ARRAY_SIZE(formats)))
		return;

	mbus->code = formats[i].code;
	mbus->colorspace = pix->colorspace;
	mbus->field = pix->field;
}

static struct v4l2_subdev *
isp_video_remote_subdev(struct isp_video *video, u32 *pad)
{
	struct media_pad *remote;

	remote = media_entity_remote_source(&video->pad);

	if (remote == NULL ||
	    media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}

/* Return a pointer to the ISP video instance at the far end of the pipeline. */
static struct isp_video *
isp_video_far_end(struct isp_video *video)
{
	struct media_entity_graph graph;
	struct media_entity *entity = &video->video.entity;
	struct media_device *mdev = entity->parent;
	struct isp_video *far_end = NULL;

	mutex_lock(&mdev->graph_mutex);
	media_entity_graph_walk_start(&graph, entity);

	while ((entity = media_entity_graph_walk_next(&graph))) {
		if (entity == &video->video.entity)
			continue;

		if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
			continue;

		far_end = to_isp_video(media_entity_to_video_device(entity));
		if (far_end->type != video->type)
			break;

		far_end = NULL;
	}

	mutex_unlock(&mdev->graph_mutex);
	return far_end;
}

/*
 * Validate a pipeline by checking both ends of all links for format
 * discrepancies.
 *
 * Compute the minimum time per frame value as the maximum of time per frame
 * limits reported by every block in the pipeline.
 *
 * Return 0 if all formats match, or -EPIPE if at least one link is found with
 * different formats on its two ends.
 */
static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
{
	struct isp_device *isp = pipe->output->isp;
	struct v4l2_subdev_format fmt_source;
	struct v4l2_subdev_format fmt_sink;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;
	int ret;

	pipe->max_rate = pipe->l3_ick;

	subdev = isp_video_remote_subdev(pipe->output, NULL);
	if (subdev == NULL)
		return -EPIPE;

	while (1) {
		unsigned int shifter_link;
		/* Retrieve the sink format */
		pad = &subdev->entity.pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		fmt_sink.pad = pad->index;
		fmt_sink.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_sink);
		if (ret < 0 && ret != -ENOIOCTLCMD)
			return -EPIPE;

		/* Update the maximum frame rate */
		if (subdev == &isp->isp_res.subdev)
			omap3isp_resizer_max_rate(&isp->isp_res,
						  &pipe->max_rate);

		/* Check ccdc maximum data rate when data comes from sensor
		 * TODO: Include ccdc rate in pipe->max_rate and compare the
		 * total pipe rate with the input data rate from sensor.
		 */
		if (subdev == &isp->isp_ccdc.subdev && pipe->input == NULL) {
			unsigned int rate = UINT_MAX;

			omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
			if (isp->isp_ccdc.vpcfg.pixelclk > rate)
				return -ENOSPC;
		}

		/* If sink pad is on CCDC, the link has the lane shifter
		 * in the middle of it. */
		shifter_link = subdev == &isp->isp_ccdc.subdev;

		/* Retrieve the source format */
		pad = media_entity_remote_source(pad);
		if (pad == NULL ||
		    media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
			break;

		subdev = media_entity_to_v4l2_subdev(pad->entity);

		fmt_source.pad = pad->index;
		fmt_source.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_source);
		if (ret < 0 && ret != -ENOIOCTLCMD)
			return -EPIPE;

		/* Check if the two ends match */
		if (fmt_source.format.width != fmt_sink.format.width ||
		    fmt_source.format.height != fmt_sink.format.height)
			return -EPIPE;

		if (shifter_link) {
			unsigned int parallel_shift = 0;
			if (isp->isp_ccdc.input == CCDC_INPUT_PARALLEL) {
				struct isp_parallel_platform_data *pdata =
					&((struct isp_v4l2_subdevs_group *)
					  subdev->host_priv)->bus.parallel;
				parallel_shift = pdata->data_lane_shift * 2;
			}
			if (!isp_video_is_shiftable(fmt_source.format.code,
						    fmt_sink.format.code,
						    parallel_shift))
				return -EPIPE;
		} else if (fmt_source.format.code != fmt_sink.format.code)
			return -EPIPE;
	}

	return 0;
}

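/* Lane shifter example, illustrating the isp_video_is_shiftable() check used
 * by isp_video_validate_pipeline() above: a parallel sensor producing
 * SGRBG12 wired with its LSB 2 bits above CAMEXT[0] (additional shift of 2),
 * feeding a CCDC sink configured for SGRBG10, is accepted because both
 * formats share the SGRBG8 flavor and 12 - 10 + 2 = 4 <= 6.
 */
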
static int
__isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
{
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	mutex_lock(&video->mutex);

	fmt.pad = pad;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret == -ENOIOCTLCMD)
		ret = -EINVAL;

	mutex_unlock(&video->mutex);

	if (ret)
		return ret;

	format->type = video->type;
	return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
}

static int
isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
{
	struct v4l2_format format;
	int ret;

	memcpy(&format, &vfh->format, sizeof(format));
	ret = __isp_video_get_format(video, &format);
	if (ret < 0)
		return ret;

	if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
	    vfh->format.fmt.pix.height != format.fmt.pix.height ||
	    vfh->format.fmt.pix.width != format.fmt.pix.width ||
	    vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
	    vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage)
		return -EINVAL;

	return ret;
}

/* -----------------------------------------------------------------------------
 * IOMMU management
 */

#define IOMMU_FLAG	(IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)

/*
 * ispmmu_vmap - Wrapper for virtual memory mapping of a scatter gather list
 * @dev: Device pointer specific to the OMAP3 ISP.
 * @sglist: Pointer to source scatter gather list to allocate.
 * @sglen: Number of elements of the scatter-gather list.
 *
 * Returns the device address mapped by the ISP MMU, or -ENOMEM if we ran out
 * of memory.
 */
static dma_addr_t
ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
{
	struct sg_table *sgt;
	u32 da;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (sgt == NULL)
		return -ENOMEM;

	sgt->sgl = (struct scatterlist *)sglist;
	sgt->nents = sglen;
	sgt->orig_nents = sglen;

	da = iommu_vmap(isp->iommu, 0, sgt, IOMMU_FLAG);
	if (IS_ERR_VALUE(da))
		kfree(sgt);

	return da;
}

/*
 * ispmmu_vunmap - Unmap a device address from the ISP MMU
 * @dev: Device pointer specific to the OMAP3 ISP.
 * @da: Device address generated from a ispmmu_vmap call.
 */
static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
{
	struct sg_table *sgt;

	sgt = iommu_vunmap(isp->iommu, (u32)da);
	kfree(sgt);
}

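/* The address returned by ispmmu_vmap() is an ISP-side virtual address as
 * seen through the ISP MMU: it is what gets programmed into the submodules'
 * DMA address registers, not a CPU-visible address. Errors are encoded in
 * the returned value and must be checked with IS_ERR_VALUE(), as the callers
 * below do.
 */
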
/* -----------------------------------------------------------------------------
 * Video queue operations
 */

static void isp_video_queue_prepare(struct isp_video_queue *queue,
				    unsigned int *nbuffers, unsigned int *size)
{
	struct isp_video_fh *vfh =
		container_of(queue, struct isp_video_fh, queue);
	struct isp_video *video = vfh->video;

	*size = vfh->format.fmt.pix.sizeimage;
	if (*size == 0)
		return;

	*nbuffers = min(*nbuffers, video->capture_mem / PAGE_ALIGN(*size));
}

static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
{
	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
	struct isp_buffer *buffer = to_isp_buffer(buf);
	struct isp_video *video = vfh->video;

	if (buffer->isp_addr) {
		ispmmu_vunmap(video->isp, buffer->isp_addr);
		buffer->isp_addr = 0;
	}
}

static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
{
	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
	struct isp_buffer *buffer = to_isp_buffer(buf);
	struct isp_video *video = vfh->video;
	unsigned long addr;

	addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen);
	if (IS_ERR_VALUE(addr))
		return -EIO;

	if (!IS_ALIGNED(addr, 32)) {
		dev_dbg(video->isp->dev, "Buffer address must be "
			"aligned to 32 bytes boundary.\n");
		/* Undo the mapping that was just created. */
		ispmmu_vunmap(video->isp, addr);
		return -EINVAL;
	}

	buf->vbuf.bytesused = vfh->format.fmt.pix.sizeimage;
	buffer->isp_addr = addr;
	return 0;
}

/*
 * isp_video_buffer_queue - Add buffer to streaming queue
 * @buf: Video buffer
 *
 * In memory-to-memory mode, start streaming on the pipeline if buffers are
 * queued on both the input and the output, if the pipeline isn't already busy.
 * If the pipeline is busy, it will be restarted in the output module interrupt
 * handler.
 */
static void isp_video_buffer_queue(struct isp_video_buffer *buf)
{
	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
	struct isp_buffer *buffer = to_isp_buffer(buf);
	struct isp_video *video = vfh->video;
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned long flags;
	unsigned int empty;
	unsigned int start;

	empty = list_empty(&video->dmaqueue);
	list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue);

	if (empty) {
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT;
		else
			state = ISP_PIPELINE_QUEUE_INPUT;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state |= state;
		video->ops->queue(video, buffer);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;

		start = isp_pipeline_ready(pipe);
		if (start)
			pipe->state |= ISP_PIPELINE_STREAM;
		spin_unlock_irqrestore(&pipe->lock, flags);

		if (start)
			omap3isp_pipeline_set_stream(pipe,
					ISP_PIPELINE_STREAM_SINGLESHOT);
	}
}

static const struct isp_video_queue_operations isp_video_queue_ops = {
	.queue_prepare = &isp_video_queue_prepare,
	.buffer_prepare = &isp_video_buffer_prepare,
	.buffer_queue = &isp_video_buffer_queue,
	.buffer_cleanup = &isp_video_buffer_cleanup,
};

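/* These callbacks are invoked by the driver's buffer queue helpers
 * (ispqueue.c in this driver): queue_prepare when applications request
 * buffers, buffer_prepare and buffer_cleanup when individual buffers are
 * mapped to or unmapped from the ISP MMU, and buffer_queue when a buffer is
 * handed over for DMA.
 */
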
/*
 * omap3isp_video_buffer_next - Complete the current buffer and return the next
 * @video: ISP video object
 * @error: Whether an error occurred during capture
 *
 * Remove the current video buffer from the DMA queue and fill its timestamp,
 * field count and state fields before waking up its completion handler.
 *
 * The buffer state is set to VIDEOBUF_DONE if no error occurred (@error is 0)
 * or VIDEOBUF_ERROR otherwise (@error is non-zero).
 *
 * The DMA queue is expected to contain at least one buffer.
 *
 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
 * empty.
 */
struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video,
					      unsigned int error)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	struct isp_video_queue *queue = video->queue;
	enum isp_pipeline_state state;
	struct isp_video_buffer *buf;
	unsigned long flags;
	struct timespec ts;

	spin_lock_irqsave(&queue->irqlock, flags);
	if (WARN_ON(list_empty(&video->dmaqueue))) {
		spin_unlock_irqrestore(&queue->irqlock, flags);
		return NULL;
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
			       irqlist);
	list_del(&buf->irqlist);
	spin_unlock_irqrestore(&queue->irqlock, flags);

	ktime_get_ts(&ts);
	buf->vbuf.timestamp.tv_sec = ts.tv_sec;
	buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;

	/* Do frame number propagation only if this is the output video node.
	 * Frame number either comes from the CSI receivers or it gets
	 * incremented here if H3A is not active.
	 * Note: There is no guarantee that the output buffer will finish
	 * first, so the input number might lag behind by 1 in some cases.
	 */
	if (video == pipe->output && !pipe->do_propagation)
		buf->vbuf.sequence = atomic_inc_return(&pipe->frame_number);
	else
		buf->vbuf.sequence = atomic_read(&pipe->frame_number);

	buf->state = error ? ISP_BUF_STATE_ERROR : ISP_BUF_STATE_DONE;

	wake_up(&buf->wait);

	if (list_empty(&video->dmaqueue)) {
		if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT
			      | ISP_PIPELINE_STREAM;
		else
			state = ISP_PIPELINE_QUEUE_INPUT
			      | ISP_PIPELINE_STREAM;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~state;
		if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&pipe->lock, flags);
		return NULL;
	}

	if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~ISP_PIPELINE_STREAM;
		spin_unlock_irqrestore(&pipe->lock, flags);
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
			       irqlist);
	buf->state = ISP_BUF_STATE_ACTIVE;
	return to_isp_buffer(buf);
}

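/* omap3isp_video_buffer_next() only holds the queue irqlock while the DMA
 * queue is manipulated; it is typically called by the ISP submodules from
 * their frame-completion interrupt paths.
 */
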
/*
 * omap3isp_video_resume - Perform resume operation on the buffers
 * @video: ISP video object
 * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
 *
 * This function is intended to be used on suspend/resume scenario. It
 * requests video queue layer to discard buffers marked as DONE if it's in
 * continuous mode and requests ISP modules to queue again the ACTIVE buffer
 * if there's any.
 */
void omap3isp_video_resume(struct isp_video *video, int continuous)
{
	struct isp_buffer *buf = NULL;

	if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		omap3isp_video_queue_discard_done(video->queue);

	if (!list_empty(&video->dmaqueue)) {
		buf = list_first_entry(&video->dmaqueue,
				       struct isp_buffer, buffer.irqlist);
		video->ops->queue(video, buf);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
	} else {
		if (continuous)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
	}
}

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct isp_video *video = video_drvdata(file);

	strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
	strlcpy(cap->card, video->video.name, sizeof(cap->card));
	strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
	cap->version = ISP_VIDEO_DRIVER_VERSION;

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	else
		cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;

	return 0;
}

static int
isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (format->type != video->type)
		return -EINVAL;

	mutex_lock(&video->mutex);
	*format = vfh->format;
	mutex_unlock(&video->mutex);

	return 0;
}

static int
isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct v4l2_mbus_framefmt fmt;

	if (format->type != video->type)
		return -EINVAL;

	mutex_lock(&video->mutex);

	/* Fill the bytesperline and sizeimage fields by converting to media bus
	 * format and back to pixel format.
	 */
	isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
	isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);

	vfh->format = *format;

	mutex_unlock(&video->mutex);
	return 0;
}

static int
isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	if (format->type != video->type)
		return -EINVAL;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);

	fmt.pad = pad;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret)
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
	return 0;
}

static int
isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev *subdev;
	int ret;

	subdev = isp_video_remote_subdev(video, NULL);
	if (subdev == NULL)
		return -EINVAL;

	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
	mutex_unlock(&video->mutex);

	return ret == -ENOIOCTLCMD ? -EINVAL : ret;
}

static int
isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev_format format;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	/* Try the get crop operation first and fallback to get format if not
	 * implemented.
	 */
	ret = v4l2_subdev_call(subdev, video, g_crop, crop);
	if (ret != -ENOIOCTLCMD)
		return ret;

	format.pad = pad;
	format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	crop->c.left = 0;
	crop->c.top = 0;
	crop->c.width = format.format.width;
	crop->c.height = format.format.height;

	return 0;
}

static int
isp_video_set_crop(struct file *file, void *fh, struct v4l2_crop *crop)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev *subdev;
	int ret;

	subdev = isp_video_remote_subdev(video, NULL);
	if (subdev == NULL)
		return -EINVAL;

	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, video, s_crop, crop);
	mutex_unlock(&video->mutex);

	return ret == -ENOIOCTLCMD ? -EINVAL : ret;
}

static int
isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	memset(a, 0, sizeof(*a));
	a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
	a->parm.output.timeperframe = vfh->timeperframe;

	return 0;
}

static int
isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	if (a->parm.output.timeperframe.denominator == 0)
		a->parm.output.timeperframe.denominator = 1;

	vfh->timeperframe = a->parm.output.timeperframe;

	return 0;
}

static int
isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);

	return omap3isp_video_queue_reqbufs(&vfh->queue, rb);
}

static int
isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);

	return omap3isp_video_queue_querybuf(&vfh->queue, b);
}

static int
isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);

	return omap3isp_video_queue_qbuf(&vfh->queue, b);
}

static int
isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);

	return omap3isp_video_queue_dqbuf(&vfh->queue, b,
					  file->f_flags & O_NONBLOCK);
}

/*
 * Stream management
 *
 * Every ISP pipeline has a single input and a single output. The input can be
 * either a sensor or a video node. The output is always a video node.
 *
 * As every pipeline has an output video node, the ISP video object at the
 * pipeline output stores the pipeline state. It tracks the streaming state of
 * both the input and output, as well as the availability of buffers.
 *
 * In sensor-to-memory mode, frames are always available at the pipeline input.
 * Starting the sensor usually requires I2C transfers and must be done in
 * interruptible context. The pipeline is started and stopped synchronously
 * to the stream on/off commands. All modules in the pipeline will get their
 * subdev set stream handler called. The module at the end of the pipeline must
 * delay starting the hardware until buffers are available at its output.
 *
 * In memory-to-memory mode, starting/stopping the stream requires
 * synchronization between the input and output. ISP modules can't be stopped
 * in the middle of a frame, and at least some of the modules seem to become
 * busy as soon as they're started, even if they don't receive a frame start
 * event. For that reason frames need to be processed in single-shot mode. The
 * driver needs to wait until a frame is completely processed and written to
 * memory before restarting the pipeline for the next frame. Pipelined
 * processing might be possible but requires more testing.
 *
 * Stream start must be delayed until buffers are available at both the input
 * and output. The pipeline must be started in the videobuf queue callback with
 * the buffers queue spinlock held. The modules' subdev set stream operation
 * must not sleep.
 */
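/* Illustrative configurations (assumed typical topologies, not taken from
 * the original source):
 *
 *   sensor-to-memory:  sensor -> CCDC [-> preview -> resizer] -> memory
 *     pipe->input == NULL, pipe->output == the capture video node
 *
 *   memory-to-memory:  memory -> resizer -> memory
 *     pipe->input == the output video node,
 *     pipe->output == the capture video node
 */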
static int
isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	enum isp_pipeline_state state;
	struct isp_pipeline *pipe;
	struct isp_video *far_end;
	unsigned long flags;
	int ret;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	if (video->streaming) {
		mutex_unlock(&video->stream_lock);
		return -EBUSY;
	}

	/* Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 */
	pipe = video->video.entity.pipe
	     ? to_isp_pipeline(&video->video.entity) : &video->pipe;
	media_entity_pipeline_start(&video->video.entity, &pipe->pipe);

	/* Verify that the currently configured format matches the output of
	 * the connected subdev.
	 */
	ret = isp_video_check_format(video, vfh);
	if (ret < 0)
		goto error;

	video->bpl_padding = ret;
	video->bpl_value = vfh->format.fmt.pix.bytesperline;

	/* Find the ISP video node connected at the far end of the pipeline and
	 * update the pipeline.
	 */
	far_end = isp_video_far_end(video);

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
		pipe->input = far_end;
		pipe->output = video;
	} else {
		if (far_end == NULL) {
			ret = -EPIPE;
			goto error;
		}

		state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;
		pipe->input = video;
		pipe->output = far_end;
	}

	if (video->isp->pdata->set_constraints)
		video->isp->pdata->set_constraints(video->isp, true);
	pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);

	/* Validate the pipeline and update its state. */
	ret = isp_video_validate_pipeline(pipe);
	if (ret < 0)
		goto error;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~ISP_PIPELINE_STREAM;
	pipe->state |= state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Set the maximum time per frame as the value requested by userspace.
	 * This is a soft limit that can be overridden if the hardware doesn't
	 * support the requested limit.
	 */
	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		pipe->max_timeperframe = vfh->timeperframe;

	video->queue = &vfh->queue;
	INIT_LIST_HEAD(&video->dmaqueue);
	atomic_set(&pipe->frame_number, -1);

	ret = omap3isp_video_queue_streamon(&vfh->queue);
	if (ret < 0)
		goto error;

	/* In sensor-to-memory mode, the stream can be started synchronously
	 * to the stream on command. In memory-to-memory mode, it will be
	 * started when buffers are queued on both the input and output.
	 */
	if (pipe->input == NULL) {
		ret = omap3isp_pipeline_set_stream(pipe,
					ISP_PIPELINE_STREAM_CONTINUOUS);
		if (ret < 0)
			goto error;
		spin_lock_irqsave(&video->queue->irqlock, flags);
		if (list_empty(&video->dmaqueue))
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&video->queue->irqlock, flags);
	}

error:
	if (ret < 0) {
		omap3isp_video_queue_streamoff(&vfh->queue);
		if (video->isp->pdata->set_constraints)
			video->isp->pdata->set_constraints(video->isp, false);
		media_entity_pipeline_stop(&video->video.entity);
		video->queue = NULL;
	}

	if (!ret)
		video->streaming = 1;

	mutex_unlock(&video->stream_lock);
	return ret;
}

static int
isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned int streaming;
	unsigned long flags;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Make sure we're not streaming yet. */
	mutex_lock(&vfh->queue.lock);
	streaming = vfh->queue.streaming;
	mutex_unlock(&vfh->queue.lock);

	if (!streaming)
		goto done;

	/* Update the pipeline state. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISP_PIPELINE_STREAM_OUTPUT
		      | ISP_PIPELINE_QUEUE_OUTPUT;
	else
		state = ISP_PIPELINE_STREAM_INPUT
		      | ISP_PIPELINE_QUEUE_INPUT;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Stop the stream. */
	omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
	omap3isp_video_queue_streamoff(&vfh->queue);
	video->queue = NULL;
	video->streaming = 0;

	if (video->isp->pdata->set_constraints)
		video->isp->pdata->set_constraints(video->isp, false);
	media_entity_pipeline_stop(&video->video.entity);

done:
	mutex_unlock(&video->stream_lock);
	return 0;
}

static int
isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{
	if (input->index > 0)
		return -EINVAL;

	strlcpy(input->name, "camera", sizeof(input->name));
	input->type = V4L2_INPUT_TYPE_CAMERA;

	return 0;
}

static int
isp_video_g_input(struct file *file, void *fh, unsigned int *input)
{
	*input = 0;

	return 0;
}

static int
isp_video_s_input(struct file *file, void *fh, unsigned int input)
{
	return input == 0 ? 0 : -EINVAL;
}

static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
	.vidioc_querycap		= isp_video_querycap,
	.vidioc_g_fmt_vid_cap		= isp_video_get_format,
	.vidioc_s_fmt_vid_cap		= isp_video_set_format,
	.vidioc_try_fmt_vid_cap		= isp_video_try_format,
	.vidioc_g_fmt_vid_out		= isp_video_get_format,
	.vidioc_s_fmt_vid_out		= isp_video_set_format,
	.vidioc_try_fmt_vid_out		= isp_video_try_format,
	.vidioc_cropcap			= isp_video_cropcap,
	.vidioc_g_crop			= isp_video_get_crop,
	.vidioc_s_crop			= isp_video_set_crop,
	.vidioc_g_parm			= isp_video_get_param,
	.vidioc_s_parm			= isp_video_set_param,
	.vidioc_reqbufs			= isp_video_reqbufs,
	.vidioc_querybuf		= isp_video_querybuf,
	.vidioc_qbuf			= isp_video_qbuf,
	.vidioc_dqbuf			= isp_video_dqbuf,
	.vidioc_streamon		= isp_video_streamon,
	.vidioc_streamoff		= isp_video_streamoff,
	.vidioc_enum_input		= isp_video_enum_input,
	.vidioc_g_input			= isp_video_g_input,
	.vidioc_s_input			= isp_video_s_input,
};

/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */

static int isp_video_open(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct isp_video_fh *handle;
	int ret = 0;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	v4l2_fh_init(&handle->vfh, &video->video);
	v4l2_fh_add(&handle->vfh);

	/* If this is the first user, initialise the pipeline. */
	if (omap3isp_get(video->isp) == NULL) {
		ret = -EBUSY;
		goto done;
	}

	ret = omap3isp_pipeline_pm_use(&video->video.entity, 1);
	if (ret < 0) {
		omap3isp_put(video->isp);
		goto done;
	}

	omap3isp_video_queue_init(&handle->queue, video->type,
				  &isp_video_queue_ops, video->isp->dev,
				  sizeof(struct isp_buffer));

	memset(&handle->format, 0, sizeof(handle->format));
	handle->format.type = video->type;
	handle->timeperframe.denominator = 1;

	handle->video = video;
	file->private_data = &handle->vfh;

done:
	if (ret < 0) {
		v4l2_fh_del(&handle->vfh);
		kfree(handle);
	}

	return ret;
}

static int isp_video_release(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_fh *vfh = file->private_data;
	struct isp_video_fh *handle = to_isp_video_fh(vfh);

	/* Disable streaming and free the buffers queue resources. */
	isp_video_streamoff(file, vfh, video->type);

	mutex_lock(&handle->queue.lock);
	omap3isp_video_queue_cleanup(&handle->queue);
	mutex_unlock(&handle->queue.lock);

	omap3isp_pipeline_pm_use(&video->video.entity, 0);

	/* Release the file handle. */
	v4l2_fh_del(vfh);
	kfree(handle);
	file->private_data = NULL;

	omap3isp_put(video->isp);

	return 0;
}

static unsigned int isp_video_poll(struct file *file, poll_table *wait)
{
	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
	struct isp_video_queue *queue = &vfh->queue;

	return omap3isp_video_queue_poll(queue, file, wait);
}

static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);

	return omap3isp_video_queue_mmap(&vfh->queue, vma);
}

static struct v4l2_file_operations isp_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = isp_video_open,
	.release = isp_video_release,
	.poll = isp_video_poll,
	.mmap = isp_video_mmap,
};

/* -----------------------------------------------------------------------------
 * ISP video core
 */

static const struct isp_video_operations isp_video_dummy_ops = {
};

int omap3isp_video_init(struct isp_video *video, const char *name)
{
	const char *direction;
	int ret;

	switch (video->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		direction = "output";
		video->pad.flags = MEDIA_PAD_FL_SINK;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		direction = "input";
		video->pad.flags = MEDIA_PAD_FL_SOURCE;
		break;

	default:
		return -EINVAL;
	}

	ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
	if (ret < 0)
		return ret;

	mutex_init(&video->mutex);
	atomic_set(&video->active, 0);

	spin_lock_init(&video->pipe.lock);
	mutex_init(&video->stream_lock);

	/* Initialize the video device. */
	if (video->ops == NULL)
		video->ops = &isp_video_dummy_ops;

	video->video.fops = &isp_video_fops;
	snprintf(video->video.name, sizeof(video->video.name),
		 "OMAP3 ISP %s %s", name, direction);
	video->video.vfl_type = VFL_TYPE_GRABBER;
	video->video.release = video_device_release_empty;
	video->video.ioctl_ops = &isp_video_ioctl_ops;
	video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;

	video_set_drvdata(&video->video, video);

	return 0;
}

int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
{
	int ret;

	video->video.v4l2_dev = vdev;

	ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
	if (ret < 0)
		printk(KERN_ERR "%s: could not register video device (%d)\n",
		       __func__, ret);

	return ret;
}

void omap3isp_video_unregister(struct isp_video *video)
{
	if (video_is_registered(&video->video)) {
		media_entity_cleanup(&video->video.entity);
		video_unregister_device(&video->video);
	}
}