
/drivers/media/pci/ivtv/ivtv-irq.c

https://bitbucket.org/advance38/linux

/* interrupt handling
    Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004 Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "ivtv-driver.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"
#include <media/v4l2-event.h>

#define DMA_MAGIC_COOKIE 0x000001fe

static void ivtv_dma_dec_start(struct ivtv_stream *s);

static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};

static void ivtv_pcm_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
	struct ivtv_buffer *buf;

	/* Pass the PCM data to ivtv-alsa */
	while (1) {
		/*
		 * Users should not be using both the ALSA and V4L2 PCM audio
		 * capture interfaces at the same time. If the user is doing
		 * this, there may be a buffer in q_io to grab, use, and put
		 * back in rotation.
		 */
		buf = ivtv_dequeue(s, &s->q_io);
		if (buf == NULL)
			buf = ivtv_dequeue(s, &s->q_full);
		if (buf == NULL)
			break;

		if (buf->readpos < buf->bytesused)
			itv->pcm_announce_callback(itv->alsa,
					(u8 *)(buf->buf + buf->readpos),
					(size_t)(buf->bytesused - buf->readpos));

		ivtv_enqueue(s, buf, &s->q_free);
	}
}
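
/* Deferred work for PIO streams: copy the pending scatter-gather segments
   from card memory into the stream buffers with memcpy_fromio(), then raise
   the encoder PIO-complete interrupt so the normal completion path runs. */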
static void ivtv_pio_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
	struct ivtv_buffer *buf;
	int i = 0;

	IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
			s->vdev == NULL || !ivtv_use_pio(s)) {
		itv->cur_pio_stream = -1;
		/* trigger PIO complete user interrupt */
		write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
		return;
	}
	IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
	list_for_each_entry(buf, &s->q_dma.list, list) {
		u32 size = s->sg_processing[i].size & 0x3ffff;

		/* Copy the data from the card to the buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
			memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
		}
		else {
			memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
		}
		i++;
		if (i == s->sg_processing_size)
			break;
	}
	write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}
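
/* Entry point for the deferred interrupt work, run from the per-card kthread
   worker: dispatch whichever PIO/VBI/YUV/PCM jobs the IRQ handler flagged. */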
void ivtv_irq_work_handler(struct kthread_work *work)
{
	struct ivtv *itv = container_of(work, struct ivtv, irq_work);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
		ivtv_pio_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
		ivtv_vbi_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
		ivtv_yuv_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PCM, &itv->i_flags))
		ivtv_pcm_work_handler(itv);
}

/* Determine the required DMA size, set up enough buffers in the predma queue
   and actually copy the data from the card to the buffers in case a PIO
   transfer is required for this stream.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;
	int idx = s->sg_pending_size;
	int rc;

	/* sanity checks */
	if (s->vdev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}

	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_MPG:
		offset = data[1];
		size = data[2];
		s->pending_pts = 0;
		break;

	case IVTV_ENC_STREAM_TYPE_YUV:
		offset = data[1];
		size = data[2];
		UVoffset = data[3];
		UVsize = data[4];
		s->pending_pts = ((u64) data[5] << 32) | data[6];
		break;

	case IVTV_ENC_STREAM_TYPE_PCM:
		offset = data[1] + 12;
		size = data[2] - 12;
		s->pending_pts = read_dec(offset - 8) |
			((u64)(read_dec(offset - 12)) << 32);
		if (itv->has_cx23415)
			offset += IVTV_DECODER_OFFSET;
		break;

	case IVTV_ENC_STREAM_TYPE_VBI:
		size = itv->vbi.enc_size * itv->vbi.fpi;
		offset = read_enc(itv->vbi.enc_start - 4) + 12;
		if (offset == 12) {
			IVTV_DEBUG_INFO("VBI offset == 0\n");
			return -1;
		}
		s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
		break;

	case IVTV_DEC_STREAM_TYPE_VBI:
		size = read_dec(itv->vbi.dec_start + 4) + 8;
		offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
		s->pending_pts = 0;
		offset += IVTV_DECODER_OFFSET;
		break;
	default:
		/* shouldn't happen */
		return -1;
	}

	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
		}
		else {
			s->pending_backup = read_enc(offset);
			write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
		}
		s->pending_offset = offset;
	}

	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	}

	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;

	/* got the buffers, now fill in sg_pending */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	memset(buf->buf, 0, 128);
	list_for_each_entry(buf, &s->q_predma.list, list) {
		if (skip_bufs-- > 0)
			continue;
		s->sg_pending[idx].dst = buf->dma_handle;
		s->sg_pending[idx].src = offset;
		s->sg_pending[idx].size = s->buf_size;
		buf->bytesused = min(size, s->buf_size);
		buf->dma_xfer_cnt = s->dma_xfer_cnt;

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) {	/* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->sg_pending_size = idx;
	return 0;
}
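
/* Post-transfer processing for a completed DMA or PIO: locate the magic
   cookie to find where the data really starts, restore the word that was
   backed up in stream_enc_dma_append(), move the buffers to q_full (or parse
   them as VBI data), and schedule the ALSA PCM work if needed. */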
static void dma_post(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf = NULL;
	struct list_head *p;
	u32 offset;
	__le32 *u32buf;
	int x = 0;

	IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
			s->name, s->dma_offset);
	list_for_each(p, &s->q_dma.list) {
		buf = list_entry(p, struct ivtv_buffer, list);
		u32buf = (__le32 *)buf->buf;

		/* Sync Buffer */
		ivtv_buf_sync_for_cpu(s, buf);

		if (x == 0 && ivtv_use_dma(s)) {
			offset = s->dma_last_offset;
			if (u32buf[offset / 4] != DMA_MAGIC_COOKIE)
			{
				for (offset = 0; offset < 64; offset++) {
					if (u32buf[offset] == DMA_MAGIC_COOKIE) {
						break;
					}
				}
				offset *= 4;
				if (offset == 256) {
					IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
					offset = s->dma_last_offset;
				}
				if (s->dma_last_offset != offset)
					IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
				s->dma_last_offset = offset;
			}
			if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
						s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
				write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
			}
			else {
				write_enc_sync(0, s->dma_offset);
			}
			if (offset) {
				buf->bytesused -= offset;
				memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
			}
			*u32buf = cpu_to_le32(s->dma_backup);
		}
		x++;
		/* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
		    s->type == IVTV_ENC_STREAM_TYPE_VBI)
			buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
	}
	if (buf)
		buf->bytesused += s->dma_last_offset;
	if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
		list_for_each_entry(buf, &s->q_dma.list, list) {
			/* Parse and Groom VBI Data */
			s->q_dma.bytesused -= buf->bytesused;
			ivtv_process_vbi_data(itv, buf, 0, s->type);
			s->q_dma.bytesused += buf->bytesused;
		}
		if (s->fh == NULL) {
			ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
			return;
		}
	}

	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);

	if (s->type == IVTV_ENC_STREAM_TYPE_PCM &&
	    itv->pcm_announce_callback != NULL) {
		/*
		 * Set up the work handler to pass the data to ivtv-alsa.
		 *
		 * We just use q_full and let the work handler race with users
		 * making ivtv-fileops.c calls on the PCM device node.
		 *
		 * Users should not be using both the ALSA and V4L2 PCM audio
		 * capture interfaces at the same time. If the user does this,
		 * fragments of data will just go out each interface as they
		 * race for PCM data.
		 */
		set_bit(IVTV_F_I_WORK_HANDLER_PCM, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
	}

	if (s->fh)
		wake_up(&s->waitq);
}
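
/* Build the scatter-gather list for a host-to-decoder transfer (with the
   Y/UV split handled specially for YUV frames) and either start the DMA
   immediately or mark it pending if another DMA is already in flight. */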
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
	struct ivtv *itv = s->itv;
	struct yuv_playback_info *yi = &itv->yuv_info;
	u8 frame = yi->draw_frame;
	struct yuv_frame_info *f = &yi->new_frame_info[frame];
	struct ivtv_buffer *buf;
	u32 y_size = 720 * ((f->src_h + 31) & ~31);
	u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
	int y_done = 0;
	int bytes_written = 0;
	unsigned long flags = 0;
	int idx = 0;

	IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);

	/* Insert buffer block for YUV if needed */
	if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
		if (yi->blanking_dmaptr) {
			s->sg_pending[idx].src = yi->blanking_dmaptr;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = 720 * 16;
		}
		offset += 720 * 16;
		idx++;
	}

	list_for_each_entry(buf, &s->q_predma.list, list) {
		/* YUV UV Offset from Y Buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done &&
				(bytes_written + buf->bytesused) >= y_size) {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = y_size - bytes_written;
			offset = uv_offset;
			if (s->sg_pending[idx].size != buf->bytesused) {
				idx++;
				s->sg_pending[idx].src =
					buf->dma_handle + s->sg_pending[idx - 1].size;
				s->sg_pending[idx].dst = offset;
				s->sg_pending[idx].size =
					buf->bytesused - s->sg_pending[idx - 1].size;
				offset += s->sg_pending[idx].size;
			}
			y_done = 1;
		} else {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = buf->bytesused;
			offset += buf->bytesused;
		}
		bytes_written += buf->bytesused;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
		idx++;
	}
	s->sg_pending_size = idx;

	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	if (lock)
		spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		ivtv_dma_dec_start(s);
	}
	else {
		set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
	if (lock)
		spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}
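
/* Program the next scatter-gather segment into the hardware SG element,
   kick the encoder DMA engine and (re)arm the 300 ms DMA watchdog timer. */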
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}

static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}

/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	int i;

	IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

	if (ivtv_use_dma(s))
		s->sg_pending[s->sg_pending_size - 1].size += 256;

	/* If this is an MPEG stream, and VBI data is also pending, then append the
	   VBI DMA to the MPEG DMA and transfer both sets of data at once.
	   VBI DMA is a second class citizen compared to MPEG and mixing them together
	   will confuse the firmware (the end of a VBI DMA is seen as the end of an
	   MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
	   sure we only use the MPEG DMA to transfer the VBI DMA if both are in
	   use. This way no conflicts occur. */
	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
			s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
		if (ivtv_use_dma(s_vbi))
			s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
		for (i = 0; i < s_vbi->sg_pending_size; i++) {
			s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
		}
		s_vbi->dma_offset = s_vbi->pending_offset;
		s_vbi->sg_pending_size = 0;
		s_vbi->dma_xfer_cnt++;
		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name);
	}

	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;
	s->dma_offset = s->pending_offset;
	s->dma_backup = s->pending_backup;
	s->dma_pts = s->pending_pts;

	if (ivtv_use_pio(s)) {
		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		set_bit(IVTV_F_I_PIO, &itv->i_flags);
		itv->cur_pio_stream = s->type;
	}
	else {
		itv->dma_retries = 0;
		ivtv_dma_enc_start_xfer(s);
		set_bit(IVTV_F_I_DMA, &itv->i_flags);
		itv->cur_dma_stream = s->type;
	}
}
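
/* start the decoder DMA */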
static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;

	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	itv->dma_retries = 0;
	ivtv_dma_dec_start_xfer(s);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
}
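
/* Handle the "DMA read" interrupt: a decoder (host-to-card) or user DMA
   transfer finished. Check for errors, retry or continue with the next
   segment, and otherwise release the buffers and kick the firmware. */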
static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type = 0;

	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");

	del_timer(&itv->dma_timer);

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0)
		return;

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		s = &itv->streams[itv->cur_dma_stream];
		ivtv_stream_sync_for_cpu(s);

		if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
			IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
					read_reg(IVTV_REG_DMASTATUS),
					s->sg_processed, s->sg_processing_size, itv->dma_retries);
			write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
			if (itv->dma_retries == 3) {
				/* Too many retries, give up on this frame */
				itv->dma_retries = 0;
				s->sg_processed = s->sg_processing_size;
			}
			else {
				/* Retry, starting with the first xfer segment.
				   Just retrying the current segment is not sufficient. */
				s->sg_processed = 0;
				itv->dma_retries++;
			}
		}
		if (s->sg_processed < s->sg_processing_size) {
			/* DMA next buffer */
			ivtv_dma_dec_start_xfer(s);
			return;
		}
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
			hw_stream_type = 2;
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		/* For some reason must kick the firmware, like PIO mode,
		   I think this tells the firmware we are done and the size
		   of the xfer so it can calculate what we need next.
		   I think we can do this part ourselves but would have to
		   fully calculate xfer info ourselves and not use interrupts
		 */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
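
/* Handle the encoder DMA-complete interrupt: on error retry from the first
   segment (up to three times), otherwise continue with the next segment or
   finish the transfer via dma_post(). */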
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);

	del_timer(&itv->dma_timer);

	if (itv->cur_dma_stream < 0)
		return;

	s = &itv->streams[itv->cur_dma_stream];
	ivtv_stream_sync_for_cpu(s);

	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
			s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (itv->dma_retries == 3) {
			/* Too many retries, give up on this frame */
			itv->dma_retries = 0;
			s->sg_processed = s->sg_processing_size;
		}
		else {
			/* Retry, starting with the first xfer segment.
			   Just retrying the current segment is not sufficient. */
			s->sg_processed = 0;
			itv->dma_retries++;
		}
	}
	if (s->sg_processed < s->sg_processing_size) {
		/* DMA next buffer */
		ivtv_dma_enc_start_xfer(s);
		return;
	}
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	s->sg_processing_size = 0;
	s->sg_processed = 0;
	wake_up(&itv->dma_waitq);
}
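
/* Completion path for PIO transfers: post-process the copied buffers and
   tell the firmware the data for this stream has been fetched. */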
static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
	struct ivtv_stream *s;

	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
		itv->cur_pio_stream = -1;
		return;
	}
	s = &itv->streams[itv->cur_pio_stream];
	IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	itv->cur_pio_stream = -1;
	dma_post(s);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
	else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
	else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	wake_up(&itv->dma_waitq);
}
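
/* Handle the DMA error interrupt: clear the status register only when it is
   safe to do so, then retry the decoder or encoder transfer, restart a
   pending user DMA, or give up and wake up any waiters. */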
static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	u32 status;

	del_timer(&itv->dma_timer);

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	status = read_reg(IVTV_REG_DMASTATUS);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
				status, itv->cur_dma_stream);
	/*
	 * We do *not* write back to the IVTV_REG_DMASTATUS register to
	 * clear the error status, if either the encoder write (0x02) or
	 * decoder read (0x01) bus master DMA operation does not indicate
	 * completion. We can race with the DMA engine, which may have
	 * transitioned to completed status *after* we read the register.
	 * Setting an IVTV_REG_DMASTATUS flag back to "busy" status, after the
	 * DMA engine has completed, will cause the DMA engine to stop working.
	 */
	status &= 0x3;
	if (status == 0x3)
		write_reg(status, IVTV_REG_DMASTATUS);

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
			/* retry */
			/*
			 * FIXME - handle cases of DMA error similar to
			 * encoder below, except conditioned on status & 0x1
			 */
			ivtv_dma_dec_start(s);
			return;
		} else {
			if ((status & 0x2) == 0) {
				/*
				 * CX2341x Bus Master DMA write is ongoing.
				 * Reset the timer and let it complete.
				 */
				itv->dma_timer.expires =
						jiffies + msecs_to_jiffies(600);
				add_timer(&itv->dma_timer);
				return;
			}

			if (itv->dma_retries < 3) {
				/*
				 * CX2341x Bus Master DMA write has ended.
				 * Retry the write, starting with the first
				 * xfer segment. Just retrying the current
				 * segment is not sufficient.
				 */
				s->sg_processed = 0;
				itv->dma_retries++;
				ivtv_dma_enc_start_xfer(s);
				return;
			}
			/* Too many retries, give up on this one */
		}
	}
	if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		ivtv_udma_start(itv);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
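
/* The encoder reports that captured MPG/YUV/PCM data is ready: read the
   offset and size from the mailbox and queue a DMA (or PIO) request for
   the corresponding stream. */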
static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, 7, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
				data[0], data[1], data[2]);
		return;
	}
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}
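
/* Same as above, but for captured VBI data; the offset and size are taken
   from card memory in stream_enc_dma_append() rather than from the mailbox. */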
static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

	if (!stream_enc_dma_append(s, data))
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
}

static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
			!stream_enc_dma_append(s, data)) {
		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
	}
}
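
/* The decoder requests more MPEG or YUV data: work out the requested size
   and offset and, if enough data is buffered, prepare a host-to-decoder
   transfer; otherwise flag the stream as needing data. */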
static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* YUV or MPG */
	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data);
		itv->dma_data_req_size =
			1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
		itv->dma_data_req_offset = data[1];
		if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0)
			ivtv_yuv_frame_complete(itv);
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	}
	else {
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 3, data);
		itv->dma_data_req_size = min_t(u32, data[2], 0x10000);
		itv->dma_data_req_offset = data[1];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	}
	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
			itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	}
	else {
		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
			ivtv_yuv_setup_stream_frame(itv);
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
	}
}

static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	unsigned int frame = read_reg(IVTV_REG_DEC_LINE_FIELD) & 1;
	struct yuv_playback_info *yi = &itv->yuv_info;
	int last_dma_frame = atomic_read(&yi->next_dma_frame);
	struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame];

	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

	if (((frame ^ f->sync_field) == 0 &&
		((itv->last_vsync_field & 1) ^ f->sync_field)) ||
			(frame != (itv->last_vsync_field & 1) && !f->interlaced)) {
		int next_dma_frame = last_dma_frame;

		if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) {
			if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) {
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
				next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS;
				atomic_set(&yi->next_dma_frame, next_dma_frame);
				yi->fields_lapsed = -1;
				yi->running = 1;
			}
		}
	}
	if (frame != (itv->last_vsync_field & 1)) {
		static const struct v4l2_event evtop = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_TOP,
		};
		static const struct v4l2_event evbottom = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_BOTTOM,
		};
		struct ivtv_stream *s = ivtv_get_output_stream(itv);

		itv->last_vsync_field += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
			if (s)
				wake_up(&s->waitq);
		}
		if (s && s->vdev)
			v4l2_event_queue(s->vdev, frame ? &evtop : &evbottom);
		wake_up(&itv->vsync_waitq);

		/* Send VBI to saa7127 */
		if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
			test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		}

		/* Check if we need to update the yuv registers */
		if (yi->running && (yi->yuv_forced_update || f->update)) {
			if (!f->update) {
				last_dma_frame =
					(u8)(atomic_read(&yi->next_dma_frame) -
						1) % IVTV_YUV_BUFFERS;
				f = &yi->new_frame_info[last_dma_frame];
			}

			if (f->src_w) {
				yi->update_frame = last_dma_frame;
				f->update = 0;
				yi->yuv_forced_update = 0;
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
			}
		}

		yi->fields_lapsed++;
	}
}

#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT)
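
/* Main interrupt handler: acknowledge the asserted interrupt bits, dispatch
   them to the handlers above, start any pending DMA/PIO in round-robin order,
   and queue the deferred work if any was flagged. */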
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;
	u32 stat;
	int i;
	u8 vsync_force = 0;

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			if ((itv->last_vsync_field & 1) !=
			    (read_reg(IVTV_REG_DEC_LINE_FIELD) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n",
					read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->eos_waitq);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}

	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}

		if (i == IVTV_MAX_STREAMS &&
		    test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags))
			ivtv_udma_start(itv);
	}

	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}

	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
		queue_kthread_work(&itv->irq_worker, &itv->irq_work);
	}

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time.
	 */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}
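
/* DMA timeout handler: a transfer armed in the *_start_xfer functions did not
   complete in time, so log it, reset the DMA state and wake up any waiters. */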
void ivtv_unfinished_dma(unsigned long arg)
{
	struct ivtv *itv = (struct ivtv *)arg;

	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}