Linux Kernel 3.7.1
ivtv-irq.c
/* interrupt handling
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <[email protected]>
    Copyright (C) 2005-2007  Hans Verkuil <[email protected]>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "ivtv-driver.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"
#include <media/v4l2-event.h>

#define DMA_MAGIC_COOKIE 0x000001fe

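/*
 * How the magic cookie is used: before an encoder DMA transfer is
 * scheduled, stream_enc_dma_append() saves the 32-bit word at the
 * transfer offset and overwrites it with DMA_MAGIC_COOKIE. After the
 * transfer, dma_post() scans the first 64 words of the received buffer
 * for the cookie to locate the real start of the data, then restores
 * the saved word.
 */
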
static void ivtv_dma_dec_start(struct ivtv_stream *s);

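/*
 * Maps the stream type reported in encoder mailbox data[0] to a driver
 * stream index (see ivtv_irq_enc_start_cap()). VBI capture is signalled
 * by a separate interrupt and handled in ivtv_irq_enc_vbi_cap() instead.
 */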
static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};

static void ivtv_pcm_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
	struct ivtv_buffer *buf;

	/* Pass the PCM data to ivtv-alsa */

	while (1) {
		/*
		 * Users should not be using both the ALSA and V4L2 PCM audio
		 * capture interfaces at the same time.  If the user is doing
		 * this, there may be a buffer in q_io to grab, use, and put
		 * back in rotation.
		 */
		buf = ivtv_dequeue(s, &s->q_io);
		if (buf == NULL)
			buf = ivtv_dequeue(s, &s->q_full);
		if (buf == NULL)
			break;

		if (buf->readpos < buf->bytesused)
			itv->pcm_announce_callback(itv->alsa,
				(u8 *)(buf->buf + buf->readpos),
				(size_t)(buf->bytesused - buf->readpos));

		ivtv_enqueue(s, buf, &s->q_free);
	}
}

static void ivtv_pio_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
	struct ivtv_buffer *buf;
	int i = 0;

	IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
			s->vdev == NULL || !ivtv_use_pio(s)) {
		itv->cur_pio_stream = -1;
		/* trigger PIO complete user interrupt */
		write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
		return;
	}
	IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
	list_for_each_entry(buf, &s->q_dma.list, list) {
		u32 size = s->sg_processing[i].size & 0x3ffff;

		/* Copy the data from the card to the buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
			memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
		}
		else {
			memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
		}
		i++;
		if (i == s->sg_processing_size)
			break;
	}
	write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}

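/*
 * Deferred-work pattern: the hard interrupt handler only sets the
 * IVTV_F_I_WORK_HANDLER_* flag bits (plus IVTV_F_I_HAVE_WORK) and queues
 * irq_work on a kthread worker; the expensive work (memcpy_fromio() for
 * PIO, VBI parsing, YUV register updates) runs in process context here
 * rather than in the interrupt itself.
 */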
void ivtv_irq_work_handler(struct kthread_work *work)
{
	struct ivtv *itv = container_of(work, struct ivtv, irq_work);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
		ivtv_pio_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
		ivtv_vbi_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
		ivtv_yuv_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PCM, &itv->i_flags))
		ivtv_pcm_work_handler(itv);
}

/* Determine the required DMA size, set up enough buffers in the predma queue
   and actually copy the data from the card to the buffers in case a PIO
   transfer is required for this stream.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;
	int idx = s->sg_pending_size;
	int rc;

	/* sanity checks */
	if (s->vdev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}

	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_MPG:
		offset = data[1];
		size = data[2];
		s->pending_pts = 0;
		break;

	case IVTV_ENC_STREAM_TYPE_YUV:
		offset = data[1];
		size = data[2];
		UVoffset = data[3];
		UVsize = data[4];
		s->pending_pts = ((u64) data[5] << 32) | data[6];
		break;

	case IVTV_ENC_STREAM_TYPE_PCM:
		offset = data[1] + 12;
		size = data[2] - 12;
		s->pending_pts = read_dec(offset - 8) |
			((u64)(read_dec(offset - 12)) << 32);
		if (itv->has_cx23415)
			offset += IVTV_DECODER_OFFSET;
		break;

	case IVTV_ENC_STREAM_TYPE_VBI:
		size = itv->vbi.enc_size * itv->vbi.fpi;
		offset = read_enc(itv->vbi.enc_start - 4) + 12;
		if (offset == 12) {
			IVTV_DEBUG_INFO("VBI offset == 0\n");
			return -1;
		}
		s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
		break;

	case IVTV_DEC_STREAM_TYPE_VBI:
		size = read_dec(itv->vbi.dec_start + 4) + 8;
		offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
		s->pending_pts = 0;
		offset += IVTV_DECODER_OFFSET;
		break;
	default:
		/* shouldn't happen */
		return -1;
	}

	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
		}
		else {
			s->pending_backup = read_enc(offset);
			write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
		}
		s->pending_offset = offset;
	}

	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	}
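	/* Example: with buf_size = 0x8000 and a Y plane of 0x9000 bytes,
	   bytes_needed is rounded up to 0x10000, so the UV samples start
	   exactly on the next buffer boundary before UVsize is added. */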

	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;

	/* got the buffers, now fill in sg_pending */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	memset(buf->buf, 0, 128);
	list_for_each_entry(buf, &s->q_predma.list, list) {
		if (skip_bufs-- > 0)
			continue;
		s->sg_pending[idx].dst = buf->dma_handle;
		s->sg_pending[idx].src = offset;
		s->sg_pending[idx].size = s->buf_size;
		buf->bytesused = min(size, s->buf_size);
		buf->dma_xfer_cnt = s->dma_xfer_cnt;

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) {	/* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->sg_pending_size = idx;
	return 0;
}

static void dma_post(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf = NULL;
	struct list_head *p;
	u32 offset;
	__le32 *u32buf;
	int x = 0;

	IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
			s->name, s->dma_offset);
	list_for_each(p, &s->q_dma.list) {
		buf = list_entry(p, struct ivtv_buffer, list);
		u32buf = (__le32 *)buf->buf;

		/* Sync Buffer */
		ivtv_buf_sync_for_cpu(s, buf);

		if (x == 0 && ivtv_use_dma(s)) {
			offset = s->dma_last_offset;
			if (u32buf[offset / 4] != DMA_MAGIC_COOKIE)
			{
				for (offset = 0; offset < 64; offset++) {
					if (u32buf[offset] == DMA_MAGIC_COOKIE) {
						break;
					}
				}
				offset *= 4;
				if (offset == 256) {
					IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
					offset = s->dma_last_offset;
				}
				if (s->dma_last_offset != offset)
					IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
				s->dma_last_offset = offset;
			}
			if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
						s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
				write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
			}
			else {
				write_enc_sync(0, s->dma_offset);
			}
			if (offset) {
				buf->bytesused -= offset;
				memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
			}
			*u32buf = cpu_to_le32(s->dma_backup);
		}
		x++;
		/* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
		    s->type == IVTV_ENC_STREAM_TYPE_VBI)
			buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
	}
	if (buf)
		buf->bytesused += s->dma_last_offset;
	if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
		list_for_each_entry(buf, &s->q_dma.list, list) {
			/* Parse and Groom VBI Data */
			s->q_dma.bytesused -= buf->bytesused;
			ivtv_process_vbi_data(itv, buf, 0, s->type);
			s->q_dma.bytesused += buf->bytesused;
		}
		if (s->fh == NULL) {
			ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
			return;
		}
	}

	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);

	if (s->type == IVTV_ENC_STREAM_TYPE_PCM &&
	    itv->pcm_announce_callback != NULL) {
		/*
		 * Set up the work handler to pass the data to ivtv-alsa.
		 *
		 * We just use q_full and let the work handler race with users
		 * making ivtv-fileops.c calls on the PCM device node.
		 *
		 * Users should not be using both the ALSA and V4L2 PCM audio
		 * capture interfaces at the same time.  If the user does this,
		 * fragments of data will just go out each interface as they
		 * race for PCM data.
		 */
		set_bit(IVTV_F_I_WORK_HANDLER_PCM, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
	}

	if (s->fh)
		wake_up(&s->waitq);
}

void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
	struct ivtv *itv = s->itv;
	struct yuv_playback_info *yi = &itv->yuv_info;
	u8 frame = yi->draw_frame;
	struct yuv_frame_info *f = &yi->new_frame_info[frame];
	struct ivtv_buffer *buf;
	u32 y_size = 720 * ((f->src_h + 31) & ~31);
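	/* 720 bytes per line of luma; the source height is rounded up to a
	   multiple of 32 lines, e.g. src_h = 480 gives y_size = 720 * 480,
	   while src_h = 488 rounds up to 512 lines. */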
	u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
	int y_done = 0;
	int bytes_written = 0;
	unsigned long flags = 0;
	int idx = 0;

	IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);

	/* Insert buffer block for YUV if needed */
	if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
		if (yi->blanking_dmaptr) {
			s->sg_pending[idx].src = yi->blanking_dmaptr;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = 720 * 16;
		}
		offset += 720 * 16;
		idx++;
	}

	list_for_each_entry(buf, &s->q_predma.list, list) {
		/* YUV UV Offset from Y Buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done &&
				(bytes_written + buf->bytesused) >= y_size) {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = y_size - bytes_written;
			offset = uv_offset;
			if (s->sg_pending[idx].size != buf->bytesused) {
				idx++;
				s->sg_pending[idx].src =
				  buf->dma_handle + s->sg_pending[idx - 1].size;
				s->sg_pending[idx].dst = offset;
				s->sg_pending[idx].size =
				   buf->bytesused - s->sg_pending[idx - 1].size;
				offset += s->sg_pending[idx].size;
			}
			y_done = 1;
		} else {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = buf->bytesused;
			offset += buf->bytesused;
		}
		bytes_written += buf->bytesused;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
		idx++;
	}
	s->sg_pending_size = idx;

	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	if (lock)
		spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		ivtv_dma_dec_start(s);
	}
	else {
		set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
	if (lock)
		spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}

static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}

static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}

/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	int i;

	IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

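	/* The extra 256 bytes on the last scatter-gather element appear to
	   cover the worst-case shift of the data start that dma_post()
	   corrects for when it scans the first 256 bytes for the cookie. */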
	if (ivtv_use_dma(s))
		s->sg_pending[s->sg_pending_size - 1].size += 256;

	/* If this is an MPEG stream, and VBI data is also pending, then append the
	   VBI DMA to the MPEG DMA and transfer both sets of data at once.

	   VBI DMA is a second-class citizen compared to MPEG and mixing them together
	   will confuse the firmware (the end of a VBI DMA is seen as the end of a
	   MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
	   sure we only use the MPEG DMA to transfer the VBI DMA if both are in
	   use. This way no conflicts occur. */
	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
			s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
		if (ivtv_use_dma(s_vbi))
			s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
		for (i = 0; i < s_vbi->sg_pending_size; i++) {
			s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
		}
		s_vbi->dma_offset = s_vbi->pending_offset;
		s_vbi->sg_pending_size = 0;
		s_vbi->dma_xfer_cnt++;
		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name);
	}

	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;
	s->dma_offset = s->pending_offset;
	s->dma_backup = s->pending_backup;
	s->dma_pts = s->pending_pts;

	if (ivtv_use_pio(s)) {
		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		set_bit(IVTV_F_I_PIO, &itv->i_flags);
		itv->cur_pio_stream = s->type;
	}
	else {
		itv->dma_retries = 0;
		ivtv_dma_enc_start_xfer(s);
		set_bit(IVTV_F_I_DMA, &itv->i_flags);
		itv->cur_dma_stream = s->type;
	}
}

static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;

	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	itv->dma_retries = 0;
	ivtv_dma_dec_start_xfer(s);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
}

static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type = 0;

	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");

	del_timer(&itv->dma_timer);

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0)
		return;

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		s = &itv->streams[itv->cur_dma_stream];
		ivtv_stream_sync_for_cpu(s);

		if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
			IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
					read_reg(IVTV_REG_DMASTATUS),
					s->sg_processed, s->sg_processing_size, itv->dma_retries);
			write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
			if (itv->dma_retries == 3) {
				/* Too many retries, give up on this frame */
				itv->dma_retries = 0;
				s->sg_processed = s->sg_processing_size;
			}
			else {
				/* Retry, starting with the first xfer segment.
				   Just retrying the current segment is not sufficient. */
				s->sg_processed = 0;
				itv->dma_retries++;
			}
		}
		if (s->sg_processed < s->sg_processing_size) {
			/* DMA next buffer */
			ivtv_dma_dec_start_xfer(s);
			return;
		}
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
			hw_stream_type = 2;
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		/* For some reason we must kick the firmware, as in PIO mode.
		   I think this tells the firmware we are done and the size
		   of the xfer so it can calculate what we need next.
		   I think we could do this part ourselves, but we would have to
		   fully calculate the xfer info ourselves and not use interrupts
		 */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);

	del_timer(&itv->dma_timer);

	if (itv->cur_dma_stream < 0)
		return;

	s = &itv->streams[itv->cur_dma_stream];
	ivtv_stream_sync_for_cpu(s);

	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
			s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (itv->dma_retries == 3) {
			/* Too many retries, give up on this frame */
			itv->dma_retries = 0;
			s->sg_processed = s->sg_processing_size;
		}
		else {
			/* Retry, starting with the first xfer segment.
			   Just retrying the current segment is not sufficient. */
			s->sg_processed = 0;
			itv->dma_retries++;
		}
	}
	if (s->sg_processed < s->sg_processing_size) {
		/* DMA next buffer */
		ivtv_dma_enc_start_xfer(s);
		return;
	}
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	s->sg_processing_size = 0;
	s->sg_processed = 0;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
	struct ivtv_stream *s;

	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
		itv->cur_pio_stream = -1;
		return;
	}
	s = &itv->streams[itv->cur_pio_stream];
	IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	itv->cur_pio_stream = -1;
	dma_post(s);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
	else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
	else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	u32 status;

	del_timer(&itv->dma_timer);

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	status = read_reg(IVTV_REG_DMASTATUS);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
				status, itv->cur_dma_stream);
	/*
	 * We do *not* write back to the IVTV_REG_DMASTATUS register to
	 * clear the error status, if either the encoder write (0x02) or
	 * decoder read (0x01) bus master DMA operation does not indicate
	 * completion. We can race with the DMA engine, which may have
	 * transitioned to completed status *after* we read the register.
	 * Setting an IVTV_REG_DMASTATUS flag back to "busy" status, after the
	 * DMA engine has completed, will cause the DMA engine to stop working.
	 */
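	/* Per the note above: acknowledge the error only when both the
	   decoder read (0x01) and encoder write (0x02) bits already read
	   back as completed. */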
	status &= 0x3;
	if (status == 0x3)
		write_reg(status, IVTV_REG_DMASTATUS);

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
			/* retry */
			/*
			 * FIXME - handle cases of DMA error similar to
			 * encoder below, except conditioned on status & 0x1
			 */
			ivtv_dma_dec_start(s);
			return;
		} else {
			if ((status & 0x2) == 0) {
				/*
				 * CX2341x Bus Master DMA write is ongoing.
				 * Reset the timer and let it complete.
				 */
				itv->dma_timer.expires =
						jiffies + msecs_to_jiffies(600);
				add_timer(&itv->dma_timer);
				return;
			}

			if (itv->dma_retries < 3) {
				/*
				 * CX2341x Bus Master DMA write has ended.
				 * Retry the write, starting with the first
				 * xfer segment. Just retrying the current
				 * segment is not sufficient.
				 */
				s->sg_processed = 0;
				itv->dma_retries++;
				ivtv_dma_enc_start_xfer(s);
				return;
			}
			/* Too many retries, give up on this one */
		}

	}
	if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		ivtv_udma_start(itv);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, 7, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
				data[0], data[1], data[2]);
		return;
	}
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}

static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

	if (!stream_enc_dma_append(s, data))
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
}

static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
			!stream_enc_dma_append(s, data)) {
		set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}

static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* YUV or MPG */

	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data);
		itv->dma_data_req_size =
			1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
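		/* A 4:2:0 frame needs 720 luma + 360 chroma bytes per line,
		   i.e. 1080 * height, with the height rounded up to a
		   multiple of 32 lines. */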
		itv->dma_data_req_offset = data[1];
		if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0)
			ivtv_yuv_frame_complete(itv);
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	}
	else {
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 3, data);
		itv->dma_data_req_size = min_t(u32, data[2], 0x10000);
		itv->dma_data_req_offset = data[1];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	}
	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
		       itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	}
	else {
		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
			ivtv_yuv_setup_stream_frame(itv);
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
	}
}

static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	unsigned int frame = read_reg(IVTV_REG_DEC_LINE_FIELD) & 1;
	struct yuv_playback_info *yi = &itv->yuv_info;
	int last_dma_frame = atomic_read(&yi->next_dma_frame);
	struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame];

	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

	if (((frame ^ f->sync_field) == 0 &&
		((itv->last_vsync_field & 1) ^ f->sync_field)) ||
			(frame != (itv->last_vsync_field & 1) && !f->interlaced)) {
		int next_dma_frame = last_dma_frame;

		if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) {
			if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) {
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
				next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS;
				atomic_set(&yi->next_dma_frame, next_dma_frame);
				yi->fields_lapsed = -1;
				yi->running = 1;
			}
		}
	}
	if (frame != (itv->last_vsync_field & 1)) {
		static const struct v4l2_event evtop = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_TOP,
		};
		static const struct v4l2_event evbottom = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_BOTTOM,
		};
		struct ivtv_stream *s = ivtv_get_output_stream(itv);

		itv->last_vsync_field += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
			if (s)
				wake_up(&s->waitq);
		}
		if (s && s->vdev)
			v4l2_event_queue(s->vdev, frame ? &evtop : &evbottom);
		wake_up(&itv->vsync_waitq);

		/* Send VBI to saa7127 */
		if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
			test_bit(IVTV_F_I_UPDATE_VBI, &itv->i_flags))) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		}

		/* Check if we need to update the yuv registers */
		if (yi->running && (yi->yuv_forced_update || f->update)) {
			if (!f->update) {
				last_dma_frame =
					(u8)(atomic_read(&yi->next_dma_frame) -
						 1) % IVTV_YUV_BUFFERS;
				f = &yi->new_frame_info[last_dma_frame];
			}

			if (f->src_w) {
				yi->update_frame = last_dma_frame;
				f->update = 0;
				yi->yuv_forced_update = 0;
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
			}
		}

		yi->fields_lapsed++;
	}
}

#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT)

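/*
 * All interrupts whose service routines may leave a DMA or PIO transfer
 * pending; when any of them fires, the round-robin loops near the bottom
 * of ivtv_irq_handler() look for a stream to start next.
 */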
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;
	u32 stat;
	int i;
	u8 vsync_force = 0;

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			if ((itv->last_vsync_field & 1) !=
			    (read_reg(IVTV_REG_DEC_LINE_FIELD) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n",
				       read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->eos_waitq);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}

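	/* Round-robin: irq_rr_idx rotates the starting stream index so a
	   single stream with pending transfers cannot starve the others. */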
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}

		if (i == IVTV_MAX_STREAMS &&
		    test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags))
			ivtv_udma_start(itv);
	}

	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}

	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
		queue_kthread_work(&itv->irq_worker, &itv->irq_work);
	}

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time.
	 */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}

void ivtv_unfinished_dma(unsigned long arg)
{
	struct ivtv *itv = (struct ivtv *)arg;

	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}