#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/slab.h>
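/*
 * VBI capture support for cx231xx-based USB devices: bulk URBs deliver the
 * raw VBI stream, SAV/EAV codes in that stream delimit individual VBI lines,
 * and completed frames are handed to user space through videobuf buffers.
 */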
	char *errmsg = "Unknown";

	switch (status) {
	case -ENOENT:
		errmsg = "unlinked synchronously";
		break;
	case -ECONNRESET:
		errmsg = "unlinked asynchronously";
		break;
	case -ENOSR:
		errmsg = "Buffer error (overrun)";
		break;
	case -EPIPE:
		errmsg = "Stalled (device not responding)";
		break;
	case -EOVERFLOW:
		errmsg = "Babble (bad cable?)";
		break;
	case -EPROTO:
		errmsg = "Bit-stuff error (bad cable?)";
		break;
	case -EILSEQ:
		errmsg = "CRC/Timeout (could be anything)";
		break;
	case -ETIME:
		errmsg = "Device does not respond";
		break;
	}
	if (packet < 0) {
		cx231xx_err(DRIVER_NAME "URB status %d [%s].\n", status, errmsg);
	} else {
		cx231xx_err(DRIVER_NAME "URB packet %d, status %d [%s].\n",
			    packet, status, errmsg);
	}
static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
{
	unsigned char *p_buffer;
	/* ... */

	if (urb->status < 0) {
		print_err_status(dev, -1, urb->status);
		if (urb->status == -ENOENT)
			return 0;
	}

	/* get buffer pointer and length */
	p_buffer = urb->transfer_buffer;
	buffer_size = urb->actual_length;
	if (buffer_size > 0) {
		/* copy the first (possibly partial) VBI line */
		bytes_parsed += cx231xx_get_vbi_line(dev, dma_q, sav_eav,
				p_buffer + bytes_parsed,
				buffer_size - bytes_parsed);

		/* now parse the lines that are completely inside this buffer */
		while (bytes_parsed < buffer_size) {
			sav_eav = cx231xx_find_next_SAV_EAV(
					p_buffer + bytes_parsed,
					buffer_size - bytes_parsed,
					&bytes_used);

			if (sav_eav && (bytes_parsed < buffer_size)) {
				bytes_parsed += cx231xx_get_vbi_line(dev, dma_q,
					sav_eav, p_buffer + bytes_parsed,
					buffer_size - bytes_parsed);
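/*
 * The VBI stream is parsed with BT.656-style SAV/EAV markers: each code
 * identifies the start/end of a line and which field it belongs to. A line
 * may straddle a URB boundary, so the DMA queue keeps partial-line state
 * (e.g. dma_q->is_partial_line and the saved partial_buf bytes) between URBs.
 */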
	*size = (dev->width * height * 2 * 2);
	unsigned long flags = 0;

	/* ... */
	if (dev->vbi_mode.bulk_ctl.buf == buf)
		dev->vbi_mode.bulk_ctl.buf = NULL;
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
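/*
 * The currently-filling buffer pointer is cleared under vbi_mode.slock so the
 * URB completion path cannot keep writing into a buffer that is about to be
 * freed.
 */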
	int rc = 0, urb_init = 0;

	/* ... */
	/* one frame: width * 2 bytes per line, height VBI lines, two fields */
	buf->vb.size = ((dev->width << 1) * height * 2);

	if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
		return -EINVAL;

	/* ... */
	if (!dev->vbi_mode.bulk_ctl.num_bufs)
		urb_init = 1;

	if (urb_init) {
		rc = cx231xx_init_vbi_isoc(dev, /* ... */
					   cx231xx_isoc_vbi_copy);
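/*
 * VBI URB streaming is started lazily: only when the first buffer is prepared
 * (num_bufs is still zero) does the driver bring up the transfer machinery,
 * registering cx231xx_isoc_vbi_copy() as the routine that parses each URB.
 */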
static struct videobuf_queue_ops cx231xx_vbi_qops = {
	.buf_setup   = vbi_buffer_setup,
	.buf_prepare = vbi_buffer_prepare,
	.buf_queue   = vbi_buffer_queue,
	.buf_release = vbi_buffer_release,
};
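/*
 * These callbacks are the (legacy) videobuf queue operations for the VBI
 * device node. The queue is initialized elsewhere in the driver with
 * videobuf_queue_vmalloc_init(), roughly like the sketch below (the queue
 * field name is illustrative and the exact argument list depends on the
 * videobuf API version being built against):
 *
 *	videobuf_queue_vmalloc_init(&fh->vb_vbiq, &cx231xx_vbi_qops, NULL,
 *				    &dev->vbi_mode.slock,
 *				    V4L2_BUF_TYPE_VBI_CAPTURE,
 *				    V4L2_FIELD_SEQ_TB,
 *				    sizeof(struct cx231xx_buffer), fh);
 */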
static void cx231xx_irq_vbi_callback(struct urb *urb)
{
	/* ... */
	switch (urb->status) {
	/* ... handle unlink/shutdown, log other errors ... */
	}

	/* hand the completed URB to the registered copy routine */
	dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb);
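/*
 * bulk_copy points at the routine registered through cx231xx_init_vbi_isoc()
 * (cx231xx_isoc_vbi_copy() above); in the full callback the copy runs under
 * vbi_mode.slock and the URB is resubmitted afterwards so streaming continues.
 */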
	dev->vbi_mode.bulk_ctl.nfields = -1;
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		/* ... kill or unlink the URB ... */
		if (dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
			kfree(dev->vbi_mode.bulk_ctl.transfer_buffer[i]);
			dev->vbi_mode.bulk_ctl.transfer_buffer[i] = NULL;
		}
		/* ... */
	}

	/* ... free the urb and transfer_buffer arrays themselves ... */
	dev->vbi_mode.bulk_ctl.num_bufs = 0;
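/*
 * Teardown order matters: every in-flight URB is killed (or unlinked when
 * running with interrupts disabled) before its transfer buffer is freed, then
 * the pointer arrays are released and num_bufs is zeroed so a later init
 * starts from a clean state.
 */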
int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
			  int num_bufs, int max_pkt_size,
			  int (*bulk_copy) (struct cx231xx *dev,
					    struct urb *urb))
{
	/* ... */

	/* clear any halt condition on the VBI bulk endpoint */
	usb_clear_halt(dev->udev,
		       usb_rcvbulkpipe(dev->udev,
				       dev->vbi_mode.end_point_addr));

	dev->vbi_mode.bulk_ctl.bulk_copy = bulk_copy;
	dev->vbi_mode.bulk_ctl.num_bufs = num_bufs;
	for (i = 0; i < 8; i++)
		dma_q->partial_buf[i] = 0;

	dev->vbi_mode.bulk_ctl.urb = kzalloc(sizeof(void *) * num_bufs,
					     GFP_KERNEL);
	/* ... */

	dev->vbi_mode.bulk_ctl.transfer_buffer =
	    kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
	if (!dev->vbi_mode.bulk_ctl.transfer_buffer) {
	dev->vbi_mode.bulk_ctl.max_pkt_size = max_pkt_size;
	/* ... */
	sb_size = max_packets * dev->vbi_mode.bulk_ctl.max_pkt_size;

	/* allocate urbs and transfer buffers */
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			cx231xx_err(DRIVER_NAME
				    ": cannot alloc bulk_ctl.urb %i\n", i);
			/* ... */
		}
		/* ... */
		urb->transfer_flags = 0;

		dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
		    kzalloc(sb_size, GFP_KERNEL);
		if (!dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
			cx231xx_err(DRIVER_NAME
				    ": unable to allocate %i bytes for transfer buffer %i%s\n",
				    sb_size, i,
		pipe = usb_rcvbulkpipe(dev->udev, dev->vbi_mode.end_point_addr);

		usb_fill_bulk_urb(urb, dev->udev, pipe,
				  dev->vbi_mode.bulk_ctl.transfer_buffer[i],
				  sb_size, cx231xx_irq_vbi_callback, dma_q);
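/*
 * Each URB is a bulk-IN transfer on the VBI endpoint, sb_size bytes long,
 * with the DMA queue as its completion context; completions are handled by
 * cx231xx_irq_vbi_callback() above.
 */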
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		rc = usb_submit_urb(dev->vbi_mode.bulk_ctl.urb[i], GFP_ATOMIC);
		if (rc) {
			cx231xx_err(DRIVER_NAME
				    ": submit of urb %i failed (error=%i)\n", i, rc);
u32 cx231xx_get_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			 u8 sav_eav, u8 *p_buffer, u32 buffer_size)
{
	u32 bytes_copied = 0;
	int current_field = -1;

	/* ... derive current_field from the SAV/EAV code ... */

	if (current_field < 0)
		return bytes_copied;
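/*
 * SAV/EAV codes that do not map to VBI field 1 or field 2 leave current_field
 * at -1, so the data is skipped and nothing is copied.
 */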
static inline void vbi_buffer_filled(struct cx231xx *dev,
				     struct cx231xx_dmaqueue *dma_q,
				     struct cx231xx_buffer *buf)
{
	/* ... */
	buf->vb.field_count++;
	if (bytes_to_copy > length)
		bytes_to_copy = length;
	/* ... */
		return bytes_to_copy;
	/* ... */
	dma_q->pos += bytes_to_copy;
	/* ... */
	/* frame complete: hand the buffer back to videobuf */
	vbi_buffer_filled(dev, dma_q, buf);
	/* ... */
	return bytes_to_copy;
	if (list_empty(&dma_q->active)) {
		/* ... no buffer queued: nothing to serve ... */
	}
	/* ... */
	memset(outp, 0, (*buf)->vb.size);
		get_next_vbi_buf(dma_q, &buf);
static int cx231xx_do_vbi_copy(struct cx231xx *dev,
			       struct cx231xx_dmaqueue *dma_q,
			       u8 *p_buffer, u32 bytes_to_copy)
{
	u32 current_line_bytes_copied = 0;
	/* ... */
	current_line_bytes_copied =
	    _line_size - dma_q->bytes_left_in_line;
	/* ... */
	offset = (dma_q->lines_completed * _line_size) +
		 current_line_bytes_copied;
	/* ... */
	startwrite = p_out_buffer + offset;
	/* ... */
	memcpy(startwrite, p_buffer, lencopy);
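/*
 * Destination math: each VBI line occupies _line_size (width * 2) bytes in
 * the output buffer, so the write offset is "completed lines * _line_size"
 * plus however many bytes of the current line were already copied from a
 * previous URB; lencopy is clamped so the copy never runs past the end of the
 * current line.
 */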