Linux Kernel 3.7.1
ivtv-queue.c
/*
    buffer queues.
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <[email protected]>
    Copyright (C) 2005-2007  Hans Verkuil <[email protected]>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "ivtv-driver.h"
#include "ivtv-queue.h"

int ivtv_buf_copy_from_user(struct ivtv_stream *s, struct ivtv_buffer *buf, const char __user *src, int copybytes)
{
        if (s->buf_size - buf->bytesused < copybytes)
                copybytes = s->buf_size - buf->bytesused;
        if (copy_from_user(buf->buf + buf->bytesused, src, copybytes)) {
                return -EFAULT;
        }
        buf->bytesused += copybytes;
        return copybytes;
}
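
/* Illustrative sketch (not part of ivtv-queue.c): how a caller such as a
   write() handler might use ivtv_buf_copy_from_user(). The helper copies at
   most s->buf_size - buf->bytesused bytes from the user pointer, advances
   buf->bytesused, and returns the number of bytes copied or -EFAULT if the
   user pointer faults. The names 'count' and 'user_buf' below are
   hypothetical.

        while (count > 0 && buf->bytesused < s->buf_size) {
                int copied = ivtv_buf_copy_from_user(s, buf, user_buf, count);

                if (copied < 0)
                        return copied;
                user_buf += copied;
                count -= copied;
        }
 */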

void ivtv_buf_swap(struct ivtv_buffer *buf)
{
        int i;

        for (i = 0; i < buf->bytesused; i += 4)
                swab32s((u32 *)(buf->buf + i));
}

void ivtv_queue_init(struct ivtv_queue *q)
{
        INIT_LIST_HEAD(&q->list);
        q->buffers = 0;
        q->length = 0;
        q->bytesused = 0;
}

void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q)
{
        unsigned long flags;

        /* clear the buffer if it is going to be enqueued to the free queue */
        if (q == &s->q_free) {
                buf->bytesused = 0;
                buf->readpos = 0;
                buf->b_flags = 0;
                buf->dma_xfer_cnt = 0;
        }
        spin_lock_irqsave(&s->qlock, flags);
        list_add_tail(&buf->list, &q->list);
        q->buffers++;
        q->length += s->buf_size;
        q->bytesused += buf->bytesused - buf->readpos;
        spin_unlock_irqrestore(&s->qlock, flags);
}

struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q)
{
        struct ivtv_buffer *buf = NULL;
        unsigned long flags;

        spin_lock_irqsave(&s->qlock, flags);
        if (!list_empty(&q->list)) {
                buf = list_entry(q->list.next, struct ivtv_buffer, list);
                list_del_init(q->list.next);
                q->buffers--;
                q->length -= s->buf_size;
                q->bytesused -= buf->bytesused - buf->readpos;
        }
        spin_unlock_irqrestore(&s->qlock, flags);
        return buf;
}
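
/* Illustrative sketch (not part of ivtv-queue.c): the typical producer
   pattern built on these two helpers. A free buffer is taken from q_free,
   filled, and handed to another per-stream queue (q_full here). Both
   helpers take s->qlock themselves, so the caller does not hold it. The
   fill step ('fill_buffer') is hypothetical.

        struct ivtv_buffer *buf = ivtv_dequeue(s, &s->q_free);

        if (buf == NULL)
                return -ENOMEM;
        buf->bytesused = fill_buffer(buf->buf, s->buf_size);
        ivtv_enqueue(s, buf, &s->q_full);
 */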
87 
88 static void ivtv_queue_move_buf(struct ivtv_stream *s, struct ivtv_queue *from,
89  struct ivtv_queue *to, int clear)
90 {
91  struct ivtv_buffer *buf = list_entry(from->list.next, struct ivtv_buffer, list);
92 
93  list_move_tail(from->list.next, &to->list);
94  from->buffers--;
95  from->length -= s->buf_size;
96  from->bytesused -= buf->bytesused - buf->readpos;
97  /* special handling for q_free */
98  if (clear)
99  buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
100  to->buffers++;
101  to->length += s->buf_size;
102  to->bytesused += buf->bytesused - buf->readpos;
103 }

/* Move 'needed_bytes' worth of buffers from queue 'from' into queue 'to'.
   If 'needed_bytes' == 0, then move all buffers from 'from' into 'to'.
   If 'steal' != NULL, then buffers may also be taken from that queue if
   needed, but only if 'from' is the free queue.

   The buffer is automatically cleared if it goes to the free queue. It is
   also cleared if buffers need to be taken from the 'steal' queue and
   the 'from' queue is the free queue.

   When 'from' is q_free, then needed_bytes is compared to the total
   available buffer length, otherwise needed_bytes is compared to the
   bytesused value. For the 'steal' queue the total available buffer
   length is always used.

   -ENOMEM is returned if the buffers could not be obtained, 0 if all
   buffers were obtained from the 'from' list, and otherwise the number
   of stolen buffers is returned. */
int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_queue *steal,
                    struct ivtv_queue *to, int needed_bytes)
{
        unsigned long flags;
        int rc = 0;
        int from_free = from == &s->q_free;
        int to_free = to == &s->q_free;
        int bytes_available, bytes_steal;

        spin_lock_irqsave(&s->qlock, flags);
        if (needed_bytes == 0) {
                from_free = 1;
                needed_bytes = from->length;
        }

        bytes_available = from_free ? from->length : from->bytesused;
        bytes_steal = (from_free && steal) ? steal->length : 0;

        if (bytes_available + bytes_steal < needed_bytes) {
                spin_unlock_irqrestore(&s->qlock, flags);
                return -ENOMEM;
        }
        while (bytes_available < needed_bytes) {
                struct ivtv_buffer *buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
                u16 dma_xfer_cnt = buf->dma_xfer_cnt;

                /* move buffers from the tail of the 'steal' queue to the tail of the
                   'from' queue. Always copy all the buffers with the same dma_xfer_cnt
                   value; this ensures that you do not end up with partial frame data
                   if one frame is stored in multiple buffers. */
                while (dma_xfer_cnt == buf->dma_xfer_cnt) {
                        list_move_tail(steal->list.prev, &from->list);
                        rc++;
                        steal->buffers--;
                        steal->length -= s->buf_size;
                        steal->bytesused -= buf->bytesused - buf->readpos;
                        buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
                        from->buffers++;
                        from->length += s->buf_size;
                        bytes_available += s->buf_size;
                        if (list_empty(&steal->list))
                                break;
                        buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
                }
        }
        if (from_free) {
                u32 old_length = to->length;

                while (to->length - old_length < needed_bytes) {
                        ivtv_queue_move_buf(s, from, to, 1);
                }
        }
        else {
                u32 old_bytesused = to->bytesused;

                while (to->bytesused - old_bytesused < needed_bytes) {
                        ivtv_queue_move_buf(s, from, to, to_free);
                }
        }
        spin_unlock_irqrestore(&s->qlock, flags);
        return rc;
}
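
/* Illustrative sketch (not part of ivtv-queue.c): using ivtv_queue_move() to
   gather buffers for a DMA transfer. Per the comment above, asking for
   'needed_bytes' from q_free may also steal buffers from another queue
   (q_full here) when the free queue alone cannot satisfy the request; the
   return value is then the number of stolen buffers, 0 if none were stolen,
   or -ENOMEM if the request cannot be met at all. The variable 'bytes_needed'
   is hypothetical.

        int stolen = ivtv_queue_move(s, &s->q_free, &s->q_full,
                                     &s->q_predma, bytes_needed);

        if (stolen < 0)
                return stolen;
 */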

void ivtv_flush_queues(struct ivtv_stream *s)
{
        ivtv_queue_move(s, &s->q_io, NULL, &s->q_free, 0);
        ivtv_queue_move(s, &s->q_full, NULL, &s->q_free, 0);
        ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
        ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
}

int ivtv_stream_alloc(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;
        int SGsize = sizeof(struct ivtv_sg_host_element) * s->buffers;
        int i;

        if (s->buffers == 0)
                return 0;

        IVTV_DEBUG_INFO("Allocate %s%s stream: %d x %d buffers (%dkB total)\n",
                s->dma != PCI_DMA_NONE ? "DMA " : "",
                s->name, s->buffers, s->buf_size, s->buffers * s->buf_size / 1024);

        s->sg_pending = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
        if (s->sg_pending == NULL) {
                IVTV_ERR("Could not allocate sg_pending for %s stream\n", s->name);
                return -ENOMEM;
        }
        s->sg_pending_size = 0;

        s->sg_processing = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
        if (s->sg_processing == NULL) {
                IVTV_ERR("Could not allocate sg_processing for %s stream\n", s->name);
                kfree(s->sg_pending);
                s->sg_pending = NULL;
                return -ENOMEM;
        }
        s->sg_processing_size = 0;

        s->sg_dma = kzalloc(sizeof(struct ivtv_sg_element),
                        GFP_KERNEL|__GFP_NOWARN);
        if (s->sg_dma == NULL) {
                IVTV_ERR("Could not allocate sg_dma for %s stream\n", s->name);
                kfree(s->sg_pending);
                s->sg_pending = NULL;
                kfree(s->sg_processing);
                s->sg_processing = NULL;
                return -ENOMEM;
        }
        if (ivtv_might_use_dma(s)) {
                s->sg_handle = pci_map_single(itv->pdev, s->sg_dma,
                                sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
                ivtv_stream_sync_for_cpu(s);
        }

        /* allocate stream buffers. Initially all buffers are in q_free. */
        for (i = 0; i < s->buffers; i++) {
                struct ivtv_buffer *buf = kzalloc(sizeof(struct ivtv_buffer),
                                GFP_KERNEL|__GFP_NOWARN);

                if (buf == NULL)
                        break;
                buf->buf = kmalloc(s->buf_size + 256, GFP_KERNEL|__GFP_NOWARN);
                if (buf->buf == NULL) {
                        kfree(buf);
                        break;
                }
                INIT_LIST_HEAD(&buf->list);
                if (ivtv_might_use_dma(s)) {
                        buf->dma_handle = pci_map_single(s->itv->pdev,
                                buf->buf, s->buf_size + 256, s->dma);
                        ivtv_buf_sync_for_cpu(s, buf);
                }
                ivtv_enqueue(s, buf, &s->q_free);
        }
        if (i == s->buffers)
                return 0;
        IVTV_ERR("Couldn't allocate buffers for %s stream\n", s->name);
        ivtv_stream_free(s);
        return -ENOMEM;
}

void ivtv_stream_free(struct ivtv_stream *s)
{
        struct ivtv_buffer *buf;

        /* move all buffers to q_free */
        ivtv_flush_queues(s);

        /* empty q_free */
        while ((buf = ivtv_dequeue(s, &s->q_free))) {
                if (ivtv_might_use_dma(s))
                        pci_unmap_single(s->itv->pdev, buf->dma_handle,
                                s->buf_size + 256, s->dma);
                kfree(buf->buf);
                kfree(buf);
        }

        /* Free SG Array/Lists */
        if (s->sg_dma != NULL) {
                if (s->sg_handle != IVTV_DMA_UNMAPPED) {
                        pci_unmap_single(s->itv->pdev, s->sg_handle,
                                sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
                        s->sg_handle = IVTV_DMA_UNMAPPED;
                }
                kfree(s->sg_pending);
                kfree(s->sg_processing);
                kfree(s->sg_dma);
                s->sg_pending = NULL;
                s->sg_processing = NULL;
                s->sg_dma = NULL;
                s->sg_pending_size = 0;
                s->sg_processing_size = 0;
        }
}
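
/* Illustrative sketch (not part of ivtv-queue.c): the allocation lifecycle
   suggested by the two functions above. ivtv_stream_alloc() builds the SG
   arrays and places every buffer on q_free; ivtv_stream_free() flushes all
   queues back to q_free and releases everything, which is why
   ivtv_stream_alloc() can safely call it on its own failure path.

        int err = ivtv_stream_alloc(s);

        if (err)
                return err;
        ...
        ivtv_stream_free(s);
 */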