Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
ispqueue.c
Go to the documentation of this file.
1 /*
2  * ispqueue.c
3  *
4  * TI OMAP3 ISP - Video buffers queue handling
5  *
6  * Copyright (C) 2010 Nokia Corporation
7  *
8  * Contacts: Laurent Pinchart <[email protected]>
9  * Sakari Ailus <[email protected]>
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License version 2 as
13  * published by the Free Software Foundation.
14  *
15  * This program is distributed in the hope that it will be useful, but
16  * WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23  * 02110-1301 USA
24  */
25 
26 #include <asm/cacheflush.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/mm.h>
29 #include <linux/pagemap.h>
30 #include <linux/poll.h>
31 #include <linux/scatterlist.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <linux/vmalloc.h>
35 
36 #include "ispqueue.h"
37 
38 /* -----------------------------------------------------------------------------
39  * Video buffers management
40  */
41 
42 /*
43  * isp_video_buffer_cache_sync - Keep the buffers coherent between CPU and ISP
44  *
45  * The typical operation required here is Cache Invalidation across
46  * the (user space) buffer address range. And this _must_ be done
47  * at QBUF stage (and *only* at QBUF).
48  *
49  * We try to use optimal cache invalidation function:
50  * - dmac_map_area:
51  * - used when the number of pages are _low_.
52  * - it becomes quite slow as the number of pages increase.
53  * - for 648x492 viewfinder (150 pages) it takes 1.3 ms.
54  * - for 5 Mpix buffer (2491 pages) it takes between 25-50 ms.
55  *
56  * - flush_cache_all:
57  * - used when the number of pages are _high_.
58  * - time taken in the range of 500-900 us.
59  * - has a higher penalty but, as whole dcache + icache is invalidated
60  */
61 /*
62  * FIXME: dmac_inv_range crashes randomly on the user space buffer
63  * address. Fall back to flush_cache_all for now.
64  */
65 #define ISP_CACHE_FLUSH_PAGES_MAX 0
66 
67 static void isp_video_buffer_cache_sync(struct isp_video_buffer *buf)
68 {
69  if (buf->skip_cache)
70  return;
71 
72  if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
75  else {
76  dmac_map_area((void *)buf->vbuf.m.userptr, buf->vbuf.length,
78  outer_inv_range(buf->vbuf.m.userptr,
79  buf->vbuf.m.userptr + buf->vbuf.length);
80  }
81 }
82 
83 /*
84  * isp_video_buffer_lock_vma - Prevent VMAs from being unmapped
85  *
86  * Lock the VMAs underlying the given buffer into memory. This avoids the
87  * userspace buffer mapping from being swapped out, making VIPT cache handling
88  * easier.
89  *
90  * Note that the pages will not be freed as the buffers have been locked to
91  * memory using by a call to get_user_pages(), but the userspace mapping could
92  * still disappear if the VMAs are not locked. This is caused by the memory
93  * management code trying to be as lock-less as possible, which results in the
94  * userspace mapping manager not finding out that the pages are locked under
95  * some conditions.
96  */
97 static int isp_video_buffer_lock_vma(struct isp_video_buffer *buf, int lock)
98 {
99  struct vm_area_struct *vma;
100  unsigned long start;
101  unsigned long end;
102  int ret = 0;
103 
104  if (buf->vbuf.memory == V4L2_MEMORY_MMAP)
105  return 0;
106 
107  /* We can be called from workqueue context if the current task dies to
108  * unlock the VMAs. In that case there's no current memory management
109  * context so unlocking can't be performed, but the VMAs have been or
110  * are getting destroyed anyway so it doesn't really matter.
111  */
112  if (!current || !current->mm)
113  return lock ? -EINVAL : 0;
114 
115  start = buf->vbuf.m.userptr;
116  end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
117 
118  down_write(&current->mm->mmap_sem);
119  spin_lock(&current->mm->page_table_lock);
120 
121  do {
122  vma = find_vma(current->mm, start);
123  if (vma == NULL) {
124  ret = -EFAULT;
125  goto out;
126  }
127 
128  if (lock)
129  vma->vm_flags |= VM_LOCKED;
130  else
131  vma->vm_flags &= ~VM_LOCKED;
132 
133  start = vma->vm_end + 1;
134  } while (vma->vm_end < end);
135 
136  if (lock)
137  buf->vm_flags |= VM_LOCKED;
138  else
139  buf->vm_flags &= ~VM_LOCKED;
140 
141 out:
142  spin_unlock(&current->mm->page_table_lock);
143  up_write(&current->mm->mmap_sem);
144  return ret;
145 }
146 
147 /*
148  * isp_video_buffer_sglist_kernel - Build a scatter list for a vmalloc'ed buffer
149  *
150  * Iterate over the vmalloc'ed area and create a scatter list entry for every
151  * page.
152  */
153 static int isp_video_buffer_sglist_kernel(struct isp_video_buffer *buf)
154 {
155  struct scatterlist *sglist;
156  unsigned int npages;
157  unsigned int i;
158  void *addr;
159 
160  addr = buf->vaddr;
161  npages = PAGE_ALIGN(buf->vbuf.length) >> PAGE_SHIFT;
162 
163  sglist = vmalloc(npages * sizeof(*sglist));
164  if (sglist == NULL)
165  return -ENOMEM;
166 
167  sg_init_table(sglist, npages);
168 
169  for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
170  struct page *page = vmalloc_to_page(addr);
171 
172  if (page == NULL || PageHighMem(page)) {
173  vfree(sglist);
174  return -EINVAL;
175  }
176 
177  sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
178  }
179 
180  buf->sglen = npages;
181  buf->sglist = sglist;
182 
183  return 0;
184 }
185 
186 /*
187  * isp_video_buffer_sglist_user - Build a scatter list for a userspace buffer
188  *
189  * Walk the buffer pages list and create a 1:1 mapping to a scatter list.
190  */
191 static int isp_video_buffer_sglist_user(struct isp_video_buffer *buf)
192 {
193  struct scatterlist *sglist;
194  unsigned int offset = buf->offset;
195  unsigned int i;
196 
197  sglist = vmalloc(buf->npages * sizeof(*sglist));
198  if (sglist == NULL)
199  return -ENOMEM;
200 
201  sg_init_table(sglist, buf->npages);
202 
203  for (i = 0; i < buf->npages; ++i) {
204  if (PageHighMem(buf->pages[i])) {
205  vfree(sglist);
206  return -EINVAL;
207  }
208 
209  sg_set_page(&sglist[i], buf->pages[i], PAGE_SIZE - offset,
210  offset);
211  offset = 0;
212  }
213 
214  buf->sglen = buf->npages;
215  buf->sglist = sglist;
216 
217  return 0;
218 }
219 
220 /*
221  * isp_video_buffer_sglist_pfnmap - Build a scatter list for a VM_PFNMAP buffer
222  *
223  * Create a scatter list of physically contiguous pages starting at the buffer
224  * memory physical address.
225  */
226 static int isp_video_buffer_sglist_pfnmap(struct isp_video_buffer *buf)
227 {
228  struct scatterlist *sglist;
229  unsigned int offset = buf->offset;
230  unsigned long pfn = buf->paddr >> PAGE_SHIFT;
231  unsigned int i;
232 
233  sglist = vmalloc(buf->npages * sizeof(*sglist));
234  if (sglist == NULL)
235  return -ENOMEM;
236 
237  sg_init_table(sglist, buf->npages);
238 
239  for (i = 0; i < buf->npages; ++i, ++pfn) {
240  sg_set_page(&sglist[i], pfn_to_page(pfn), PAGE_SIZE - offset,
241  offset);
242  /* PFNMAP buffers will not get DMA-mapped, set the DMA address
243  * manually.
244  */
245  sg_dma_address(&sglist[i]) = (pfn << PAGE_SHIFT) + offset;
246  offset = 0;
247  }
248 
249  buf->sglen = buf->npages;
250  buf->sglist = sglist;
251 
252  return 0;
253 }
254 
255 /*
256  * isp_video_buffer_cleanup - Release pages for a userspace VMA.
257  *
258  * Release pages locked by a call isp_video_buffer_prepare_user and free the
259  * pages table.
260  */
261 static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
262 {
264  unsigned int i;
265 
266  if (buf->queue->ops->buffer_cleanup)
267  buf->queue->ops->buffer_cleanup(buf);
268 
269  if (!(buf->vm_flags & VM_PFNMAP)) {
270  direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
272  dma_unmap_sg(buf->queue->dev, buf->sglist, buf->sglen,
273  direction);
274  }
275 
276  vfree(buf->sglist);
277  buf->sglist = NULL;
278  buf->sglen = 0;
279 
280  if (buf->pages != NULL) {
281  isp_video_buffer_lock_vma(buf, 0);
282 
283  for (i = 0; i < buf->npages; ++i)
284  page_cache_release(buf->pages[i]);
285 
286  vfree(buf->pages);
287  buf->pages = NULL;
288  }
289 
290  buf->npages = 0;
291  buf->skip_cache = false;
292 }
293 
294 /*
295  * isp_video_buffer_prepare_user - Pin userspace VMA pages to memory.
296  *
297  * This function creates a list of pages for a userspace VMA. The number of
298  * pages is first computed based on the buffer size, and pages are then
299  * retrieved by a call to get_user_pages.
300  *
301  * Pages are pinned to memory by get_user_pages, making them available for DMA
302  * transfers. However, due to memory management optimization, it seems the
303  * get_user_pages doesn't guarantee that the pinned pages will not be written
304  * to swap and removed from the userspace mapping(s). When this happens, a page
305  * fault can be generated when accessing those unmapped pages.
306  *
307  * If the fault is triggered by a page table walk caused by VIPT cache
308  * management operations, the page fault handler might oops if the MM semaphore
309  * is held, as it can't handle kernel page faults in that case. To fix that, a
310  * fixup entry needs to be added to the cache management code, or the userspace
311  * VMA must be locked to avoid removing pages from the userspace mapping in the
312  * first place.
313  *
314  * If the number of pages retrieved is smaller than the number required by the
315  * buffer size, the function returns -EFAULT.
316  */
317 static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf)
318 {
319  unsigned long data;
320  unsigned int first;
321  unsigned int last;
322  int ret;
323 
324  data = buf->vbuf.m.userptr;
325  first = (data & PAGE_MASK) >> PAGE_SHIFT;
326  last = ((data + buf->vbuf.length - 1) & PAGE_MASK) >> PAGE_SHIFT;
327 
328  buf->offset = data & ~PAGE_MASK;
329  buf->npages = last - first + 1;
330  buf->pages = vmalloc(buf->npages * sizeof(buf->pages[0]));
331  if (buf->pages == NULL)
332  return -ENOMEM;
333 
334  down_read(&current->mm->mmap_sem);
335  ret = get_user_pages(current, current->mm, data & PAGE_MASK,
336  buf->npages,
337  buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
338  buf->pages, NULL);
339  up_read(&current->mm->mmap_sem);
340 
341  if (ret != buf->npages) {
342  buf->npages = ret < 0 ? 0 : ret;
343  isp_video_buffer_cleanup(buf);
344  return -EFAULT;
345  }
346 
347  ret = isp_video_buffer_lock_vma(buf, 1);
348  if (ret < 0)
349  isp_video_buffer_cleanup(buf);
350 
351  return ret;
352 }
353 
354 /*
355  * isp_video_buffer_prepare_pfnmap - Validate a VM_PFNMAP userspace buffer
356  *
357  * Userspace VM_PFNMAP buffers are supported only if they are contiguous in
358  * memory and if they span a single VMA.
359  *
360  * Return 0 if the buffer is valid, or -EFAULT otherwise.
361  */
362 static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf)
363 {
364  struct vm_area_struct *vma;
365  unsigned long prev_pfn;
366  unsigned long this_pfn;
367  unsigned long start;
368  unsigned long end;
369  dma_addr_t pa;
370  int ret = -EFAULT;
371 
372  start = buf->vbuf.m.userptr;
373  end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
374 
375  buf->offset = start & ~PAGE_MASK;
376  buf->npages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
377  buf->pages = NULL;
378 
379  down_read(&current->mm->mmap_sem);
380  vma = find_vma(current->mm, start);
381  if (vma == NULL || vma->vm_end < end)
382  goto done;
383 
384  for (prev_pfn = 0; start <= end; start += PAGE_SIZE) {
385  ret = follow_pfn(vma, start, &this_pfn);
386  if (ret)
387  goto done;
388 
389  if (prev_pfn == 0)
390  pa = this_pfn << PAGE_SHIFT;
391  else if (this_pfn != prev_pfn + 1) {
392  ret = -EFAULT;
393  goto done;
394  }
395 
396  prev_pfn = this_pfn;
397  }
398 
399  buf->paddr = pa + buf->offset;
400  ret = 0;
401 
402 done:
403  up_read(&current->mm->mmap_sem);
404  return ret;
405 }
406 
407 /*
408  * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address
409  *
410  * This function locates the VMAs for the buffer's userspace address and checks
411  * that their flags match. The only flag that we need to care for at the moment
412  * is VM_PFNMAP.
413  *
414  * The buffer vm_flags field is set to the first VMA flags.
415  *
416  * Return -EFAULT if no VMA can be found for part of the buffer, or if the VMAs
417  * have incompatible flags.
418  */
419 static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf)
420 {
421  struct vm_area_struct *vma;
423  unsigned long start;
424  unsigned long end;
425  int ret = -EFAULT;
426 
427  start = buf->vbuf.m.userptr;
428  end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
429 
430  down_read(&current->mm->mmap_sem);
431 
432  do {
433  vma = find_vma(current->mm, start);
434  if (vma == NULL)
435  goto done;
436 
437  if (start == buf->vbuf.m.userptr) {
438  buf->vm_flags = vma->vm_flags;
439  vm_page_prot = vma->vm_page_prot;
440  }
441 
442  if ((buf->vm_flags ^ vma->vm_flags) & VM_PFNMAP)
443  goto done;
444 
445  if (vm_page_prot != vma->vm_page_prot)
446  goto done;
447 
448  start = vma->vm_end + 1;
449  } while (vma->vm_end < end);
450 
451  /* Skip cache management to enhance performances for non-cached or
452  * write-combining buffers.
453  */
454  if (vm_page_prot == pgprot_noncached(vm_page_prot) ||
455  vm_page_prot == pgprot_writecombine(vm_page_prot))
456  buf->skip_cache = true;
457 
458  ret = 0;
459 
460 done:
461  up_read(&current->mm->mmap_sem);
462  return ret;
463 }
464 
465 /*
466  * isp_video_buffer_prepare - Make a buffer ready for operation
467  *
468  * Preparing a buffer involves:
469  *
470  * - validating VMAs (userspace buffers only)
471  * - locking pages and VMAs into memory (userspace buffers only)
472  * - building page and scatter-gather lists
473  * - mapping buffers for DMA operation
474  * - performing driver-specific preparation
475  *
476  * The function must be called in userspace context with a valid mm context
477  * (this excludes cleanup paths such as sys_close when the userspace process
478  * segfaults).
479  */
480 static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
481 {
483  int ret;
484 
485  switch (buf->vbuf.memory) {
486  case V4L2_MEMORY_MMAP:
487  ret = isp_video_buffer_sglist_kernel(buf);
488  break;
489 
490  case V4L2_MEMORY_USERPTR:
491  ret = isp_video_buffer_prepare_vm_flags(buf);
492  if (ret < 0)
493  return ret;
494 
495  if (buf->vm_flags & VM_PFNMAP) {
496  ret = isp_video_buffer_prepare_pfnmap(buf);
497  if (ret < 0)
498  return ret;
499 
500  ret = isp_video_buffer_sglist_pfnmap(buf);
501  } else {
502  ret = isp_video_buffer_prepare_user(buf);
503  if (ret < 0)
504  return ret;
505 
506  ret = isp_video_buffer_sglist_user(buf);
507  }
508  break;
509 
510  default:
511  return -EINVAL;
512  }
513 
514  if (ret < 0)
515  goto done;
516 
517  if (!(buf->vm_flags & VM_PFNMAP)) {
518  direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
520  ret = dma_map_sg(buf->queue->dev, buf->sglist, buf->sglen,
521  direction);
522  if (ret != buf->sglen) {
523  ret = -EFAULT;
524  goto done;
525  }
526  }
527 
528  if (buf->queue->ops->buffer_prepare)
529  ret = buf->queue->ops->buffer_prepare(buf);
530 
531 done:
532  if (ret < 0) {
533  isp_video_buffer_cleanup(buf);
534  return ret;
535  }
536 
537  return ret;
538 }
539 
540 /*
541  * isp_video_queue_query - Query the status of a given buffer
542  *
543  * Locking: must be called with the queue lock held.
544  */
545 static void isp_video_buffer_query(struct isp_video_buffer *buf,
546  struct v4l2_buffer *vbuf)
547 {
548  memcpy(vbuf, &buf->vbuf, sizeof(*vbuf));
549 
550  if (buf->vma_use_count)
551  vbuf->flags |= V4L2_BUF_FLAG_MAPPED;
552 
553  switch (buf->state) {
554  case ISP_BUF_STATE_ERROR:
555  vbuf->flags |= V4L2_BUF_FLAG_ERROR;
556  case ISP_BUF_STATE_DONE:
557  vbuf->flags |= V4L2_BUF_FLAG_DONE;
560  vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
561  break;
562  case ISP_BUF_STATE_IDLE:
563  default:
564  break;
565  }
566 }
567 
568 /*
569  * isp_video_buffer_wait - Wait for a buffer to be ready
570  *
571  * In non-blocking mode, return immediately with 0 if the buffer is ready or
572  * -EAGAIN if the buffer is in the QUEUED or ACTIVE state.
573  *
574  * In blocking mode, wait (interruptibly but with no timeout) on the buffer wait
575  * queue using the same condition.
576  */
577 static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking)
578 {
579  if (nonblocking) {
580  return (buf->state != ISP_BUF_STATE_QUEUED &&
581  buf->state != ISP_BUF_STATE_ACTIVE)
582  ? 0 : -EAGAIN;
583  }
584 
585  return wait_event_interruptible(buf->wait,
586  buf->state != ISP_BUF_STATE_QUEUED &&
587  buf->state != ISP_BUF_STATE_ACTIVE);
588 }
589 
590 /* -----------------------------------------------------------------------------
591  * Queue management
592  */
593 
594 /*
595  * isp_video_queue_free - Free video buffers memory
596  *
597  * Buffers can only be freed if the queue isn't streaming and if no buffer is
598  * mapped to userspace. Return -EBUSY if those conditions aren't statisfied.
599  *
600  * This function must be called with the queue lock held.
601  */
602 static int isp_video_queue_free(struct isp_video_queue *queue)
603 {
604  unsigned int i;
605 
606  if (queue->streaming)
607  return -EBUSY;
608 
609  for (i = 0; i < queue->count; ++i) {
610  if (queue->buffers[i]->vma_use_count != 0)
611  return -EBUSY;
612  }
613 
614  for (i = 0; i < queue->count; ++i) {
615  struct isp_video_buffer *buf = queue->buffers[i];
616 
617  isp_video_buffer_cleanup(buf);
618 
619  vfree(buf->vaddr);
620  buf->vaddr = NULL;
621 
622  kfree(buf);
623  queue->buffers[i] = NULL;
624  }
625 
626  INIT_LIST_HEAD(&queue->queue);
627  queue->count = 0;
628  return 0;
629 }
630 
631 /*
632  * isp_video_queue_alloc - Allocate video buffers memory
633  *
634  * This function must be called with the queue lock held.
635  */
636 static int isp_video_queue_alloc(struct isp_video_queue *queue,
637  unsigned int nbuffers,
638  unsigned int size, enum v4l2_memory memory)
639 {
640  struct isp_video_buffer *buf;
641  unsigned int i;
642  void *mem;
643  int ret;
644 
645  /* Start by freeing the buffers. */
646  ret = isp_video_queue_free(queue);
647  if (ret < 0)
648  return ret;
649 
650  /* Bail out if no buffers should be allocated. */
651  if (nbuffers == 0)
652  return 0;
653 
654  /* Initialize the allocated buffers. */
655  for (i = 0; i < nbuffers; ++i) {
656  buf = kzalloc(queue->bufsize, GFP_KERNEL);
657  if (buf == NULL)
658  break;
659 
660  if (memory == V4L2_MEMORY_MMAP) {
661  /* Allocate video buffers memory for mmap mode. Align
662  * the size to the page size.
663  */
664  mem = vmalloc_32_user(PAGE_ALIGN(size));
665  if (mem == NULL) {
666  kfree(buf);
667  break;
668  }
669 
670  buf->vbuf.m.offset = i * PAGE_ALIGN(size);
671  buf->vaddr = mem;
672  }
673 
674  buf->vbuf.index = i;
675  buf->vbuf.length = size;
676  buf->vbuf.type = queue->type;
677  buf->vbuf.field = V4L2_FIELD_NONE;
678  buf->vbuf.memory = memory;
679 
680  buf->queue = queue;
681  init_waitqueue_head(&buf->wait);
682 
683  queue->buffers[i] = buf;
684  }
685 
686  if (i == 0)
687  return -ENOMEM;
688 
689  queue->count = i;
690  return nbuffers;
691 }
692 
/* NOTE(review): the signature of this function was lost in extraction.
 * The body simply frees all queue buffers, so this is presumably the
 * queue cleanup entry point — verify against the original source.
 */
{
	return isp_video_queue_free(queue);
}
708 
/* NOTE(review): the first line(s) of this function's signature were lost
 * in extraction; the remaining parameters and body match a queue
 * initialization entry point — verify against the original source.
 */
			     enum v4l2_buf_type type,
			     const struct isp_video_queue_operations *ops,
			     struct device *dev, unsigned int bufsize)
{
	/* Initialize the buffer list and the locks protecting it. */
	INIT_LIST_HEAD(&queue->queue);
	mutex_init(&queue->lock);
	spin_lock_init(&queue->irqlock);

	/* Store the caller-provided configuration verbatim. */
	queue->type = type;
	queue->ops = ops;
	queue->dev = dev;
	queue->bufsize = bufsize;

	return 0;
}
746 
747 /* -----------------------------------------------------------------------------
748  * V4L2 operations
749  */
750 
/* NOTE(review): the first line of this function's signature was lost in
 * extraction; the body matches a VIDIOC_REQBUFS handler — verify against
 * the original source.
 */
			      struct v4l2_requestbuffers *rb)
{
	unsigned int nbuffers = rb->count;
	unsigned int size;
	int ret;

	if (rb->type != queue->type)
		return -EINVAL;

	/* Let the driver adjust the buffer count and compute the size. */
	queue->ops->queue_prepare(queue, &nbuffers, &size);
	if (size == 0)
		return -EINVAL;

	nbuffers = min_t(unsigned int, nbuffers, ISP_VIDEO_MAX_BUFFERS);

	mutex_lock(&queue->lock);

	ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory);
	if (ret < 0)
		goto done;

	/* Report the number of buffers actually allocated. */
	rb->count = ret;
	ret = 0;

done:
	mutex_unlock(&queue->lock);
	return ret;
}
801 
/* NOTE(review): the first line of this function's signature was lost in
 * extraction; the body matches a VIDIOC_QUERYBUF handler — verify against
 * the original source.
 */
				struct v4l2_buffer *vbuf)
{
	struct isp_video_buffer *buf;
	int ret = 0;

	if (vbuf->type != queue->type)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (vbuf->index >= queue->count) {
		ret = -EINVAL;
		goto done;
	}

	/* Copy the buffer state into the caller's v4l2_buffer. */
	buf = queue->buffers[vbuf->index];
	isp_video_buffer_query(buf, vbuf);

done:
	mutex_unlock(&queue->lock);
	return ret;
}
833 
/* NOTE(review): the first line of this function's signature was lost in
 * extraction; the body matches a VIDIOC_QBUF handler — verify against the
 * original source.
 */
			      struct v4l2_buffer *vbuf)
{
	struct isp_video_buffer *buf;
	unsigned long flags;
	int ret = -EINVAL;

	/* NOTE(review): this early exit jumps to 'done', which calls
	 * mutex_unlock() before the mutex below has been taken — this looks
	 * like an unlock-of-unheld-mutex path; confirm against the original
	 * source and upstream fixes.
	 */
	if (vbuf->type != queue->type)
		goto done;

	mutex_lock(&queue->lock);

	if (vbuf->index >= queue->count)
		goto done;

	buf = queue->buffers[vbuf->index];

	/* The memory type can't change after the initial QBUF. */
	if (vbuf->memory != buf->vbuf.memory)
		goto done;

	/* Only idle buffers may be queued. */
	if (buf->state != ISP_BUF_STATE_IDLE)
		goto done;

	/* USERPTR buffers may not shrink below the negotiated size. */
	if (vbuf->memory == V4L2_MEMORY_USERPTR &&
	    vbuf->length < buf->vbuf.length)
		goto done;

	/* A new userspace pointer invalidates any previous preparation. */
	if (vbuf->memory == V4L2_MEMORY_USERPTR &&
	    vbuf->m.userptr != buf->vbuf.m.userptr) {
		isp_video_buffer_cleanup(buf);
		buf->vbuf.m.userptr = vbuf->m.userptr;
		buf->prepared = 0;
	}

	if (!buf->prepared) {
		ret = isp_video_buffer_prepare(buf);
		if (ret < 0)
			goto done;
		buf->prepared = 1;
	}

	isp_video_buffer_cache_sync(buf);

	/* NOTE(review): a line appears to be missing here in this extraction
	 * (likely setting the buffer state to QUEUED) — verify against the
	 * original source.
	 */
	list_add_tail(&buf->stream, &queue->queue);

	/* If already streaming, hand the buffer to the driver right away. */
	if (queue->streaming) {
		spin_lock_irqsave(&queue->irqlock, flags);
		queue->ops->buffer_queue(buf);
		spin_unlock_irqrestore(&queue->irqlock, flags);
	}

	ret = 0;

done:
	mutex_unlock(&queue->lock);
	return ret;
}
905 
/* NOTE(review): the first line of this function's signature was lost in
 * extraction; the body matches a VIDIOC_DQBUF handler — verify against
 * the original source.
 */
			      struct v4l2_buffer *vbuf, int nonblocking)
{
	struct isp_video_buffer *buf;
	int ret;

	if (vbuf->type != queue->type)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (list_empty(&queue->queue)) {
		ret = -EINVAL;
		goto done;
	}

	/* Wait (per the nonblocking flag) for the oldest buffer. */
	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
	ret = isp_video_buffer_wait(buf, nonblocking);
	if (ret < 0)
		goto done;

	list_del(&buf->stream);

	/* Report its state to userspace and return it to IDLE. */
	isp_video_buffer_query(buf, vbuf);
	buf->state = ISP_BUF_STATE_IDLE;
	vbuf->flags &= ~V4L2_BUF_FLAG_QUEUED;

done:
	mutex_unlock(&queue->lock);
	return ret;
}
951 
/* NOTE(review): the signature of this function was lost in extraction;
 * the body matches a VIDIOC_STREAMON handler — verify against the
 * original source.
 */
{
	struct isp_video_buffer *buf;
	unsigned long flags;

	mutex_lock(&queue->lock);

	/* Starting an already-streaming queue is a no-op. */
	if (queue->streaming)
		goto done;

	queue->streaming = 1;

	/* Hand all pre-queued buffers to the driver under the irqlock. */
	spin_lock_irqsave(&queue->irqlock, flags);
	list_for_each_entry(buf, &queue->queue, stream)
		queue->ops->buffer_queue(buf);
	spin_unlock_irqrestore(&queue->irqlock, flags);

done:
	mutex_unlock(&queue->lock);
	return 0;
}
982 
/* NOTE(review): the signature of this function was lost in extraction;
 * the body matches a VIDIOC_STREAMOFF handler — verify against the
 * original source.
 */
{
	struct isp_video_buffer *buf;
	unsigned long flags;
	unsigned int i;

	mutex_lock(&queue->lock);

	/* Stopping a non-streaming queue is a no-op. */
	if (!queue->streaming)
		goto done;

	queue->streaming = 0;

	/* Wake anyone waiting on still-active buffers and reset every
	 * buffer to IDLE.
	 */
	spin_lock_irqsave(&queue->irqlock, flags);
	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];

		if (buf->state == ISP_BUF_STATE_ACTIVE)
			wake_up(&buf->wait);

		buf->state = ISP_BUF_STATE_IDLE;
	}
	spin_unlock_irqrestore(&queue->irqlock, flags);

	INIT_LIST_HEAD(&queue->queue);

done:
	mutex_unlock(&queue->lock);
}
1022 
/* NOTE(review): the signature of this function was lost in extraction;
 * the body marks every DONE buffer as ERROR while streaming — presumably
 * a "discard done buffers" helper; verify against the original source.
 */
{
	struct isp_video_buffer *buf;
	unsigned int i;

	mutex_lock(&queue->lock);

	if (!queue->streaming)
		goto done;

	/* Invalidate completed buffers so userspace sees them as errors. */
	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];

		if (buf->state == ISP_BUF_STATE_DONE)
			buf->state = ISP_BUF_STATE_ERROR;
	}

done:
	mutex_unlock(&queue->lock);
}
1054 
1055 static void isp_video_queue_vm_open(struct vm_area_struct *vma)
1056 {
1057  struct isp_video_buffer *buf = vma->vm_private_data;
1058 
1059  buf->vma_use_count++;
1060 }
1061 
1062 static void isp_video_queue_vm_close(struct vm_area_struct *vma)
1063 {
1064  struct isp_video_buffer *buf = vma->vm_private_data;
1065 
1066  buf->vma_use_count--;
1067 }
1068 
/* VMA open/close hooks used to refcount userspace mappings of a buffer;
 * the count gates buffer freeing in isp_video_queue_free().
 */
static const struct vm_operations_struct isp_video_queue_vm_ops = {
	.open = isp_video_queue_vm_open,
	.close = isp_video_queue_vm_close,
};
1073 
/* NOTE(review): the first line of this function's signature was lost in
 * extraction; the body matches the queue's mmap() handler — verify
 * against the original source.
 */
			     struct vm_area_struct *vma)
{
	struct isp_video_buffer *uninitialized_var(buf);
	unsigned long size;
	unsigned int i;
	int ret = 0;

	mutex_lock(&queue->lock);

	/* Locate the buffer whose mmap offset matches the VMA's pgoff. */
	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];
		if ((buf->vbuf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
			break;
	}

	if (i == queue->count) {
		ret = -EINVAL;
		goto done;
	}

	size = vma->vm_end - vma->vm_start;

	/* Only MMAP buffers may be mapped, and only at their full
	 * page-aligned size.
	 */
	if (buf->vbuf.memory != V4L2_MEMORY_MMAP ||
	    size != PAGE_ALIGN(buf->vbuf.length)) {
		ret = -EINVAL;
		goto done;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret < 0)
		goto done;

	/* Install the refcounting hooks and account for this mapping. */
	vma->vm_ops = &isp_video_queue_vm_ops;
	vma->vm_private_data = buf;
	isp_video_queue_vm_open(vma);

done:
	mutex_unlock(&queue->lock);
	return ret;
}
1123 
1133 unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
1134  struct file *file, poll_table *wait)
1135 {
1136  struct isp_video_buffer *buf;
1137  unsigned int mask = 0;
1138 
1139  mutex_lock(&queue->lock);
1140  if (list_empty(&queue->queue)) {
1141  mask |= POLLERR;
1142  goto done;
1143  }
1144  buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
1145 
1146  poll_wait(file, &buf->wait, wait);
1147  if (buf->state == ISP_BUF_STATE_DONE ||
1148  buf->state == ISP_BUF_STATE_ERROR) {
1149  if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1150  mask |= POLLIN | POLLRDNORM;
1151  else
1152  mask |= POLLOUT | POLLWRNORM;
1153  }
1154 
1155 done:
1156  mutex_unlock(&queue->lock);
1157  return mask;
1158 }