videobuf2-core.c
1 /*
2  * videobuf2-core.c - V4L2 driver helper framework
3  *
4  * Copyright (C) 2010 Samsung Electronics
5  *
6  * Author: Pawel Osciak <[email protected]>
7  * Marek Szyprowski <[email protected]>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation.
12  */
13 
14 #include <linux/err.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/mm.h>
18 #include <linux/poll.h>
19 #include <linux/slab.h>
20 #include <linux/sched.h>
21 
22 #include <media/v4l2-dev.h>
23 #include <media/v4l2-fh.h>
24 #include <media/v4l2-event.h>
25 #include <media/videobuf2-core.h>
26 
27 static int debug;
28 module_param(debug, int, 0644);
29 
30 #define dprintk(level, fmt, arg...) \
31  do { \
32  if (debug >= level) \
33  printk(KERN_DEBUG "vb2: " fmt, ## arg); \
34  } while (0)
35 
36 #define call_memop(q, op, args...) \
37  (((q)->mem_ops->op) ? \
38  ((q)->mem_ops->op(args)) : 0)
39 
40 #define call_qop(q, op, args...) \
41  (((q)->ops->op) ? ((q)->ops->op(args)) : 0)
42 
43 #define V4L2_BUFFER_STATE_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
44  V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
45  V4L2_BUF_FLAG_PREPARED)
46 
50 static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
51 {
52  struct vb2_queue *q = vb->vb2_queue;
53  void *mem_priv;
54  int plane;
55 
56  /* Allocate memory for all planes in this buffer */
57  for (plane = 0; plane < vb->num_planes; ++plane) {
58  mem_priv = call_memop(q, alloc, q->alloc_ctx[plane],
59  q->plane_sizes[plane]);
60  if (IS_ERR_OR_NULL(mem_priv))
61  goto free;
62 
63  /* Associate allocator private data with this plane */
64  vb->planes[plane].mem_priv = mem_priv;
65  vb->v4l2_planes[plane].length = q->plane_sizes[plane];
66  }
67 
68  return 0;
69 free:
70  /* Free already allocated memory if one of the allocations failed */
71  for (; plane > 0; --plane) {
72  call_memop(q, put, vb->planes[plane - 1].mem_priv);
73  vb->planes[plane - 1].mem_priv = NULL;
74  }
75 
76  return -ENOMEM;
77 }
78 
82 static void __vb2_buf_mem_free(struct vb2_buffer *vb)
83 {
84  struct vb2_queue *q = vb->vb2_queue;
85  unsigned int plane;
86 
87  for (plane = 0; plane < vb->num_planes; ++plane) {
88  call_memop(q, put, vb->planes[plane].mem_priv);
89  vb->planes[plane].mem_priv = NULL;
90  dprintk(3, "Freed plane %d of buffer %d\n", plane,
91  vb->v4l2_buf.index);
92  }
93 }
94 
99 static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
100 {
101  struct vb2_queue *q = vb->vb2_queue;
102  unsigned int plane;
103 
104  for (plane = 0; plane < vb->num_planes; ++plane) {
105  if (vb->planes[plane].mem_priv)
106  call_memop(q, put_userptr, vb->planes[plane].mem_priv);
107  vb->planes[plane].mem_priv = NULL;
108  }
109 }
110 
115 static void __setup_offsets(struct vb2_queue *q, unsigned int n)
116 {
117  unsigned int buffer, plane;
118  struct vb2_buffer *vb;
119  unsigned long off;
120 
121  if (q->num_buffers) {
122  struct v4l2_plane *p;
123  vb = q->bufs[q->num_buffers - 1];
124  p = &vb->v4l2_planes[vb->num_planes - 1];
125  off = PAGE_ALIGN(p->m.mem_offset + p->length);
126  } else {
127  off = 0;
128  }
129 
130  for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
131  vb = q->bufs[buffer];
132  if (!vb)
133  continue;
134 
135  for (plane = 0; plane < vb->num_planes; ++plane) {
136  vb->v4l2_planes[plane].length = q->plane_sizes[plane];
137  vb->v4l2_planes[plane].m.mem_offset = off;
138 
139  dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
140  buffer, plane, off);
141 
142  off += vb->v4l2_planes[plane].length;
143  off = PAGE_ALIGN(off);
144  }
145  }
146 }
147 
155 static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
156  unsigned int num_buffers, unsigned int num_planes)
157 {
158  unsigned int buffer;
159  struct vb2_buffer *vb;
160  int ret;
161 
162  for (buffer = 0; buffer < num_buffers; ++buffer) {
163  /* Allocate videobuf buffer structures */
164  vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
165  if (!vb) {
166  dprintk(1, "Memory alloc for buffer struct failed\n");
167  break;
168  }
169 
170  /* Length stores number of planes for multiplanar buffers */
171  if (V4L2_TYPE_IS_MULTIPLANAR(q->type))
172  vb->v4l2_buf.length = num_planes;
173 
174  vb->state = VB2_BUF_STATE_DEQUEUED;
175  vb->vb2_queue = q;
176  vb->num_planes = num_planes;
177  vb->v4l2_buf.index = q->num_buffers + buffer;
178  vb->v4l2_buf.type = q->type;
179  vb->v4l2_buf.memory = memory;
180 
181  /* Allocate video buffer memory for the MMAP type */
182  if (memory == V4L2_MEMORY_MMAP) {
183  ret = __vb2_buf_mem_alloc(vb);
184  if (ret) {
185  dprintk(1, "Failed allocating memory for "
186  "buffer %d\n", buffer);
187  kfree(vb);
188  break;
189  }
190  /*
191  * Call the driver-provided buffer initialization
192  * callback, if given. An error in initialization
193  * results in queue setup failure.
194  */
195  ret = call_qop(q, buf_init, vb);
196  if (ret) {
197  dprintk(1, "Buffer %d %p initialization"
198  " failed\n", buffer, vb);
199  __vb2_buf_mem_free(vb);
200  kfree(vb);
201  break;
202  }
203  }
204 
205  q->bufs[q->num_buffers + buffer] = vb;
206  }
207 
208  __setup_offsets(q, buffer);
209 
210  dprintk(1, "Allocated %d buffers, %d plane(s) each\n",
211  buffer, num_planes);
212 
213  return buffer;
214 }
215 
219 static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
220 {
221  unsigned int buffer;
222  struct vb2_buffer *vb;
223 
224  for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
225  ++buffer) {
226  vb = q->bufs[buffer];
227  if (!vb)
228  continue;
229 
230  /* Free MMAP buffers or release USERPTR buffers */
231  if (q->memory == V4L2_MEMORY_MMAP)
232  __vb2_buf_mem_free(vb);
233  else
234  __vb2_buf_userptr_put(vb);
235  }
236 }
237 
243 static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
244 {
245  unsigned int buffer;
246 
247  /* Call driver-provided cleanup function for each buffer, if provided */
248  if (q->ops->buf_cleanup) {
249  for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
250  ++buffer) {
251  if (NULL == q->bufs[buffer])
252  continue;
253  q->ops->buf_cleanup(q->bufs[buffer]);
254  }
255  }
256 
257  /* Release video buffer memory */
258  __vb2_free_mem(q, buffers);
259 
260  /* Free videobuf buffers */
261  for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
262  ++buffer) {
263  kfree(q->bufs[buffer]);
264  q->bufs[buffer] = NULL;
265  }
266 
267  q->num_buffers -= buffers;
268  if (!q->num_buffers)
269  q->memory = 0;
270  INIT_LIST_HEAD(&q->queued_list);
271 }
272 
277 static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
278 {
279  if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
280  return 0;
281 
282  /* Is memory for copying plane information present? */
283  if (NULL == b->m.planes) {
284  dprintk(1, "Multi-planar buffer passed but "
285  "planes array not provided\n");
286  return -EINVAL;
287  }
288 
289  if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) {
290  dprintk(1, "Incorrect planes array length, "
291  "expected %d, got %d\n", vb->num_planes, b->length);
292  return -EINVAL;
293  }
294 
295  return 0;
296 }
297 
302 static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
303 {
304  unsigned int plane;
305  for (plane = 0; plane < vb->num_planes; ++plane) {
306  void *mem_priv = vb->planes[plane].mem_priv;
307  /*
308  * If num_users() has not been provided, call_memop
309  * will return 0; apparently nobody cares about this
310  * case anyway. If num_users() returns more than 1,
311  * we are not the only user of the plane's memory.
312  */
313  if (mem_priv && call_memop(q, num_users, mem_priv) > 1)
314  return true;
315  }
316  return false;
317 }
318 
323 static bool __buffers_in_use(struct vb2_queue *q)
324 {
325  unsigned int buffer;
326  for (buffer = 0; buffer < q->num_buffers; ++buffer) {
327  if (__buffer_in_use(q, q->bufs[buffer]))
328  return true;
329  }
330  return false;
331 }
332 
337 static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
338 {
339  struct vb2_queue *q = vb->vb2_queue;
340 
341  /* Copy back data such as timestamp, flags, etc. */
342  memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
343  b->reserved2 = vb->v4l2_buf.reserved2;
344  b->reserved = vb->v4l2_buf.reserved;
345 
346  if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
347  /*
348  * Fill in plane-related data if userspace provided an array
349  * for it. The caller has already verified memory and size.
350  */
351  b->length = vb->num_planes;
352  memcpy(b->m.planes, vb->v4l2_planes,
353  b->length * sizeof(struct v4l2_plane));
354  } else {
355  /*
356  * We use length and offset in v4l2_planes array even for
357  * single-planar buffers, but userspace does not.
358  */
359  b->length = vb->v4l2_planes[0].length;
360  b->bytesused = vb->v4l2_planes[0].bytesused;
361  if (q->memory == V4L2_MEMORY_MMAP)
362  b->m.offset = vb->v4l2_planes[0].m.mem_offset;
363  else if (q->memory == V4L2_MEMORY_USERPTR)
364  b->m.userptr = vb->v4l2_planes[0].m.userptr;
365  }
366 
367  /*
368  * Clear any buffer state related flags.
369  */
370  b->flags &= ~V4L2_BUFFER_STATE_FLAGS;
371 
372  switch (vb->state) {
373  case VB2_BUF_STATE_QUEUED:
374  case VB2_BUF_STATE_ACTIVE:
375  b->flags |= V4L2_BUF_FLAG_QUEUED;
376  break;
377  case VB2_BUF_STATE_ERROR:
378  b->flags |= V4L2_BUF_FLAG_ERROR;
379  /* fall through */
380  case VB2_BUF_STATE_DONE:
381  b->flags |= V4L2_BUF_FLAG_DONE;
382  break;
383  case VB2_BUF_STATE_PREPARED:
384  b->flags |= V4L2_BUF_FLAG_PREPARED;
385  break;
386  case VB2_BUF_STATE_DEQUEUED:
387  /* nothing */
388  break;
389  }
390 
391  if (__buffer_in_use(q, vb))
392  b->flags |= V4L2_BUF_FLAG_MAPPED;
393 }
394 
408 int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
409 {
410  struct vb2_buffer *vb;
411  int ret;
412 
413  if (b->type != q->type) {
414  dprintk(1, "querybuf: wrong buffer type\n");
415  return -EINVAL;
416  }
417 
418  if (b->index >= q->num_buffers) {
419  dprintk(1, "querybuf: buffer index out of range\n");
420  return -EINVAL;
421  }
422  vb = q->bufs[b->index];
423  ret = __verify_planes_array(vb, b);
424  if (!ret)
425  __fill_v4l2_buffer(vb, b);
426  return ret;
427 }
429 
434 static int __verify_userptr_ops(struct vb2_queue *q)
435 {
436  if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
437  !q->mem_ops->put_userptr)
438  return -EINVAL;
439 
440  return 0;
441 }
442 
447 static int __verify_mmap_ops(struct vb2_queue *q)
448 {
449  if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
450  !q->mem_ops->put || !q->mem_ops->mmap)
451  return -EINVAL;
452 
453  return 0;
454 }
455 
460 static int __verify_memory_type(struct vb2_queue *q,
461  enum v4l2_memory memory, enum v4l2_buf_type type)
462 {
463  if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR) {
464  dprintk(1, "reqbufs: unsupported memory type\n");
465  return -EINVAL;
466  }
467 
468  if (type != q->type) {
469  dprintk(1, "reqbufs: requested type is incorrect\n");
470  return -EINVAL;
471  }
472 
473  /*
474  * Make sure all the required memory ops for given memory type
475  * are available.
476  */
477  if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
478  dprintk(1, "reqbufs: MMAP for current setup unsupported\n");
479  return -EINVAL;
480  }
481 
482  if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
483  dprintk(1, "reqbufs: USERPTR for current setup unsupported\n");
484  return -EINVAL;
485  }
486 
487  /*
488  * Place the busy tests at the end: -EBUSY can be ignored when
489  * create_bufs is called with count == 0, but count == 0 should still
490  * do the memory and type validation.
491  */
492  if (q->fileio) {
493  dprintk(1, "reqbufs: file io in progress\n");
494  return -EBUSY;
495  }
496  return 0;
497 }
498 
522 static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
523 {
524  unsigned int num_buffers, allocated_buffers, num_planes = 0;
525  int ret;
526 
527  if (q->streaming) {
528  dprintk(1, "reqbufs: streaming active\n");
529  return -EBUSY;
530  }
531 
532  if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
533  /*
534  * We already have buffers allocated, so first check if they
535  * are not in use and can be freed.
536  */
537  if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
538  dprintk(1, "reqbufs: memory in use, cannot free\n");
539  return -EBUSY;
540  }
541 
542  __vb2_queue_free(q, q->num_buffers);
543 
544  /*
545  * In case of REQBUFS(0) return immediately without calling
546  * driver's queue_setup() callback and allocating resources.
547  */
548  if (req->count == 0)
549  return 0;
550  }
551 
552  /*
553  * Make sure the requested values and current defaults are sane.
554  */
555  num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
556  memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
557  memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
558  q->memory = req->memory;
559 
560  /*
561  * Ask the driver how many buffers and planes per buffer it requires.
562  * Driver also sets the size and allocator context for each plane.
563  */
564  ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes,
565  q->plane_sizes, q->alloc_ctx);
566  if (ret)
567  return ret;
568 
569  /* Finally, allocate buffers and video memory */
570  ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
571  if (ret == 0) {
572  dprintk(1, "Memory allocation failed\n");
573  return -ENOMEM;
574  }
575 
576  allocated_buffers = ret;
577 
578  /*
579  * Check if driver can handle the allocated number of buffers.
580  */
581  if (allocated_buffers < num_buffers) {
582  num_buffers = allocated_buffers;
583 
584  ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
585  &num_planes, q->plane_sizes, q->alloc_ctx);
586 
587  if (!ret && allocated_buffers < num_buffers)
588  ret = -ENOMEM;
589 
590  /*
591  * Either the driver has accepted a smaller number of buffers,
592  * or .queue_setup() returned an error
593  */
594  }
595 
596  q->num_buffers = allocated_buffers;
597 
598  if (ret < 0) {
599  __vb2_queue_free(q, allocated_buffers);
600  return ret;
601  }
602 
603  /*
604  * Return the number of successfully allocated buffers
605  * to the userspace.
606  */
607  req->count = allocated_buffers;
608 
609  return 0;
610 }
611 
618 int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
619 {
620  int ret = __verify_memory_type(q, req->memory, req->type);
621 
622  return ret ? ret : __reqbufs(q, req);
623 }
625 
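
/*
 * Illustrative sketch (not part of videobuf2-core.c): a minimal driver-side
 * queue_setup() callback of the kind vb2_reqbufs()/__reqbufs() invoke via
 * call_qop() above.  The names my_queue_setup and MY_IMAGE_SIZE are
 * assumptions used only for illustration.
 */
static int my_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
			  unsigned int *num_buffers, unsigned int *num_planes,
			  unsigned int sizes[], void *alloc_ctxs[])
{
	/* Single-planar format: one plane of fixed size per buffer */
	*num_planes = 1;
	sizes[0] = MY_IMAGE_SIZE;

	/* Require at least two buffers so capture can double-buffer */
	if (*num_buffers < 2)
		*num_buffers = 2;

	return 0;
}
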
641 static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
642 {
643  unsigned int num_planes = 0, num_buffers, allocated_buffers;
644  int ret;
645 
646  if (q->num_buffers == VIDEO_MAX_FRAME) {
647  dprintk(1, "%s(): maximum number of buffers already allocated\n",
648  __func__);
649  return -ENOBUFS;
650  }
651 
652  if (!q->num_buffers) {
653  memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
654  memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
655  q->memory = create->memory;
656  }
657 
658  num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);
659 
660  /*
661  * Ask the driver whether the requested number of buffers, planes per
662  * buffer and their sizes are acceptable
663  */
664  ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
665  &num_planes, q->plane_sizes, q->alloc_ctx);
666  if (ret)
667  return ret;
668 
669  /* Finally, allocate buffers and video memory */
670  ret = __vb2_queue_alloc(q, create->memory, num_buffers,
671  num_planes);
672  if (ret == 0) {
673  dprintk(1, "Memory allocation failed\n");
674  return -ENOMEM;
675  }
676 
677  allocated_buffers = ret;
678 
679  /*
680  * Check if driver can handle the so far allocated number of buffers.
681  */
682  if (ret < num_buffers) {
683  num_buffers = ret;
684 
685  /*
686  * q->num_buffers contains the total number of buffers that the
687  * queue driver has set up
688  */
689  ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
690  &num_planes, q->plane_sizes, q->alloc_ctx);
691 
692  if (!ret && allocated_buffers < num_buffers)
693  ret = -ENOMEM;
694 
695  /*
696  * Either the driver has accepted a smaller number of buffers,
697  * or .queue_setup() returned an error
698  */
699  }
700 
701  q->num_buffers += allocated_buffers;
702 
703  if (ret < 0) {
704  __vb2_queue_free(q, allocated_buffers);
705  return -ENOMEM;
706  }
707 
708  /*
709  * Return the number of successfully allocated buffers
710  * to the userspace.
711  */
712  create->count = allocated_buffers;
713 
714  return 0;
715 }
716 
724 int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
725 {
726  int ret = __verify_memory_type(q, create->memory, create->format.type);
727 
728  create->index = q->num_buffers;
729  if (create->count == 0)
730  return ret != -EBUSY ? ret : 0;
731  return ret ? ret : __create_bufs(q, create);
732 }
734 
743 void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
744 {
745  struct vb2_queue *q = vb->vb2_queue;
746 
747  if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
748  return NULL;
749 
750  return call_memop(q, vaddr, vb->planes[plane_no].mem_priv);
751 
752 }
754 
766 void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
767 {
768  struct vb2_queue *q = vb->vb2_queue;
769 
770  if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
771  return NULL;
772 
773  return call_memop(q, cookie, vb->planes[plane_no].mem_priv);
774 }
776 
789 void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
790 {
791  struct vb2_queue *q = vb->vb2_queue;
792  unsigned long flags;
793 
794  if (vb->state != VB2_BUF_STATE_ACTIVE)
795  return;
796 
797  if (state != VB2_BUF_STATE_DONE && state != VB2_BUF_STATE_ERROR)
798  return;
799 
800  dprintk(4, "Done processing on buffer %d, state: %d\n",
801  vb->v4l2_buf.index, vb->state);
802 
803  /* Add the buffer to the done buffers list */
804  spin_lock_irqsave(&q->done_lock, flags);
805  vb->state = state;
806  list_add_tail(&vb->done_entry, &q->done_list);
807  atomic_dec(&q->queued_count);
808  spin_unlock_irqrestore(&q->done_lock, flags);
809 
810  /* Inform any processes that may be waiting for buffers */
811  wake_up(&q->done_wq);
812 }
814 
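
/*
 * Illustrative sketch (not part of videobuf2-core.c): how a capture driver's
 * interrupt handler might complete a buffer with vb2_buffer_done() above.
 * The my_dev structure and its cur_buf/bytes_captured fields are assumptions.
 */
static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;
	struct vb2_buffer *vb = dev->cur_buf;	/* buffer the hardware just filled */

	/* Record how much data landed in plane 0 and timestamp the frame */
	vb2_set_plane_payload(vb, 0, dev->bytes_captured);
	do_gettimeofday(&vb->v4l2_buf.timestamp);

	/* Moves the buffer to done_list and wakes any DQBUF/poll waiters */
	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);

	return IRQ_HANDLED;
}
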
820 static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
821  struct v4l2_plane *v4l2_planes)
822 {
823  unsigned int plane;
824 
825  if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
826  /* Fill in driver-provided information for OUTPUT types */
827  if (V4L2_TYPE_IS_OUTPUT(b->type)) {
828  /*
829  * Will have to go up to b->length when API starts
830  * accepting variable number of planes.
831  */
832  for (plane = 0; plane < vb->num_planes; ++plane) {
833  v4l2_planes[plane].bytesused =
834  b->m.planes[plane].bytesused;
835  v4l2_planes[plane].data_offset =
836  b->m.planes[plane].data_offset;
837  }
838  }
839 
840  if (b->memory == V4L2_MEMORY_USERPTR) {
841  for (plane = 0; plane < vb->num_planes; ++plane) {
842  v4l2_planes[plane].m.userptr =
843  b->m.planes[plane].m.userptr;
844  v4l2_planes[plane].length =
845  b->m.planes[plane].length;
846  }
847  }
848  } else {
849  /*
850  * Single-planar buffers do not use planes array,
851  * so fill in relevant v4l2_buffer struct fields instead.
852  * In videobuf we use our internal V4l2_planes struct for
853  * single-planar buffers as well, for simplicity.
854  */
855  if (V4L2_TYPE_IS_OUTPUT(b->type))
856  v4l2_planes[0].bytesused = b->bytesused;
857 
858  if (b->memory == V4L2_MEMORY_USERPTR) {
859  v4l2_planes[0].m.userptr = b->m.userptr;
860  v4l2_planes[0].length = b->length;
861  }
862  }
863 
864  vb->v4l2_buf.field = b->field;
865  vb->v4l2_buf.timestamp = b->timestamp;
866  vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_STATE_FLAGS;
867 }
868 
872 static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
873 {
874  struct v4l2_plane planes[VIDEO_MAX_PLANES];
875  struct vb2_queue *q = vb->vb2_queue;
876  void *mem_priv;
877  unsigned int plane;
878  int ret;
879  int write = !V4L2_TYPE_IS_OUTPUT(q->type);
880 
881  /* Copy relevant information provided by the userspace */
882  __fill_vb2_buffer(vb, b, planes);
883 
884  for (plane = 0; plane < vb->num_planes; ++plane) {
885  /* Skip the plane if already verified */
886  if (vb->v4l2_planes[plane].m.userptr &&
887  vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
888  && vb->v4l2_planes[plane].length == planes[plane].length)
889  continue;
890 
891  dprintk(3, "qbuf: userspace address for plane %d changed, "
892  "reacquiring memory\n", plane);
893 
894  /* Check if the provided plane buffer is large enough */
895  if (planes[plane].length < q->plane_sizes[plane]) {
896  ret = -EINVAL;
897  goto err;
898  }
899 
900  /* Release previously acquired memory if present */
901  if (vb->planes[plane].mem_priv)
902  call_memop(q, put_userptr, vb->planes[plane].mem_priv);
903 
904  vb->planes[plane].mem_priv = NULL;
905  vb->v4l2_planes[plane].m.userptr = 0;
906  vb->v4l2_planes[plane].length = 0;
907 
908  /* Acquire each plane's memory */
909  mem_priv = call_memop(q, get_userptr, q->alloc_ctx[plane],
910  planes[plane].m.userptr,
911  planes[plane].length, write);
912  if (IS_ERR_OR_NULL(mem_priv)) {
913  dprintk(1, "qbuf: failed acquiring userspace "
914  "memory for plane %d\n", plane);
915  ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
916  goto err;
917  }
918  vb->planes[plane].mem_priv = mem_priv;
919  }
920 
921  /*
922  * Call driver-specific initialization on the newly acquired buffer,
923  * if provided.
924  */
925  ret = call_qop(q, buf_init, vb);
926  if (ret) {
927  dprintk(1, "qbuf: buffer initialization failed\n");
928  goto err;
929  }
930 
931  /*
932  * Now that everything is in order, copy relevant information
933  * provided by userspace.
934  */
935  for (plane = 0; plane < vb->num_planes; ++plane)
936  vb->v4l2_planes[plane] = planes[plane];
937 
938  return 0;
939 err:
940  /* In case of errors, release planes that were already acquired */
941  for (plane = 0; plane < vb->num_planes; ++plane) {
942  if (vb->planes[plane].mem_priv)
943  call_memop(q, put_userptr, vb->planes[plane].mem_priv);
944  vb->planes[plane].mem_priv = NULL;
945  vb->v4l2_planes[plane].m.userptr = 0;
946  vb->v4l2_planes[plane].length = 0;
947  }
948 
949  return ret;
950 }
951 
955 static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
956 {
957  __fill_vb2_buffer(vb, b, vb->v4l2_planes);
958  return 0;
959 }
960 
964 static void __enqueue_in_driver(struct vb2_buffer *vb)
965 {
966  struct vb2_queue *q = vb->vb2_queue;
967 
968  vb->state = VB2_BUF_STATE_ACTIVE;
969  atomic_inc(&q->queued_count);
970  q->ops->buf_queue(vb);
971 }
972 
973 static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
974 {
975  struct vb2_queue *q = vb->vb2_queue;
976  int ret;
977 
978  switch (q->memory) {
979  case V4L2_MEMORY_MMAP:
980  ret = __qbuf_mmap(vb, b);
981  break;
982  case V4L2_MEMORY_USERPTR:
983  ret = __qbuf_userptr(vb, b);
984  break;
985  default:
986  WARN(1, "Invalid queue type\n");
987  ret = -EINVAL;
988  }
989 
990  if (!ret)
991  ret = call_qop(q, buf_prepare, vb);
992  if (ret)
993  dprintk(1, "qbuf: buffer preparation failed: %d\n", ret);
994  else
995  vb->state = VB2_BUF_STATE_PREPARED;
996 
997  return ret;
998 }
999 
1015 int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
1016 {
1017  struct vb2_buffer *vb;
1018  int ret;
1019 
1020  if (q->fileio) {
1021  dprintk(1, "%s(): file io in progress\n", __func__);
1022  return -EBUSY;
1023  }
1024 
1025  if (b->type != q->type) {
1026  dprintk(1, "%s(): invalid buffer type\n", __func__);
1027  return -EINVAL;
1028  }
1029 
1030  if (b->index >= q->num_buffers) {
1031  dprintk(1, "%s(): buffer index out of range\n", __func__);
1032  return -EINVAL;
1033  }
1034 
1035  vb = q->bufs[b->index];
1036  if (NULL == vb) {
1037  /* Should never happen */
1038  dprintk(1, "%s(): buffer is NULL\n", __func__);
1039  return -EINVAL;
1040  }
1041 
1042  if (b->memory != q->memory) {
1043  dprintk(1, "%s(): invalid memory type\n", __func__);
1044  return -EINVAL;
1045  }
1046 
1047  if (vb->state != VB2_BUF_STATE_DEQUEUED) {
1048  dprintk(1, "%s(): invalid buffer state %d\n", __func__, vb->state);
1049  return -EINVAL;
1050  }
1051  ret = __verify_planes_array(vb, b);
1052  if (ret < 0)
1053  return ret;
1054  ret = __buf_prepare(vb, b);
1055  if (ret < 0)
1056  return ret;
1057 
1058  __fill_v4l2_buffer(vb, b);
1059 
1060  return 0;
1061 }
1063 
1081 int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
1082 {
1083  struct rw_semaphore *mmap_sem = NULL;
1084  struct vb2_buffer *vb;
1085  int ret = 0;
1086 
1087  /*
1088  * In case of user pointer buffers vb2 allocator needs to get direct
1089  * access to userspace pages. This requires getting read access on
1090  * mmap semaphore in the current process structure. The same
1091  * semaphore is taken before calling mmap operation, while both mmap
1092  * and qbuf are called by the driver or v4l2 core with driver's lock
1093  * held. To avoid an AB-BA deadlock (mmap_sem then driver's lock in
1094  * mmap and driver's lock then mmap_sem in qbuf) the videobuf2 core
1095  * releases the driver's lock, takes mmap_sem and then takes the
1096  * driver's lock again.
1097  *
1098  * To avoid a race with other vb2 calls, which might be made after
1099  * releasing the driver's lock, this operation is performed at the
1100  * beginning of qbuf processing. This way the queue status is
1101  * consistent after getting driver's lock back.
1102  */
1103  if (q->memory == V4L2_MEMORY_USERPTR) {
1104  mmap_sem = &current->mm->mmap_sem;
1105  call_qop(q, wait_prepare, q);
1106  down_read(mmap_sem);
1107  call_qop(q, wait_finish, q);
1108  }
1109 
1110  if (q->fileio) {
1111  dprintk(1, "qbuf: file io in progress\n");
1112  ret = -EBUSY;
1113  goto unlock;
1114  }
1115 
1116  if (b->type != q->type) {
1117  dprintk(1, "qbuf: invalid buffer type\n");
1118  ret = -EINVAL;
1119  goto unlock;
1120  }
1121 
1122  if (b->index >= q->num_buffers) {
1123  dprintk(1, "qbuf: buffer index out of range\n");
1124  ret = -EINVAL;
1125  goto unlock;
1126  }
1127 
1128  vb = q->bufs[b->index];
1129  if (NULL == vb) {
1130  /* Should never happen */
1131  dprintk(1, "qbuf: buffer is NULL\n");
1132  ret = -EINVAL;
1133  goto unlock;
1134  }
1135 
1136  if (b->memory != q->memory) {
1137  dprintk(1, "qbuf: invalid memory type\n");
1138  ret = -EINVAL;
1139  goto unlock;
1140  }
1141  ret = __verify_planes_array(vb, b);
1142  if (ret)
1143  goto unlock;
1144 
1145  switch (vb->state) {
1146  case VB2_BUF_STATE_DEQUEUED:
1147  ret = __buf_prepare(vb, b);
1148  if (ret)
1149  goto unlock;
1150  case VB2_BUF_STATE_PREPARED:
1151  break;
1152  default:
1153  dprintk(1, "qbuf: buffer already in use\n");
1154  ret = -EINVAL;
1155  goto unlock;
1156  }
1157 
1158  /*
1159  * Add to the queued buffers list, a buffer will stay on it until
1160  * dequeued in dqbuf.
1161  */
1162  list_add_tail(&vb->queued_entry, &q->queued_list);
1163  vb->state = VB2_BUF_STATE_QUEUED;
1164 
1165  /*
1166  * If already streaming, give the buffer to driver for processing.
1167  * If not, the buffer will be given to driver on next streamon.
1168  */
1169  if (q->streaming)
1170  __enqueue_in_driver(vb);
1171 
1172  /* Fill buffer information for the userspace */
1173  __fill_v4l2_buffer(vb, b);
1174 
1175  dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index);
1176 unlock:
1177  if (mmap_sem)
1178  up_read(mmap_sem);
1179  return ret;
1180 }
1182 
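
/*
 * Illustrative sketch (not part of videobuf2-core.c): the driver-side
 * buf_queue() callback that __enqueue_in_driver() calls for every buffer
 * queued with vb2_qbuf() above.  The my_dev/my_buffer types, the spinlock
 * and the DMA list are assumptions used only for illustration.
 */
static void my_buf_queue(struct vb2_buffer *vb)
{
	struct my_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct my_buffer *buf = container_of(vb, struct my_buffer, vb);
	unsigned long flags;

	/* Hand the buffer over to the device's own DMA queue */
	spin_lock_irqsave(&dev->slock, flags);
	list_add_tail(&buf->list, &dev->buf_list);
	spin_unlock_irqrestore(&dev->slock, flags);
}
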
1189 static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
1190 {
1191  /*
1192  * All operations on vb_done_list are performed under done_lock
1193  * spinlock protection. However, buffers may be removed from
1194  * it and returned to userspace only while holding both driver's
1195  * lock and the done_lock spinlock. Thus we can be sure that as
1196  * long as we hold the driver's lock, the list will remain not
1197  * empty if list_empty() check succeeds.
1198  */
1199 
1200  for (;;) {
1201  int ret;
1202 
1203  if (!q->streaming) {
1204  dprintk(1, "Streaming off, will not wait for buffers\n");
1205  return -EINVAL;
1206  }
1207 
1208  if (!list_empty(&q->done_list)) {
1209  /*
1210  * Found a buffer that we were waiting for.
1211  */
1212  break;
1213  }
1214 
1215  if (nonblocking) {
1216  dprintk(1, "Nonblocking and no buffers to dequeue, "
1217  "will not wait\n");
1218  return -EAGAIN;
1219  }
1220 
1221  /*
1222  * We are streaming and blocking, wait for another buffer to
1223  * become ready or for streamoff. Driver's lock is released to
1224  * allow streamoff or qbuf to be called while waiting.
1225  */
1226  call_qop(q, wait_prepare, q);
1227 
1228  /*
1229  * All locks have been released, it is safe to sleep now.
1230  */
1231  dprintk(3, "Will sleep waiting for buffers\n");
1232  ret = wait_event_interruptible(q->done_wq,
1233  !list_empty(&q->done_list) || !q->streaming);
1234 
1235  /*
1236  * We need to reevaluate both conditions again after reacquiring
1237  * the locks or return an error if one occurred.
1238  */
1239  call_qop(q, wait_finish, q);
1240  if (ret) {
1241  dprintk(1, "Sleep was interrupted\n");
1242  return ret;
1243  }
1244  }
1245  return 0;
1246 }
1247 
1253 static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
1254  struct v4l2_buffer *b, int nonblocking)
1255 {
1256  unsigned long flags;
1257  int ret;
1258 
1259  /*
1260  * Wait for at least one buffer to become available on the done_list.
1261  */
1262  ret = __vb2_wait_for_done_vb(q, nonblocking);
1263  if (ret)
1264  return ret;
1265 
1266  /*
1267  * Driver's lock has been held since we last verified that done_list
1268  * is not empty, so no need for another list_empty(done_list) check.
1269  */
1270  spin_lock_irqsave(&q->done_lock, flags);
1271  *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
1272  /*
1273  * Only remove the buffer from done_list if v4l2_buffer can handle all
1274  * the planes.
1275  */
1276  ret = __verify_planes_array(*vb, b);
1277  if (!ret)
1278  list_del(&(*vb)->done_entry);
1279  spin_unlock_irqrestore(&q->done_lock, flags);
1280 
1281  return ret;
1282 }
1283 
1293 int vb2_wait_for_all_buffers(struct vb2_queue *q)
1294 {
1295  if (!q->streaming) {
1296  dprintk(1, "Streaming off, will not wait for buffers\n");
1297  return -EINVAL;
1298  }
1299 
1300  wait_event(q->done_wq, !atomic_read(&q->queued_count));
1301  return 0;
1302 }
1304 
1326 int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
1327 {
1328  struct vb2_buffer *vb = NULL;
1329  int ret;
1330 
1331  if (q->fileio) {
1332  dprintk(1, "dqbuf: file io in progress\n");
1333  return -EBUSY;
1334  }
1335 
1336  if (b->type != q->type) {
1337  dprintk(1, "dqbuf: invalid buffer type\n");
1338  return -EINVAL;
1339  }
1340  ret = __vb2_get_done_vb(q, &vb, b, nonblocking);
1341  if (ret < 0)
1342  return ret;
1343 
1344  ret = call_qop(q, buf_finish, vb);
1345  if (ret) {
1346  dprintk(1, "dqbuf: buffer finish failed\n");
1347  return ret;
1348  }
1349 
1350  switch (vb->state) {
1351  case VB2_BUF_STATE_DONE:
1352  dprintk(3, "dqbuf: Returning done buffer\n");
1353  break;
1354  case VB2_BUF_STATE_ERROR:
1355  dprintk(3, "dqbuf: Returning done buffer with errors\n");
1356  break;
1357  default:
1358  dprintk(1, "dqbuf: Invalid buffer state\n");
1359  return -EINVAL;
1360  }
1361 
1362  /* Fill buffer information for the userspace */
1363  __fill_v4l2_buffer(vb, b);
1364  /* Remove from videobuf queue */
1365  list_del(&vb->queued_entry);
1366 
1367  dprintk(1, "dqbuf of buffer %d, with state %d\n",
1368  vb->v4l2_buf.index, vb->state);
1369 
1370  vb->state = VB2_BUF_STATE_DEQUEUED;
1371  return 0;
1372 }
1374 
1381 static void __vb2_queue_cancel(struct vb2_queue *q)
1382 {
1383  unsigned int i;
1384 
1385  /*
1386  * Tell driver to stop all transactions and release all queued
1387  * buffers.
1388  */
1389  if (q->streaming)
1390  call_qop(q, stop_streaming, q);
1391  q->streaming = 0;
1392 
1393  /*
1394  * Remove all buffers from videobuf's list...
1395  */
1396  INIT_LIST_HEAD(&q->queued_list);
1397  /*
1398  * ...and done list; userspace will not receive any buffers it
1399  * has not already dequeued before initiating cancel.
1400  */
1401  INIT_LIST_HEAD(&q->done_list);
1402  atomic_set(&q->queued_count, 0);
1403  wake_up_all(&q->done_wq);
1404 
1405  /*
1406  * Reinitialize all buffers for next use.
1407  */
1408  for (i = 0; i < q->num_buffers; ++i)
1409  q->bufs[i]->state = VB2_BUF_STATE_DEQUEUED;
1410 }
1411 
1425 int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
1426 {
1427  struct vb2_buffer *vb;
1428  int ret;
1429 
1430  if (q->fileio) {
1431  dprintk(1, "streamon: file io in progress\n");
1432  return -EBUSY;
1433  }
1434 
1435  if (type != q->type) {
1436  dprintk(1, "streamon: invalid stream type\n");
1437  return -EINVAL;
1438  }
1439 
1440  if (q->streaming) {
1441  dprintk(1, "streamon: already streaming\n");
1442  return -EBUSY;
1443  }
1444 
1445  /*
1446  * If any buffers were queued before streamon,
1447  * we can now pass them to driver for processing.
1448  */
1449  list_for_each_entry(vb, &q->queued_list, queued_entry)
1450  __enqueue_in_driver(vb);
1451 
1452  /*
1453  * Let driver notice that streaming state has been enabled.
1454  */
1455  ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count));
1456  if (ret) {
1457  dprintk(1, "streamon: driver refused to start streaming\n");
1458  __vb2_queue_cancel(q);
1459  return ret;
1460  }
1461 
1462  q->streaming = 1;
1463 
1464  dprintk(3, "Streamon successful\n");
1465  return 0;
1466 }
1468 
1469 
1485 int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
1486 {
1487  if (q->fileio) {
1488  dprintk(1, "streamoff: file io in progress\n");
1489  return -EBUSY;
1490  }
1491 
1492  if (type != q->type) {
1493  dprintk(1, "streamoff: invalid stream type\n");
1494  return -EINVAL;
1495  }
1496 
1497  if (!q->streaming) {
1498  dprintk(1, "streamoff: not streaming\n");
1499  return -EINVAL;
1500  }
1501 
1502  /*
1503  * Cancel will pause streaming and remove all buffers from the driver
1504  * and videobuf, effectively returning control over them to userspace.
1505  */
1506  __vb2_queue_cancel(q);
1507 
1508  dprintk(3, "Streamoff successful\n");
1509  return 0;
1510 }
1512 
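
/*
 * Illustrative sketch (not part of videobuf2-core.c): start_streaming() and
 * stop_streaming() callbacks of the kind vb2_streamon()/__vb2_queue_cancel()
 * above invoke through call_qop().  my_dev, my_hw_start() and my_hw_stop()
 * are assumptions used only for illustration.
 */
static int my_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct my_dev *dev = vb2_get_drv_priv(q);

	/* "count" buffers have already been handed to buf_queue() */
	return my_hw_start(dev);
}

static int my_stop_streaming(struct vb2_queue *q)
{
	struct my_dev *dev = vb2_get_drv_priv(q);

	/* Stop DMA; buffers still owned by the driver are reclaimed by
	 * __vb2_queue_cancel() right after this returns. */
	my_hw_stop(dev);
	return 0;
}
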
1516 static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
1517  unsigned int *_buffer, unsigned int *_plane)
1518 {
1519  struct vb2_buffer *vb;
1520  unsigned int buffer, plane;
1521 
1522  /*
1523  * Go over all buffers and their planes, comparing the given offset
1524  * with an offset assigned to each plane. If a match is found,
1525  * return its buffer and plane numbers.
1526  */
1527  for (buffer = 0; buffer < q->num_buffers; ++buffer) {
1528  vb = q->bufs[buffer];
1529 
1530  for (plane = 0; plane < vb->num_planes; ++plane) {
1531  if (vb->v4l2_planes[plane].m.mem_offset == off) {
1532  *_buffer = buffer;
1533  *_plane = plane;
1534  return 0;
1535  }
1536  }
1537  }
1538 
1539  return -EINVAL;
1540 }
1541 
1561 int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
1562 {
1563  unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
1564  struct vb2_buffer *vb;
1565  unsigned int buffer, plane;
1566  int ret;
1567 
1568  if (q->memory != V4L2_MEMORY_MMAP) {
1569  dprintk(1, "Queue is not currently set up for mmap\n");
1570  return -EINVAL;
1571  }
1572 
1573  /*
1574  * Check memory area access mode.
1575  */
1576  if (!(vma->vm_flags & VM_SHARED)) {
1577  dprintk(1, "Invalid vma flags, VM_SHARED needed\n");
1578  return -EINVAL;
1579  }
1580  if (V4L2_TYPE_IS_OUTPUT(q->type)) {
1581  if (!(vma->vm_flags & VM_WRITE)) {
1582  dprintk(1, "Invalid vma flags, VM_WRITE needed\n");
1583  return -EINVAL;
1584  }
1585  } else {
1586  if (!(vma->vm_flags & VM_READ)) {
1587  dprintk(1, "Invalid vma flags, VM_READ needed\n");
1588  return -EINVAL;
1589  }
1590  }
1591 
1592  /*
1593  * Find the plane corresponding to the offset passed by userspace.
1594  */
1595  ret = __find_plane_by_offset(q, off, &buffer, &plane);
1596  if (ret)
1597  return ret;
1598 
1599  vb = q->bufs[buffer];
1600 
1601  ret = call_memop(q, mmap, vb->planes[plane].mem_priv, vma);
1602  if (ret)
1603  return ret;
1604 
1605  dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane);
1606  return 0;
1607 }
1609 
1610 #ifndef CONFIG_MMU
1611 unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
1612  unsigned long addr,
1613  unsigned long len,
1614  unsigned long pgoff,
1615  unsigned long flags)
1616 {
1617  unsigned long off = pgoff << PAGE_SHIFT;
1618  struct vb2_buffer *vb;
1619  unsigned int buffer, plane;
1620  int ret;
1621 
1622  if (q->memory != V4L2_MEMORY_MMAP) {
1623  dprintk(1, "Queue is not currently set up for mmap\n");
1624  return -EINVAL;
1625  }
1626 
1627  /*
1628  * Find the plane corresponding to the offset passed by userspace.
1629  */
1630  ret = __find_plane_by_offset(q, off, &buffer, &plane);
1631  if (ret)
1632  return ret;
1633 
1634  vb = q->bufs[buffer];
1635 
1636  return (unsigned long)vb2_plane_vaddr(vb, plane);
1637 }
1639 #endif
1640 
1641 static int __vb2_init_fileio(struct vb2_queue *q, int read);
1642 static int __vb2_cleanup_fileio(struct vb2_queue *q);
1643 
1663 unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
1664 {
1665  struct video_device *vfd = video_devdata(file);
1666  unsigned long req_events = poll_requested_events(wait);
1667  struct vb2_buffer *vb = NULL;
1668  unsigned int res = 0;
1669  unsigned long flags;
1670 
1671  if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
1672  struct v4l2_fh *fh = file->private_data;
1673 
1674  if (v4l2_event_pending(fh))
1675  res = POLLPRI;
1676  else if (req_events & POLLPRI)
1677  poll_wait(file, &fh->wait, wait);
1678  }
1679 
1680  /*
1681  * Start file I/O emulator only if streaming API has not been used yet.
1682  */
1683  if (q->num_buffers == 0 && q->fileio == NULL) {
1684  if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
1685  (req_events & (POLLIN | POLLRDNORM))) {
1686  if (__vb2_init_fileio(q, 1))
1687  return res | POLLERR;
1688  }
1689  if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
1690  (req_events & (POLLOUT | POLLWRNORM))) {
1691  if (__vb2_init_fileio(q, 0))
1692  return res | POLLERR;
1693  /*
1694  * Write to OUTPUT queue can be done immediately.
1695  */
1696  return res | POLLOUT | POLLWRNORM;
1697  }
1698  }
1699 
1700  /*
1701  * There is nothing to wait for if no buffers have already been queued.
1702  */
1703  if (list_empty(&q->queued_list))
1704  return res | POLLERR;
1705 
1706  poll_wait(file, &q->done_wq, wait);
1707 
1708  /*
1709  * Take first buffer available for dequeuing.
1710  */
1711  spin_lock_irqsave(&q->done_lock, flags);
1712  if (!list_empty(&q->done_list))
1713  vb = list_first_entry(&q->done_list, struct vb2_buffer,
1714  done_entry);
1715  spin_unlock_irqrestore(&q->done_lock, flags);
1716 
1717  if (vb && (vb->state == VB2_BUF_STATE_DONE
1718  || vb->state == VB2_BUF_STATE_ERROR)) {
1719  return (V4L2_TYPE_IS_OUTPUT(q->type)) ?
1720  res | POLLOUT | POLLWRNORM :
1721  res | POLLIN | POLLRDNORM;
1722  }
1723  return res;
1724 }
1726 
1738 int vb2_queue_init(struct vb2_queue *q)
1739 {
1740  /*
1741  * Sanity check
1742  */
1743  if (WARN_ON(!q) ||
1744  WARN_ON(!q->ops) ||
1745  WARN_ON(!q->mem_ops) ||
1746  WARN_ON(!q->type) ||
1747  WARN_ON(!q->io_modes) ||
1748  WARN_ON(!q->ops->queue_setup) ||
1749  WARN_ON(!q->ops->buf_queue))
1750  return -EINVAL;
1751 
1752  INIT_LIST_HEAD(&q->queued_list);
1753  INIT_LIST_HEAD(&q->done_list);
1754  spin_lock_init(&q->done_lock);
1755  init_waitqueue_head(&q->done_wq);
1756 
1757  if (q->buf_struct_size == 0)
1758  q->buf_struct_size = sizeof(struct vb2_buffer);
1759 
1760  return 0;
1761 }
1763 
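
/*
 * Illustrative sketch (not part of videobuf2-core.c): filling in a vb2_queue
 * and calling vb2_queue_init() above from a driver's probe() path.  The my_*
 * names and the videobuf2-vmalloc backend are assumptions; a real driver
 * picks the mem_ops matching its DMA requirements.
 */
static int my_init_vb2_queue(struct my_dev *dev)
{
	struct vb2_queue *q = &dev->queue;

	memset(q, 0, sizeof(*q));
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	q->drv_priv = dev;
	q->buf_struct_size = sizeof(struct my_buffer);
	q->ops = &my_vb2_ops;		/* queue_setup, buf_queue, ... */
	q->mem_ops = &vb2_vmalloc_memops;
	q->lock = &dev->lock;		/* serializes the vb2 helper functions */

	return vb2_queue_init(q);
}
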
1772 void vb2_queue_release(struct vb2_queue *q)
1773 {
1774  __vb2_cleanup_fileio(q);
1775  __vb2_queue_cancel(q);
1776  __vb2_queue_free(q, q->num_buffers);
1777 }
1779 
1787 struct vb2_fileio_buf {
1788  void *vaddr;
1789  unsigned int size;
1790  unsigned int pos;
1791  unsigned int queued:1;
1792 };
1793 
1802 struct vb2_fileio_data {
1803  struct v4l2_requestbuffers req;
1804  struct v4l2_buffer b;
1805  struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME];
1806  unsigned int index;
1807  unsigned int q_count;
1808  unsigned int dq_count;
1809  unsigned int flags;
1810 };
1811 
1817 static int __vb2_init_fileio(struct vb2_queue *q, int read)
1818 {
1819  struct vb2_fileio_data *fileio;
1820  int i, ret;
1821  unsigned int count = 0;
1822 
1823  /*
1824  * Sanity check
1825  */
1826  if ((read && !(q->io_modes & VB2_READ)) ||
1827  (!read && !(q->io_modes & VB2_WRITE)))
1828  BUG();
1829 
1830  /*
1831  * Check if device supports mapping buffers to kernel virtual space.
1832  */
1833  if (!q->mem_ops->vaddr)
1834  return -EBUSY;
1835 
1836  /*
1837  * Check that the streaming API has not already been activated.
1838  */
1839  if (q->streaming || q->num_buffers > 0)
1840  return -EBUSY;
1841 
1842  /*
1843  * Start with count 1, driver can increase it in queue_setup()
1844  */
1845  count = 1;
1846 
1847  dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n",
1848  (read) ? "read" : "write", count, q->io_flags);
1849 
1850  fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
1851  if (fileio == NULL)
1852  return -ENOMEM;
1853 
1854  fileio->flags = q->io_flags;
1855 
1856  /*
1857  * Request buffers and use MMAP type to force driver
1858  * to allocate buffers by itself.
1859  */
1860  fileio->req.count = count;
1861  fileio->req.memory = V4L2_MEMORY_MMAP;
1862  fileio->req.type = q->type;
1863  ret = vb2_reqbufs(q, &fileio->req);
1864  if (ret)
1865  goto err_kfree;
1866 
1867  /*
1868  * Check if plane_count is correct
1869  * (multiplane buffers are not supported).
1870  */
1871  if (q->bufs[0]->num_planes != 1) {
1872  ret = -EBUSY;
1873  goto err_reqbufs;
1874  }
1875 
1876  /*
1877  * Get kernel address of each buffer.
1878  */
1879  for (i = 0; i < q->num_buffers; i++) {
1880  fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
1881  if (fileio->bufs[i].vaddr == NULL)
1882  goto err_reqbufs;
1883  fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
1884  }
1885 
1886  /*
1887  * Read mode requires pre queuing of all buffers.
1888  */
1889  if (read) {
1890  /*
1891  * Queue all buffers.
1892  */
1893  for (i = 0; i < q->num_buffers; i++) {
1894  struct v4l2_buffer *b = &fileio->b;
1895  memset(b, 0, sizeof(*b));
1896  b->type = q->type;
1897  b->memory = q->memory;
1898  b->index = i;
1899  ret = vb2_qbuf(q, b);
1900  if (ret)
1901  goto err_reqbufs;
1902  fileio->bufs[i].queued = 1;
1903  }
1904 
1905  /*
1906  * Start streaming.
1907  */
1908  ret = vb2_streamon(q, q->type);
1909  if (ret)
1910  goto err_reqbufs;
1911  }
1912 
1913  q->fileio = fileio;
1914 
1915  return ret;
1916 
1917 err_reqbufs:
1918  fileio->req.count = 0;
1919  vb2_reqbufs(q, &fileio->req);
1920 
1921 err_kfree:
1922  kfree(fileio);
1923  return ret;
1924 }
1925 
1930 static int __vb2_cleanup_fileio(struct vb2_queue *q)
1931 {
1932  struct vb2_fileio_data *fileio = q->fileio;
1933 
1934  if (fileio) {
1935  /*
1936  * Hack fileio context to enable direct calls to vb2 ioctl
1937  * interface.
1938  */
1939  q->fileio = NULL;
1940 
1941  vb2_streamoff(q, q->type);
1942  fileio->req.count = 0;
1943  vb2_reqbufs(q, &fileio->req);
1944  kfree(fileio);
1945  dprintk(3, "file io emulator closed\n");
1946  }
1947  return 0;
1948 }
1949 
1959 static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
1960  loff_t *ppos, int nonblock, int read)
1961 {
1962  struct vb2_fileio_data *fileio;
1963  struct vb2_fileio_buf *buf;
1964  int ret, index;
1965 
1966  dprintk(3, "file io: mode %s, offset %ld, count %zd, %sblocking\n",
1967  read ? "read" : "write", (long)*ppos, count,
1968  nonblock ? "non" : "");
1969 
1970  if (!data)
1971  return -EINVAL;
1972 
1973  /*
1974  * Initialize emulator on first call.
1975  */
1976  if (!q->fileio) {
1977  ret = __vb2_init_fileio(q, read);
1978  dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
1979  if (ret)
1980  return ret;
1981  }
1982  fileio = q->fileio;
1983 
1984  /*
1985  * Hack fileio context to enable direct calls to vb2 ioctl interface.
1986  * The pointer will be restored before returning from this function.
1987  */
1988  q->fileio = NULL;
1989 
1990  index = fileio->index;
1991  buf = &fileio->bufs[index];
1992 
1993  /*
1994  * Check if we need to dequeue the buffer.
1995  */
1996  if (buf->queued) {
1997  struct vb2_buffer *vb;
1998 
1999  /*
2000  * Call vb2_dqbuf to get buffer back.
2001  */
2002  memset(&fileio->b, 0, sizeof(fileio->b));
2003  fileio->b.type = q->type;
2004  fileio->b.memory = q->memory;
2005  fileio->b.index = index;
2006  ret = vb2_dqbuf(q, &fileio->b, nonblock);
2007  dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
2008  if (ret)
2009  goto end;
2010  fileio->dq_count += 1;
2011 
2012  /*
2013  * Get number of bytes filled by the driver
2014  */
2015  vb = q->bufs[index];
2016  buf->size = vb2_get_plane_payload(vb, 0);
2017  buf->queued = 0;
2018  }
2019 
2020  /*
2021  * Limit count on last few bytes of the buffer.
2022  */
2023  if (buf->pos + count > buf->size) {
2024  count = buf->size - buf->pos;
2025  dprintk(5, "reducing read count: %zd\n", count);
2026  }
2027 
2028  /*
2029  * Transfer data to userspace.
2030  */
2031  dprintk(3, "file io: copying %zd bytes - buffer %d, offset %u\n",
2032  count, index, buf->pos);
2033  if (read)
2034  ret = copy_to_user(data, buf->vaddr + buf->pos, count);
2035  else
2036  ret = copy_from_user(buf->vaddr + buf->pos, data, count);
2037  if (ret) {
2038  dprintk(3, "file io: error copying data\n");
2039  ret = -EFAULT;
2040  goto end;
2041  }
2042 
2043  /*
2044  * Update counters.
2045  */
2046  buf->pos += count;
2047  *ppos += count;
2048 
2049  /*
2050  * Queue next buffer if required.
2051  */
2052  if (buf->pos == buf->size ||
2053  (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) {
2054  /*
2055  * Check if this is the last buffer to read.
2056  */
2057  if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
2058  fileio->dq_count == 1) {
2059  dprintk(3, "file io: read limit reached\n");
2060  /*
2061  * Restore fileio pointer and release the context.
2062  */
2063  q->fileio = fileio;
2064  return __vb2_cleanup_fileio(q);
2065  }
2066 
2067  /*
2068  * Call vb2_qbuf and give buffer to the driver.
2069  */
2070  memset(&fileio->b, 0, sizeof(fileio->b));
2071  fileio->b.type = q->type;
2072  fileio->b.memory = q->memory;
2073  fileio->b.index = index;
2074  fileio->b.bytesused = buf->pos;
2075  ret = vb2_qbuf(q, &fileio->b);
2076  dprintk(5, "file io: vb2_qbuf result: %d\n", ret);
2077  if (ret)
2078  goto end;
2079 
2080  /*
2081  * Buffer has been queued, update the status
2082  */
2083  buf->pos = 0;
2084  buf->queued = 1;
2085  buf->size = q->bufs[0]->v4l2_planes[0].length;
2086  fileio->q_count += 1;
2087 
2088  /*
2089  * Switch to the next buffer
2090  */
2091  fileio->index = (index + 1) % q->num_buffers;
2092 
2093  /*
2094  * Start streaming if required.
2095  */
2096  if (!read && !q->streaming) {
2097  ret = vb2_streamon(q, q->type);
2098  if (ret)
2099  goto end;
2100  }
2101  }
2102 
2103  /*
2104  * Return proper number of bytes processed.
2105  */
2106  if (ret == 0)
2107  ret = count;
2108 end:
2109  /*
2110  * Restore the fileio context and block vb2 ioctl interface.
2111  */
2112  q->fileio = fileio;
2113  return ret;
2114 }
2115 
2116 size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
2117  loff_t *ppos, int nonblocking)
2118 {
2119  return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
2120 }
2122 
2123 size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count,
2124  loff_t *ppos, int nonblocking)
2125 {
2126  return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 0);
2127 }
2129 
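
/*
 * Illustrative userspace sketch (not part of videobuf2-core.c): the file I/O
 * emulator above lets a capture node be consumed with plain read(), without
 * REQBUFS/QBUF/DQBUF.  "/dev/video0" and the frame size are assumptions.
 *
 *	int fd = open("/dev/video0", O_RDONLY);
 *	char frame[640 * 480 * 2];                   // assumed 640x480 YUYV
 *	ssize_t n = read(fd, frame, sizeof(frame));  // serviced by vb2_read()
 *	if (n < 0)
 *		perror("read");
 *	close(fd);
 */
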
2130 
2131 /*
2132  * The following functions are not part of the vb2 core API, but are helper
2133  * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
2134  * and struct vb2_ops.
2135  * They contain boilerplate code that most if not all drivers have to do
2136  * and so they simplify the driver code.
2137  */
2138 
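/*
 * Illustrative sketch (not part of videobuf2-core.c): wiring the vb2_ioctl_*
 * and vb2_fop_* helpers defined below into a driver's ops tables.  The my_*
 * names are assumptions; everything else is standard V4L2 plumbing.
 */
static const struct v4l2_ioctl_ops my_ioctl_ops = {
	.vidioc_querycap	= my_querycap,
	.vidioc_reqbufs		= vb2_ioctl_reqbufs,
	.vidioc_create_bufs	= vb2_ioctl_create_bufs,
	.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
	.vidioc_querybuf	= vb2_ioctl_querybuf,
	.vidioc_qbuf		= vb2_ioctl_qbuf,
	.vidioc_dqbuf		= vb2_ioctl_dqbuf,
	.vidioc_streamon	= vb2_ioctl_streamon,
	.vidioc_streamoff	= vb2_ioctl_streamoff,
};

static const struct v4l2_file_operations my_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.read		= vb2_fop_read,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
	.unlocked_ioctl	= video_ioctl2,
};
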
2139 /* The queue is busy if there is an owner and you are not that owner. */
2140 static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
2141 {
2142  return vdev->queue->owner && vdev->queue->owner != file->private_data;
2143 }
2144 
2145 /* vb2 ioctl helpers */
2146 
2147 int vb2_ioctl_reqbufs(struct file *file, void *priv,
2148  struct v4l2_requestbuffers *p)
2149 {
2150  struct video_device *vdev = video_devdata(file);
2151  int res = __verify_memory_type(vdev->queue, p->memory, p->type);
2152 
2153  if (res)
2154  return res;
2155  if (vb2_queue_is_busy(vdev, file))
2156  return -EBUSY;
2157  res = __reqbufs(vdev->queue, p);
2158  /* If count == 0, then the owner has released all buffers and is
2159  no longer the owner of the queue. Otherwise we have a new owner. */
2160  if (res == 0)
2161  vdev->queue->owner = p->count ? file->private_data : NULL;
2162  return res;
2163 }
2165 
2166 int vb2_ioctl_create_bufs(struct file *file, void *priv,
2167  struct v4l2_create_buffers *p)
2168 {
2169  struct video_device *vdev = video_devdata(file);
2170  int res = __verify_memory_type(vdev->queue, p->memory, p->format.type);
2171 
2172  p->index = vdev->queue->num_buffers;
2173  /* If count == 0, then just check if memory and type are valid.
2174  Any -EBUSY result from __verify_memory_type can be mapped to 0. */
2175  if (p->count == 0)
2176  return res != -EBUSY ? res : 0;
2177  if (res)
2178  return res;
2179  if (vb2_queue_is_busy(vdev, file))
2180  return -EBUSY;
2181  res = __create_bufs(vdev->queue, p);
2182  if (res == 0)
2183  vdev->queue->owner = file->private_data;
2184  return res;
2185 }
2187 
2188 int vb2_ioctl_prepare_buf(struct file *file, void *priv,
2189  struct v4l2_buffer *p)
2190 {
2191  struct video_device *vdev = video_devdata(file);
2192 
2193  if (vb2_queue_is_busy(vdev, file))
2194  return -EBUSY;
2195  return vb2_prepare_buf(vdev->queue, p);
2196 }
2198 
2199 int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
2200 {
2201  struct video_device *vdev = video_devdata(file);
2202 
2203  /* No need to call vb2_queue_is_busy(), anyone can query buffers. */
2204  return vb2_querybuf(vdev->queue, p);
2205 }
2207 
2208 int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
2209 {
2210  struct video_device *vdev = video_devdata(file);
2211 
2212  if (vb2_queue_is_busy(vdev, file))
2213  return -EBUSY;
2214  return vb2_qbuf(vdev->queue, p);
2215 }
2217 
2218 int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
2219 {
2220  struct video_device *vdev = video_devdata(file);
2221 
2222  if (vb2_queue_is_busy(vdev, file))
2223  return -EBUSY;
2224  return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
2225 }
2227 
2228 int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
2229 {
2230  struct video_device *vdev = video_devdata(file);
2231 
2232  if (vb2_queue_is_busy(vdev, file))
2233  return -EBUSY;
2234  return vb2_streamon(vdev->queue, i);
2235 }
2237 
2238 int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
2239 {
2240  struct video_device *vdev = video_devdata(file);
2241 
2242  if (vb2_queue_is_busy(vdev, file))
2243  return -EBUSY;
2244  return vb2_streamoff(vdev->queue, i);
2245 }
2247 
2248 /* v4l2_file_operations helpers */
2249 
2250 int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
2251 {
2252  struct video_device *vdev = video_devdata(file);
2253 
2254  return vb2_mmap(vdev->queue, vma);
2255 }
2257 
2258 int vb2_fop_release(struct file *file)
2259 {
2260  struct video_device *vdev = video_devdata(file);
2261 
2262  if (file->private_data == vdev->queue->owner) {
2263  vb2_queue_release(vdev->queue);
2264  vdev->queue->owner = NULL;
2265  }
2266  return v4l2_fh_release(file);
2267 }
2269 
2270 ssize_t vb2_fop_write(struct file *file, char __user *buf,
2271  size_t count, loff_t *ppos)
2272 {
2273  struct video_device *vdev = video_devdata(file);
2274  struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
2275  int err = -EBUSY;
2276 
2277  if (lock && mutex_lock_interruptible(lock))
2278  return -ERESTARTSYS;
2279  if (vb2_queue_is_busy(vdev, file))
2280  goto exit;
2281  err = vb2_write(vdev->queue, buf, count, ppos,
2282  file->f_flags & O_NONBLOCK);
2283  if (vdev->queue->fileio)
2284  vdev->queue->owner = file->private_data;
2285 exit:
2286  if (lock)
2287  mutex_unlock(lock);
2288  return err;
2289 }
2291 
2292 ssize_t vb2_fop_read(struct file *file, char __user *buf,
2293  size_t count, loff_t *ppos)
2294 {
2295  struct video_device *vdev = video_devdata(file);
2296  struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
2297  int err = -EBUSY;
2298 
2299  if (lock && mutex_lock_interruptible(lock))
2300  return -ERESTARTSYS;
2301  if (vb2_queue_is_busy(vdev, file))
2302  goto exit;
2303  err = vb2_read(vdev->queue, buf, count, ppos,
2304  file->f_flags & O_NONBLOCK);
2305  if (vdev->queue->fileio)
2306  vdev->queue->owner = file->private_data;
2307 exit:
2308  if (lock)
2309  mutex_unlock(lock);
2310  return err;
2311 }
2313 
2314 unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
2315 {
2316  struct video_device *vdev = video_devdata(file);
2317  struct vb2_queue *q = vdev->queue;
2318  struct mutex *lock = q->lock ? q->lock : vdev->lock;
2319  unsigned long req_events = poll_requested_events(wait);
2320  unsigned res;
2321  void *fileio;
2322  bool must_lock = false;
2323 
2324  /* Try to be smart: only lock if polling might start fileio,
2325  otherwise locking will only introduce unwanted delays. */
2326  if (q->num_buffers == 0 && q->fileio == NULL) {
2327  if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
2328  (req_events & (POLLIN | POLLRDNORM)))
2329  must_lock = true;
2330  else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
2331  (req_events & (POLLOUT | POLLWRNORM)))
2332  must_lock = true;
2333  }
2334 
2335  /* If locking is needed, but this helper doesn't know how, then you
2336  shouldn't be using this helper but you should write your own. */
2337  WARN_ON(must_lock && !lock);
2338 
2339  if (must_lock && lock && mutex_lock_interruptible(lock))
2340  return POLLERR;
2341 
2342  fileio = q->fileio;
2343 
2344  res = vb2_poll(vdev->queue, file, wait);
2345 
2346  /* If fileio was started, then we have a new queue owner. */
2347  if (must_lock && !fileio && q->fileio)
2348  q->owner = file->private_data;
2349  if (must_lock && lock)
2350  mutex_unlock(lock);
2351  return res;
2352 }
2354 
2355 #ifndef CONFIG_MMU
2356 unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
2357  unsigned long len, unsigned long pgoff, unsigned long flags)
2358 {
2359  struct video_device *vdev = video_devdata(file);
2360 
2361  return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
2362 }
2364 #endif
2365 
2366 /* vb2_ops helpers. Only use if vq->lock is non-NULL. */
2367 
2368 void vb2_ops_wait_prepare(struct vb2_queue *vq)
2369 {
2370  mutex_unlock(vq->lock);
2371 }
2373 
2374 void vb2_ops_wait_finish(struct vb2_queue *vq)
2375 {
2376  mutex_lock(vq->lock);
2377 }
2379 
2380 MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
2381 MODULE_AUTHOR("Pawel Osciak <[email protected]>, Marek Szyprowski");
2382 MODULE_LICENSE("GPL");