v4l2-mem2mem.c (Linux kernel 3.7.1)
/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <[email protected]>
 * Marek Szyprowski, <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <[email protected]>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)                                            \
        do {                                                            \
                if (debug)                                              \
                        printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
        } while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED            (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING           (1 << 1)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE      (1 << 30)

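/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */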
struct v4l2_m2m_dev {
        struct v4l2_m2m_ctx     *curr_ctx;

        struct list_head        job_queue;
        spinlock_t              job_spinlock;

        struct v4l2_m2m_ops     *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
                                                enum v4l2_buf_type type)
{
        if (V4L2_TYPE_IS_OUTPUT(type))
                return &m2m_ctx->out_q_ctx;
        else
                return &m2m_ctx->cap_q_ctx;
}
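
/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 */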
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
                                  enum v4l2_buf_type type)
{
        struct v4l2_m2m_queue_ctx *q_ctx;

        q_ctx = get_queue_ctx(m2m_ctx, type);
        if (!q_ctx)
                return NULL;

        return &q_ctx->q;
}
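
/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 */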
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }

        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
        return &b->vb;
}
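
/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers
 * and return it
 */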
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }
        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return &b->vb;
}

/*
 * Scheduling handlers
 */

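/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 */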
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;
        void *ret = NULL;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx)
                ret = m2m_dev->curr_ctx->priv;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        return ret;
}
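
/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get the next transaction (if present) from the waiting jobs list and run it.
 */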
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (NULL != m2m_dev->curr_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Another instance is running, won't run now\n");
                return;
        }

        if (list_empty(&m2m_dev->job_queue)) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("No job pending\n");
                return;
        }

        m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
                                             struct v4l2_m2m_ctx, queue);
        m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}
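
/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so
 * @m2m_ctx:	m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to
 * run: streaming has to be on for both queues, at least one source buffer has
 * to be queued, and at least one destination buffer has to be queued.
 *
 * There may also be additional, custom requirements. In such case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready, e.g. when more than one src/dst buffer
 * is required per transaction.
 */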
static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags_job, flags;

        m2m_dev = m2m_ctx->m2m_dev;
        dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

        if (!m2m_ctx->out_q_ctx.q.streaming
            || !m2m_ctx->cap_q_ctx.q.streaming) {
                dprintk("Streaming needs to be on for both queues\n");
                return;
        }

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
        if (m2m_ctx->job_flags & TRANS_QUEUED) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("On job queue already\n");
                return;
        }

        spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
        if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No input buffers available\n");
                return;
        }
        if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No output buffers available\n");
                return;
        }
        spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);

        if (m2m_dev->m2m_ops->job_ready
            && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("Driver not ready\n");
                return;
        }

        list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
        m2m_ctx->job_flags |= TRANS_QUEUED;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        v4l2_m2m_try_run(m2m_dev);
}
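
/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the device_run() callback has
 * been called on the driver. To prevent recursion, it should not be called
 * directly from the device_run() callback though.
 */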
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
                         struct v4l2_m2m_ctx *m2m_ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Called by an instance not currently running\n");
                return;
        }

        list_del(&m2m_dev->curr_ctx->queue);
        m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
        wake_up(&m2m_dev->curr_ctx->finished);
        m2m_dev->curr_ctx = NULL;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        /* This instance might have more buffers ready, but since we do not
         * allow more than one job on the job_queue per instance, each has
         * to be scheduled separately after the previous one finishes. */
        v4l2_m2m_try_schedule(m2m_ctx);
        v4l2_m2m_try_run(m2m_dev);
}
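
/*
 * A typical caller of v4l2_m2m_job_finish() is the driver's interrupt
 * handler, once the hardware signals that a transaction has completed.
 * A minimal sketch, assuming hypothetical driver-side names (my_dev, my_irq,
 * and the dev->curr_ctx bookkeeping) that are not part of this framework:
 *
 *	static irqreturn_t my_irq(int irq, void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		struct vb2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(dev->curr_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(dev->curr_ctx);
 *		vb2_buffer_done(src, VB2_BUF_STATE_DONE);
 *		vb2_buffer_done(dst, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, dev->curr_ctx);
 *		return IRQ_HANDLED;
 *	}
 */

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 */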
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                     struct v4l2_requestbuffers *reqbufs)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
        return vb2_reqbufs(vq, reqbufs);
}
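
/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */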
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret = 0;
        unsigned int i;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_querybuf(vq, buf);

        /* Adjust MMAP memory offsets for the CAPTURE queue */
        if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
                if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
                        for (i = 0; i < buf->length; ++i)
                                buf->m.planes[i].m.mem_offset
                                        += DST_QUEUE_OFF_BASE;
                } else {
                        buf->m.offset += DST_QUEUE_OFF_BASE;
                }
        }

        return ret;
}
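
/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 */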
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_qbuf(vq, buf);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
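
/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 */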
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                   struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
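
/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 */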
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      enum v4l2_buf_type type)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, type);
        ret = vb2_streamon(vq, type);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
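
/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 */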
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                       enum v4l2_buf_type type)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, type);
        return vb2_streamoff(vq, type);
}
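
/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed, while read will be
 * returned in case of the destination queue.
 */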
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                           struct poll_table_struct *wait)
{
        struct video_device *vfd = video_devdata(file);
        unsigned long req_events = poll_requested_events(wait);
        struct vb2_queue *src_q, *dst_q;
        struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
        unsigned int rc = 0;
        unsigned long flags;

        if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
                struct v4l2_fh *fh = file->private_data;

                if (v4l2_event_pending(fh))
                        rc = POLLPRI;
                else if (req_events & POLLPRI)
                        poll_wait(file, &fh->wait, wait);
                if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
                        return rc;
        }

        src_q = v4l2_m2m_get_src_vq(m2m_ctx);
        dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

        /*
         * There has to be at least one buffer queued on each queued_list,
         * which means either in driver already or waiting for driver to
         * claim it and start processing.
         */
        if ((!src_q->streaming || list_empty(&src_q->queued_list))
            && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
                rc |= POLLERR;
                goto end;
        }

        if (m2m_ctx->m2m_dev->m2m_ops->unlock)
                m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);

        poll_wait(file, &src_q->done_wq, wait);
        poll_wait(file, &dst_q->done_wq, wait);

        if (m2m_ctx->m2m_dev->m2m_ops->lock)
                m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);

        spin_lock_irqsave(&src_q->done_lock, flags);
        if (!list_empty(&src_q->done_list))
                src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
                                          done_entry);
        if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
                       || src_vb->state == VB2_BUF_STATE_ERROR))
                rc |= POLLOUT | POLLWRNORM;
        spin_unlock_irqrestore(&src_q->done_lock, flags);

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (!list_empty(&dst_q->done_list))
                dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
                                          done_entry);
        if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
                       || dst_vb->state == VB2_BUF_STATE_ERROR))
                rc |= POLLIN | POLLRDNORM;
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
        return rc;
}
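
/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * Call from the driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for videobuffer, which will receive normal per-queue offsets and
 * proper vb2 queue pointers. The queue to mmap is chosen based on the offset:
 * destination buffer offsets start at DST_QUEUE_OFF_BASE.
 */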
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        struct vb2_queue *vq;

        if (offset < DST_QUEUE_OFF_BASE) {
                vq = v4l2_m2m_get_src_vq(m2m_ctx);
        } else {
                vq = v4l2_m2m_get_dst_vq(m2m_ctx);
                vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
        }

        return vb2_mmap(vq, vma);
}
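
/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * Usually called from the driver's probe() function.
 */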
struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
{
        struct v4l2_m2m_dev *m2m_dev;

        if (!m2m_ops)
                return ERR_PTR(-EINVAL);

        BUG_ON(!m2m_ops->device_run);
        BUG_ON(!m2m_ops->job_abort);

        m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
        if (!m2m_dev)
                return ERR_PTR(-ENOMEM);

        m2m_dev->curr_ctx = NULL;
        m2m_dev->m2m_ops = m2m_ops;
        INIT_LIST_HEAD(&m2m_dev->job_queue);
        spin_lock_init(&m2m_dev->job_spinlock);

        return m2m_dev;
}
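
/*
 * A minimal sketch of typical use from a driver's probe() function, assuming
 * hypothetical driver-side names (my_m2m_ops, my_device_run, my_job_ready,
 * my_job_abort, dev) that are not part of this framework:
 *
 *	static struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_ready	= my_job_ready,
 *		.job_abort	= my_job_abort,
 *	};
 *
 *	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 */

/**
 * v4l2_m2m_release() - clean up and free a m2m_dev structure
 *
 * Usually called from the driver's remove() function.
 */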
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
        kfree(m2m_dev);
}
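
/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 * @drv_priv:	driver's instance private data
 * @queue_init:	a callback for queue type-specific initialization function to
 *		be used for initializing vb2_queues
 *
 * Usually called from the driver's open() function.
 */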
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
                void *drv_priv,
                int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
        struct v4l2_m2m_ctx *m2m_ctx;
        struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
        int ret;

        m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
        if (!m2m_ctx)
                return ERR_PTR(-ENOMEM);

        m2m_ctx->priv = drv_priv;
        m2m_ctx->m2m_dev = m2m_dev;
        init_waitqueue_head(&m2m_ctx->finished);

        out_q_ctx = &m2m_ctx->out_q_ctx;
        cap_q_ctx = &m2m_ctx->cap_q_ctx;

        INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
        INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
        spin_lock_init(&out_q_ctx->rdy_spinlock);
        spin_lock_init(&cap_q_ctx->rdy_spinlock);

        INIT_LIST_HEAD(&m2m_ctx->queue);

        ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);

        if (ret)
                goto err;

        return m2m_ctx;
err:
        kfree(m2m_ctx);
        return ERR_PTR(ret);
}
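
/*
 * A minimal sketch of how a driver's open() handler would use
 * v4l2_m2m_ctx_init(), assuming hypothetical driver-side names (my_qops,
 * my_queue_init, ctx, dev) that are not part of this framework:
 *
 *	static int my_queue_init(void *priv, struct vb2_queue *src_vq,
 *				 struct vb2_queue *dst_vq)
 *	{
 *		int ret;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *		src_vq->io_modes = VB2_MMAP;
 *		src_vq->drv_priv = priv;
 *		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		src_vq->ops = &my_qops;
 *		src_vq->mem_ops = &vb2_dma_contig_memops;
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		dst_vq->io_modes = VB2_MMAP;
 *		dst_vq->drv_priv = priv;
 *		dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		dst_vq->ops = &my_qops;
 *		dst_vq->mem_ops = &vb2_dma_contig_memops;
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 *	Then, in the driver's open():
 *
 *	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
 *	if (IS_ERR(ctx->m2m_ctx))
 *		return PTR_ERR(ctx->m2m_ctx);
 */

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from the driver's release() function.
 */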
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags;

        m2m_dev = m2m_ctx->m2m_dev;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_ctx->job_flags & TRANS_RUNNING) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
                dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
                wait_event(m2m_ctx->finished,
                           !(m2m_ctx->job_flags & TRANS_RUNNING));
        } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
                list_del(&m2m_ctx->queue);
                m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("m2m_ctx: %p had been on queue and was removed\n",
                        m2m_ctx);
        } else {
                /* Do nothing, was not on queue/running */
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
        }

        vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
        vb2_queue_release(&m2m_ctx->out_q_ctx.q);

        kfree(m2m_ctx);
}
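
/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list
 *
 * Call from the driver's buf_queue() vb2_ops callback.
 */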
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
{
        struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags;

        q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
        if (!q_ctx)
                return;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_add_tail(&b->list, &q_ctx->rdy_queue);
        q_ctx->num_rdy++;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}