Linux Kernel 3.7.1
drivers/base/dma-buf.c
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <[email protected]>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <[email protected]>, Rob Clark <[email protected]> and
 * Daniel Vetter <[email protected]> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>

static inline int is_dma_buf_file(struct file *);

static int dma_buf_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	dmabuf->ops->release(dmabuf);
	kfree(dmabuf);
	return 0;
}

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_release,
	.mmap		= dma_buf_mmap_internal,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * @priv:	[in]	Attach private data of allocator to this buffer
 * @ops:	[in]	Attach allocator-defined dma buf ops to the new buffer.
 * @size:	[in]	Size of the buffer
 * @flags:	[in]	mode flags for the file.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. On missing ops or
 * allocation failure, returns a negative error wrapped in ERR_PTR().
 */
struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
				size_t size, int flags)
{
	struct dma_buf *dmabuf;
	struct file *file;

	if (WARN_ON(!priv || !ops
			  || !ops->map_dma_buf
			  || !ops->unmap_dma_buf
			  || !ops->release
			  || !ops->kmap_atomic
			  || !ops->kmap
			  || !ops->mmap)) {
		return ERR_PTR(-EINVAL);
	}

	dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL);
	if (dmabuf == NULL)
		return ERR_PTR(-ENOMEM);

	dmabuf->priv = priv;
	dmabuf->ops = ops;
	dmabuf->size = size;

	/* anon_inode_getfile() can fail; don't leak the dma_buf if it does */
	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
	if (IS_ERR(file)) {
		kfree(dmabuf);
		return ERR_CAST(file);
	}

	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	return dmabuf;
}
EXPORT_SYMBOL_GPL(dma_buf_export);

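/*
 * Editor's note: a usage sketch, not part of the original file. A minimal
 * exporter might wrap dma_buf_export() as below; "my_buffer" and
 * "my_dmabuf_ops" are hypothetical, and my_dmabuf_ops must provide the
 * mandatory callbacks (map_dma_buf, unmap_dma_buf, release, kmap_atomic,
 * kmap and mmap) checked above.
 *
 *	static struct dma_buf *my_export(struct my_buffer *buf)
 *	{
 *		return dma_buf_export(buf, &my_dmabuf_ops, buf->size,
 *				      O_CLOEXEC);
 *	}
 */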
/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int error, fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		return error;
	fd = error;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);

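/*
 * Editor's note: a usage sketch, not part of the original file. An ioctl
 * handler that shares a buffer with userspace would do something like:
 *
 *	int fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;	// fd table full, or bad dmabuf
 *	// return fd to userspace; note that fd_install() above does not take
 *	// an extra reference, so the caller must ensure the dmabuf's file
 *	// reference stays valid for the new fd.
 */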
/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);

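/*
 * Editor's note: a usage sketch, not part of the original file. An importer
 * that receives an fd from userspace pairs the two calls above; every
 * successful dma_buf_get() must be balanced by a dma_buf_put():
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *	// ... use the buffer ...
 *	dma_buf_put(dmabuf);
 */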
/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Returns struct dma_buf_attachment * for this attachment; may return
 * negative error codes.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
	if (attach == NULL)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;

	mutex_lock(&dmabuf->lock);

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, dev, attach);
		if (ret)
			goto err_attach;
	}
	list_add(&attach->node, &dmabuf->attachments);

	mutex_unlock(&dmabuf->lock);
	return attach;

err_attach:
	kfree(attach);
	mutex_unlock(&dmabuf->lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments
 * list; optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is freed after this call.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	mutex_lock(&dmabuf->lock);
	list_del(&attach->node);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	mutex_unlock(&dmabuf->lock);
	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

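/*
 * Editor's note: a usage sketch, not part of the original file. A device
 * driver importing a buffer attaches once and detaches when done; "dev" is
 * the importing struct device:
 *
 *	struct dma_buf_attachment *attach = dma_buf_attach(dmabuf, dev);
 *	if (IS_ERR(attach))
 *		return PTR_ERR(attach);
 *	// ... map/unmap the attachment as needed ...
 *	dma_buf_detach(dmabuf, attach);
 */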
/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; may return
 * an ERR_PTR on failure.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table = ERR_PTR(-EINVAL);

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps the buffer and might deallocate the
 * associated scatterlist. Is a wrapper for unmap_dma_buf() of dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
					   direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);

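/*
 * Editor's note: a usage sketch, not part of the original file. A typical
 * importer maps the attachment around a DMA transfer; the returned sg_table
 * describes the buffer in device address space:
 *
 *	struct sg_table *sgt;
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	// ... program the device with sgt->sgl and perform DMA ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 */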
/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from
 * the cpu in the kernel context. Calls begin_cpu_access to allow
 * exporter-specific preparations. Coherency is only guaranteed in the
 * specified range for the specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @start:	[in]	start of range for cpu access.
 * @len:	[in]	length of range for cpu access.
 * @direction:	[in]	direction of the cpu access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len,
						    direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @start:	[in]	start of range for cpu access.
 * @len:	[in]	length of range for cpu access.
 * @direction:	[in]	direction of the cpu access.
 */
void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
			    enum dma_data_direction direction)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

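/*
 * Editor's note: a usage sketch, not part of the original file. Kernel-side
 * CPU access is bracketed by the two calls above; here for a read of the
 * first PAGE_SIZE bytes of the buffer:
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, 0, PAGE_SIZE,
 *				       DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *	// ... access the range, e.g. via dma_buf_kmap()/dma_buf_kunmap() ...
 *	dma_buf_end_cpu_access(dmabuf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 */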
/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);

/**
 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap_atomic.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
			   void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap_atomic)
		dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);

/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space.
 * The same restrictions as for kmap and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);

/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
		    void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap)
		dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);

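/*
 * Editor's note: a usage sketch, not part of the original file. Page-wise CPU
 * access with the non-atomic variants, inside a begin/end_cpu_access pair;
 * "npages" is a hypothetical page count (dmabuf->size >> PAGE_SHIFT):
 *
 *	for (i = 0; i < npages; i++) {
 *		void *vaddr = dma_buf_kmap(dmabuf, i);
 *
 *		// ... read or modify up to PAGE_SIZE bytes at vaddr ...
 *		dma_buf_kunmap(dmabuf, i, vaddr);
 *	}
 */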

/**
 * dma_buf_mmap - Setup up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of
 * the dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	if (vma->vm_file)
		fput(vma->vm_file);

	vma->vm_file = get_file(dmabuf->file);

	vma->vm_pgoff = pgoff;

	return dmabuf->ops->mmap(dmabuf, vma);
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);

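/*
 * Editor's note: a usage sketch, not part of the original file. A driver
 * whose own file_operations.mmap handler wants to expose an imported buffer
 * can forward the vma wholesale, mapping from the start of the buffer:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_context *ctx = file->private_data; // hypothetical
 *
 *		return dma_buf_mmap(ctx->dmabuf, vma, 0);
 *	}
 */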
/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linear in kernel space for high use objects.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf))
		return NULL;

	if (dmabuf->ops->vmap)
		return dmabuf->ops->vmap(dmabuf);
	return NULL;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @vaddr:	[in]	vmap to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	if (dmabuf->ops->vunmap)
		dmabuf->ops->vunmap(dmabuf, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
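/*
 * Editor's note: a usage sketch, not part of the original file. vmap gives a
 * single linear kernel mapping of the whole buffer; always check for NULL,
 * since the exporter may not implement vmap at all:
 *
 *	void *vaddr = dma_buf_vmap(dmabuf);
 *	if (!vaddr)
 *		return -ENOMEM;	// or fall back to dma_buf_kmap()
 *	// ... access dmabuf->size bytes at vaddr ...
 *	dma_buf_vunmap(dmabuf, vaddr);
 */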