Linux Kernel 3.7.1
udl_gem.c
/*
 * Copyright (C) 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */

#include <drm/drmP.h>
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

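/*
 * GEM buffer-object support for the DisplayLink USB (udl) KMS driver.
 * Native objects are backed by shmem pages; objects imported through
 * dma-buf/PRIME reuse the exporter's pages via an sg_table instead.
 */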
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
                                            size_t size)
{
        struct udl_gem_object *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (obj == NULL)
                return NULL;

        if (drm_gem_object_init(dev, &obj->base, size) != 0) {
                kfree(obj);
                return NULL;
        }

        return obj;
}

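/*
 * Allocate a GEM object and create a userspace handle for it.  The
 * reference taken by drm_gem_handle_create() keeps the object alive,
 * so the allocation reference is dropped before returning.
 */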
static int
udl_gem_create(struct drm_file *file,
               struct drm_device *dev,
               uint64_t size,
               uint32_t *handle_p)
{
        struct udl_gem_object *obj;
        int ret;
        u32 handle;

        size = roundup(size, PAGE_SIZE);

        obj = udl_gem_alloc_object(dev, size);
        if (obj == NULL)
                return -ENOMEM;

        ret = drm_gem_handle_create(file, &obj->base, &handle);
        if (ret) {
                drm_gem_object_release(&obj->base);
                kfree(obj);
                return ret;
        }

        drm_gem_object_unreference(&obj->base);
        *handle_p = handle;
        return 0;
}

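/*
 * Dumb-buffer creation: the pitch is the width times the byte size of
 * a pixel ((bpp + 1) / 8 rounds a depth such as 15 bpp up to 2 bytes),
 * and the object size is pitch * height, rounded up to a whole page by
 * udl_gem_create().
 */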
int udl_dumb_create(struct drm_file *file,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        args->pitch = args->width * ((args->bpp + 1) / 8);
        args->size = args->pitch * args->height;
        return udl_gem_create(file, dev,
                              args->size, &args->handle);
}

int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
                     uint32_t handle)
{
        return drm_gem_handle_delete(file, handle);
}

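/*
 * mmap() of a udl object goes through the normal GEM path, but the
 * backing store is ordinary struct pages inserted one at a time by the
 * fault handler with vm_insert_page(), so the VMA is switched from
 * VM_PFNMAP to VM_MIXEDMAP.
 */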
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        return ret;
}

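/*
 * Page-fault handler for mmap'd objects: look up the shmem page that
 * backs the faulting address and insert it into the user's VMA.  The
 * pages must already have been pinned by udl_gem_get_pages() (done in
 * udl_gem_mmap() when the fake offset is set up); otherwise SIGBUS.
 */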
int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
        struct page *page;
        unsigned int page_offset;
        int ret = 0;

        page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
                PAGE_SHIFT;

        if (!obj->pages)
                return VM_FAULT_SIGBUS;

        page = obj->pages[page_offset];
        ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
        switch (ret) {
        case -EAGAIN:
                set_need_resched();
                /* fall through */
        case 0:
        case -ERESTARTSYS:
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}

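/*
 * udl creates all of its objects through udl_gem_alloc_object(), so
 * the drm core's gem_init_object hook should never be reached; BUG()
 * if it is.
 */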
int udl_gem_init_object(struct drm_gem_object *obj)
{
        BUG();

        return 0;
}

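/*
 * Pin the object's backing store: read (and thereby reference) every
 * shmem page into obj->pages so the fault handler and udl_gem_vmap()
 * can use them.  A no-op if the pages are already there.
 */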
static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
{
        int page_count, i;
        struct page *page;
        struct inode *inode;
        struct address_space *mapping;

        if (obj->pages)
                return 0;

        page_count = obj->base.size / PAGE_SIZE;
        BUG_ON(obj->pages != NULL);
        obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
        if (obj->pages == NULL)
                return -ENOMEM;

        inode = obj->base.filp->f_path.dentry->d_inode;
        mapping = inode->i_mapping;
        gfpmask |= mapping_gfp_mask(mapping);

        for (i = 0; i < page_count; i++) {
                page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
                if (IS_ERR(page))
                        goto err_pages;
                obj->pages[i] = page;
        }

        return 0;
err_pages:
        while (i--)
                page_cache_release(obj->pages[i]);
        drm_free_large(obj->pages);
        obj->pages = NULL;
        return PTR_ERR(page);
}

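/*
 * Drop the page references taken by udl_gem_get_pages().  For imported
 * dma-bufs the pages belong to the exporter, so only the page array
 * itself is freed.
 */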
static void udl_gem_put_pages(struct udl_gem_object *obj)
{
        int page_count = obj->base.size / PAGE_SIZE;
        int i;

        if (obj->base.import_attach) {
                drm_free_large(obj->pages);
                obj->pages = NULL;
                return;
        }

        for (i = 0; i < page_count; i++)
                page_cache_release(obj->pages[i]);

        drm_free_large(obj->pages);
        obj->pages = NULL;
}

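/*
 * Map the object into the kernel's address space.  Imported objects
 * are mapped by their exporter via dma_buf_vmap(); native objects pin
 * their shmem pages and vmap() them.
 */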
int udl_gem_vmap(struct udl_gem_object *obj)
{
        int page_count = obj->base.size / PAGE_SIZE;
        int ret;

        if (obj->base.import_attach) {
                obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
                if (!obj->vmapping)
                        return -ENOMEM;
                return 0;
        }

        ret = udl_gem_get_pages(obj, GFP_KERNEL);
        if (ret)
                return ret;

        obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
        if (!obj->vmapping)
                return -ENOMEM;
        return 0;
}

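/*
 * Undo udl_gem_vmap(): hand imported mappings back to the exporter,
 * otherwise vunmap() the kernel mapping and unpin the pages.
 */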
void udl_gem_vunmap(struct udl_gem_object *obj)
{
        if (obj->base.import_attach) {
                dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
                return;
        }

        if (obj->vmapping)
                vunmap(obj->vmapping);

        udl_gem_put_pages(obj);
}

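/*
 * Final teardown once the last reference is gone: drop the kernel
 * mapping, detach from the exporting dma-buf if this was an import,
 * then release the pages and the fake mmap offset.
 */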
void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
        struct udl_gem_object *obj = to_udl_bo(gem_obj);

        if (obj->vmapping)
                udl_gem_vunmap(obj);

        if (gem_obj->import_attach)
                drm_prime_gem_destroy(gem_obj, obj->sg);

        if (obj->pages)
                udl_gem_put_pages(obj);

        if (gem_obj->map_list.map)
                drm_gem_free_mmap_offset(gem_obj);
}

/*
 * The dumb interface doesn't work with the GEM straight mmap
 * interface; it expects the mmap to be done on the drm fd, like
 * normal.  A hedged sketch of the userspace sequence follows.
 */
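/*
 * A minimal sketch of the userspace side, assuming a drm fd opened on
 * the udl device and with error checking omitted (ioctl structures as
 * defined in drm_mode.h):
 *
 *      struct drm_mode_create_dumb create = {
 *              .width = 640, .height = 480, .bpp = 32,
 *      };
 *      ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *      struct drm_mode_map_dumb map = { .handle = create.handle };
 *      ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);  // fills map.offset
 *
 *      void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, map.offset);  // the drm fd
 *
 * The MAP_DUMB ioctl lands in udl_gem_mmap() below, which pins the
 * pages and hands back the fake offset for the final mmap() call.
 */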
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
                 uint32_t handle, uint64_t *offset)
{
        struct udl_gem_object *gobj;
        struct drm_gem_object *obj;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);
        obj = drm_gem_object_lookup(dev, file, handle);
        if (obj == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
        gobj = to_udl_bo(obj);

        ret = udl_gem_get_pages(gobj, GFP_KERNEL);
        if (ret)
                goto out;
        if (!gobj->base.map_list.map) {
                ret = drm_gem_create_mmap_offset(obj);
                if (ret)
                        goto out;
        }

        *offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;

out:
        drm_gem_object_unreference(&gobj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

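/*
 * Wrap an imported sg_table in a udl_gem_object: allocate the object
 * at the page-aligned dma-buf size and fill obj->pages from the sg
 * list.
 */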
static int udl_prime_create(struct drm_device *dev,
                            size_t size,
                            struct sg_table *sg,
                            struct udl_gem_object **obj_p)
{
        struct udl_gem_object *obj;
        int npages;

        npages = size / PAGE_SIZE;

        *obj_p = NULL;
        obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
        if (!obj)
                return -ENOMEM;

        obj->sg = sg;
        obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
        if (obj->pages == NULL) {
                DRM_ERROR("obj pages is NULL %d\n", npages);
                return -ENOMEM;
        }

        drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

        *obj_p = obj;
        return 0;
}

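/*
 * PRIME import: attach to the foreign dma-buf, map the attachment to
 * get an sg_table, and build a udl object around it.  The attachment
 * is kept in import_attach so free/vmap can route through the
 * exporter.
 */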
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
                                            struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sg;
        struct udl_gem_object *uobj;
        int ret;

        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto fail_detach;
        }

        ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
        if (ret) {
                goto fail_unmap;
        }

        uobj->base.import_attach = attach;

        return &uobj->base;

fail_unmap:
        dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        return ERR_PTR(ret);
}