32 udl_gem_create(struct drm_file *file,
54         drm_gem_object_unreference(&obj->base);
65         return udl_gem_create(file, dev,
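The fragments above (lines 32-65) come from the buffer-allocation path of drivers/gpu/drm/udl/udl_gem.c, the DisplayLink GEM code. For context, here is a sketch of the dumb-buffer entry point that line 65 sits in, reconstructed from the mainline driver of the 3.x era; the pitch/size arithmetic and everything outside the quoted lines are assumptions, not quotations:

int udl_dumb_create(struct drm_file *file,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        /* Bytes per scanline, rounding bpp up to whole bytes. */
        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;
        /* Line 65 above: allocate the GEM object and return its
         * handle through args->handle. */
        return udl_gem_create(file, dev, args->size, &args->handle);
}

drm_gem_handle_create() takes a reference of its own, which is why udl_gem_create() can drop the creation reference at line 54 and still hand back a live handle.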
96         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
100                 return VM_FAULT_SIGBUS;
103         ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
109                 return VM_FAULT_NOPAGE;
113                 return VM_FAULT_SIGBUS;
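Lines 96-113 belong to the per-page fault handler that backs an mmap of a udl object. A sketch of the whole function, assuming the pre-4.10 fault API (vmf->virtual_address was later replaced by vmf->address); only the five quoted lines are verbatim:

int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
        struct page *page;
        unsigned int page_offset;
        int ret = 0;

        /* Line 96: which page of the object the fault address hits. */
        page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
                PAGE_SHIFT;

        if (!obj->pages)
                return VM_FAULT_SIGBUS;        /* line 100: nothing pinned yet */

        page = obj->pages[page_offset];
        ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
        switch (ret) {
        case -EAGAIN:
        case 0:
        case -ERESTARTSYS:
                return VM_FAULT_NOPAGE;        /* line 109: mapped, or retry */
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;        /* line 113 */
        }
}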
136         obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
140         inode = obj->base.filp->f_path.dentry->d_inode;
142         gfpmask |= mapping_gfp_mask(mapping);
144         for (i = 0; i < page_count; i++) {
155         drm_free_large(obj->pages);
157         return PTR_ERR(page);
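Lines 136-157 are from udl_gem_get_pages(), which pins the object's shmem pages into a plain page-pointer array. A sketch of the loop opened at line 144 and its unwind path; shmem_read_mapping_page_gfp() and the err_pages label are how the mainline driver of this era reads, and should be treated as reconstruction:

        for (i = 0; i < page_count; i++) {        /* line 144 */
                page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
                if (IS_ERR(page))
                        goto err_pages;
                obj->pages[i] = page;
        }
        return 0;

err_pages:
        /* Drop the page references taken so far, then the array. */
        while (i--)
                page_cache_release(obj->pages[i]);
        drm_free_large(obj->pages);        /* line 155 */
        obj->pages = NULL;
        return PTR_ERR(page);              /* line 157 */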
165         if (obj->base.import_attach) {
166                 drm_free_large(obj->pages);
171         for (i = 0; i < page_count; i++)
174         drm_free_large(obj->pages);
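Lines 165-174 are the matching release path, udl_gem_put_pages(). The branch at line 165 is the important part: pages of an imported dma-buf belong to the exporter, so only the local bookkeeping array is freed, while locally pinned shmem pages must each have their reference dropped. A sketch, with everything outside the quoted lines reconstructed:

static void udl_gem_put_pages(struct udl_gem_object *obj)
{
        int page_count = obj->base.size / PAGE_SIZE;
        int i;

        /* Lines 165-166: imported buffer, free only the array. */
        if (obj->base.import_attach) {
                drm_free_large(obj->pages);
                obj->pages = NULL;
                return;
        }

        /* Lines 171-174: locally pinned pages, release each one. */
        for (i = 0; i < page_count; i++)
                page_cache_release(obj->pages[i]);

        drm_free_large(obj->pages);
        obj->pages = NULL;
}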
183         if (obj->base.import_attach) {
202         if (obj->base.import_attach) {
210         udl_gem_put_pages(obj);
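Lines 183-210 come from the kernel-mapping pair udl_gem_vmap()/udl_gem_vunmap(), which show the same import split: an imported buffer is mapped through the exporter via dma_buf_vmap()/dma_buf_vunmap(), a native one via vmap() over the pinned pages. A sketch under those assumptions:

int udl_gem_vmap(struct udl_gem_object *obj)
{
        int page_count = obj->base.size / PAGE_SIZE;
        int ret;

        if (obj->base.import_attach) {        /* line 183 */
                obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
                if (!obj->vmapping)
                        return -ENOMEM;
                return 0;
        }

        ret = udl_gem_get_pages(obj, GFP_KERNEL);
        if (ret)
                return ret;

        obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
        if (!obj->vmapping)
                return -ENOMEM;
        return 0;
}

void udl_gem_vunmap(struct udl_gem_object *obj)
{
        if (obj->base.import_attach) {        /* line 202 */
                dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
                return;
        }

        if (obj->vmapping)
                vunmap(obj->vmapping);

        udl_gem_put_pages(obj);        /* line 210 */
}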
220         if (gem_obj->import_attach)
224                 udl_gem_put_pages(obj);
226         if (gem_obj->map_list.map)
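Lines 220-226 are from udl_gem_free_object(), the destructor that undoes each of the above steps in order. A sketch; the to_udl_bo() container cast and the obj->sg field are assumptions carried over from the same era of the driver:

void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
        struct udl_gem_object *obj = to_udl_bo(gem_obj);

        if (obj->vmapping)
                udl_gem_vunmap(obj);

        /* Line 220: imported objects detach from their exporter. */
        if (gem_obj->import_attach)
                drm_prime_gem_destroy(gem_obj, obj->sg);

        if (obj->pages)
                udl_gem_put_pages(obj);        /* line 224 */

        /* Line 226: drop the fake mmap offset, if one was created. */
        if (gem_obj->map_list.map)
                drm_gem_free_mmap_offset(gem_obj);
}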
236         struct drm_gem_object *obj;
250         if (!gobj->base.map_list.map) {
259         drm_gem_object_unreference(&gobj->base);
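Lines 236-259 are from udl_gem_mmap(), which implements the dumb-buffer mmap ioctl: userspace does not mmap the GEM object directly, it asks the driver for a fake offset and then mmaps the drm fd at that offset. A sketch of the middle of the function; the map_list.hash.key expression is how pre-3.12 kernels exposed the offset and is an assumption here:

        ret = udl_gem_get_pages(gobj, GFP_KERNEL);
        if (ret)
                goto out;
        if (!gobj->base.map_list.map) {        /* line 250 */
                ret = drm_gem_create_mmap_offset(obj);
                if (ret)
                        goto out;
        }
        *offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;

out:
        drm_gem_object_unreference(&gobj->base);        /* line 259 */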
265 static int udl_prime_create(struct drm_device *dev,
281         obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
283                 DRM_ERROR("obj pages is NULL %d\n", npages);
304                 return ERR_CAST(attach);
312         ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
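Lines 304-312 are from udl_gem_prime_import(), the dma-buf import entry point: attach to the foreign buffer, map its sg table, then hand both to udl_prime_create() above. A sketch of the flow with its error unwinding; the fail_unmap/fail_detach labels are reconstructed, not quoted:

        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);        /* line 304 */

        sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto fail_detach;
        }

        ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);        /* line 312 */
        if (ret)
                goto fail_unmap;

        uobj->base.import_attach = attach;
        return &uobj->base;

fail_unmap:
        dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        return ERR_PTR(ret);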