static struct sg_table *omap_gem_map_dma_buf(
        struct dma_buf_attachment *attachment,
        enum dma_data_direction dir)
{
    struct drm_gem_object *obj = attachment->dmabuf->priv;
    /* ... pin the buffer and build the sg_table describing it ... */
}

static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
        struct sg_table *sg, enum dma_data_direction dir)
{
    struct drm_gem_object *obj = attachment->dmabuf->priv;
    /* ... unpin the buffer and free the sg_table ... */
}

static void omap_gem_dmabuf_release(struct dma_buf *buffer)
{
    struct drm_gem_object *obj = buffer->priv;
    /* Drop the GEM reference taken when the buffer was exported. */
    drm_gem_object_unreference_unlocked(obj);
}
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
        size_t start, size_t len, enum dma_data_direction dir)
{
    struct drm_gem_object *obj = buffer->priv;
    /* ... make sure the backing pages are available for CPU access ... */
}

static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
        size_t start, size_t len, enum dma_data_direction dir)
{
    struct drm_gem_object *obj = buffer->priv;
    /* ... release the backing pages ... */
}
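For reference (this is not part of the OMAP driver), a kernel-side importer would bracket its CPU accesses with dma_buf_begin_cpu_access()/dma_buf_end_cpu_access(), which is what dispatches to the two callbacks above. The sketch below assumes the same kernel era as this listing, where these helpers still take a start/length range; the function name is purely illustrative.

#include <linux/dma-buf.h>

/* Illustrative only: bracket a CPU read of the first 'len' bytes. */
static int cpu_read_buffer(struct dma_buf *buf, size_t len)
{
    int ret;

    /* dispatches to omap_gem_dmabuf_begin_cpu_access(): pins the pages */
    ret = dma_buf_begin_cpu_access(buf, 0, len, DMA_FROM_DEVICE);
    if (ret)
        return ret;

    /* ... kmap and read the pages here ... */

    /* dispatches to omap_gem_dmabuf_end_cpu_access(): releases the pages */
    dma_buf_end_cpu_access(buf, 0, len, DMA_FROM_DEVICE);
    return 0;
}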
static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
        unsigned long page_num)
{
    struct drm_gem_object *obj = buffer->priv;
    /* ... look up and sync the requested page, then kmap_atomic() it ... */
}

static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
        unsigned long page_num, void *addr)
{
    /* ... kunmap_atomic() the mapping ... */
}

static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
        unsigned long page_num)
{
    struct drm_gem_object *obj = buffer->priv;
    struct page **pages;
    /* ... look up the backing pages and sync the requested one ... */
    return kmap(pages[page_num]);
}

static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
        unsigned long page_num, void *addr)
{
    struct drm_gem_object *obj = buffer->priv;
    /* ... look up the page and kunmap() it ... */
}
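These kmap callbacks are normally reached through the dma_buf_kmap()/dma_buf_kunmap() wrappers present in kernels of this vintage. A minimal, hypothetical importer-side use might look like this:

#include <linux/dma-buf.h>
#include <linux/printk.h>

/* Illustrative only: dump the first bytes of page 0 of an imported buffer. */
static void dump_first_page(struct dma_buf *buf)
{
    /* dispatches to omap_gem_dmabuf_kmap() */
    void *vaddr = dma_buf_kmap(buf, 0);

    print_hex_dump_bytes("dmabuf: ", DUMP_PREFIX_OFFSET, vaddr, 64);

    /* dispatches to omap_gem_dmabuf_kunmap() */
    dma_buf_kunmap(buf, 0, vaddr);
}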
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
        struct vm_area_struct *vma)
{
    struct drm_gem_object *obj = buffer->priv;

    /* ... validate the object and the requested mapping size ... */

    if (!obj->dev->driver->gem_vm_ops) {
        /* ... reject the mapping: no fault handler to service it ... */
    }

    vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
    vma->vm_ops = obj->dev->driver->gem_vm_ops;
    /* ... set vm_private_data and page protection, finish the mapping ... */
}
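From user space, .mmap is reached simply by calling mmap() on the dma-buf file descriptor obtained from the PRIME export ioctl. The sketch below is a generic illustration, not OMAP-specific; it assumes the fd and buffer size were obtained beforehand.

#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

/* Illustrative only: map a dma-buf fd; this lands in omap_gem_dmabuf_mmap(). */
static void *map_dmabuf_fd(int dmabuf_fd, size_t size)
{
    void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   dmabuf_fd, 0);

    if (p == MAP_FAILED) {
        perror("mmap(dma-buf fd)");
        return NULL;
    }
    return p;
}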
static struct dma_buf_ops omap_dmabuf_ops = {
    .map_dma_buf = omap_gem_map_dma_buf,
    .unmap_dma_buf = omap_gem_unmap_dma_buf,
    .release = omap_gem_dmabuf_release,
    .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
    .end_cpu_access = omap_gem_dmabuf_end_cpu_access,
    .kmap_atomic = omap_gem_dmabuf_kmap_atomic,
    .kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
    .kmap = omap_gem_dmabuf_kmap,
    .kunmap = omap_gem_dmabuf_kunmap,
    .mmap = omap_gem_dmabuf_mmap,
};
struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
        struct drm_gem_object *obj, int flags)
{
    /* ... wrap the GEM object in a dma_buf backed by omap_dmabuf_ops ... */
}

struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
        struct dma_buf *buffer)
{
    struct drm_gem_object *obj;

    /* Is this one of our own exported buffers coming back to us? */
    if (buffer->ops == &omap_dmabuf_ops) {
        obj = buffer->priv;
        /* ... and does it belong to this device? */
        if (obj->dev == dev) {
            /* Reuse the existing GEM object instead of wrapping the dma_buf. */
            drm_gem_object_reference(obj);
            return obj;
        }
    }
    /* ... otherwise import the foreign dma_buf ... */
}
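To close the loop, here is a hypothetical second driver importing the exported buffer. dma_buf_map_attachment() is what ends up calling omap_gem_map_dma_buf() at the top of this listing, and dma_buf_unmap_attachment() calls omap_gem_unmap_dma_buf(); the function and device names below are illustrative only.

#include <linux/dma-buf.h>
#include <linux/err.h>

/* Illustrative only: attach to, map, and release an exported buffer. */
static int probe_omap_buffer(struct device *importer_dev, int fd)
{
    struct dma_buf *buf;
    struct dma_buf_attachment *attach;
    struct sg_table *sgt;
    int ret = 0;

    buf = dma_buf_get(fd);                  /* fd -> dma_buf, takes a reference */
    if (IS_ERR(buf))
        return PTR_ERR(buf);

    attach = dma_buf_attach(buf, importer_dev);  /* register as an importer */
    if (IS_ERR(attach)) {
        ret = PTR_ERR(attach);
        goto out_put;
    }

    /* exporter side: omap_gem_map_dma_buf() builds and returns the sg_table */
    sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
    if (IS_ERR(sgt)) {
        ret = PTR_ERR(sgt);
        goto out_detach;
    }

    /* ... program the importer's DMA engine from sgt ... */

    /* exporter side: omap_gem_unmap_dma_buf() */
    dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
out_detach:
    dma_buf_detach(buf, attach);
out_put:
    dma_buf_put(buf);
    return ret;
}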