37 int npages = nvbo->bo.num_pages;
60 if (nvbo->gem->export_dma_buf == dma_buf) {
61 nvbo->gem->export_dma_buf = NULL;
62 drm_gem_object_unreference_unlocked(nvbo->gem);
66 static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
71 static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
75 static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
80 static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
85 static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
90 static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
114 static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
127 static const struct dma_buf_ops nouveau_dmabuf_ops = {
128 .map_dma_buf = nouveau_gem_map_dma_buf,
129 .unmap_dma_buf = nouveau_gem_unmap_dma_buf,
130 .release = nouveau_gem_dmabuf_release,
131 .kmap = nouveau_gem_kmap,
132 .kmap_atomic = nouveau_gem_kmap_atomic,
133 .kunmap = nouveau_gem_kunmap,
134 .kunmap_atomic = nouveau_gem_kunmap_atomic,
135 .mmap = nouveau_gem_prime_mmap,
136 .vmap = nouveau_gem_prime_vmap,
137 .vunmap = nouveau_gem_prime_vunmap,
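These dma_buf_ops are what another device driver goes through when it imports a nouveau buffer. As a rough importer-side sketch (not taken from the nouveau source; the helper name and the importer_dev argument are made up, and error handling is simplified), the attach/map sequence below is what ends up calling nouveau_gem_map_dma_buf() through .map_dma_buf:

/* Hypothetical importer-side helper: attach to a dma-buf exported with
 * nouveau_dmabuf_ops and map it to a scatter/gather table. */
#include <linux/dma-buf.h>
#include <linux/err.h>

static struct sg_table *import_dmabuf_pages(struct dma_buf *buf,
                                            struct device *importer_dev)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sg;

        attach = dma_buf_attach(buf, importer_dev);   /* register the importer */
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        /* invokes the exporter's .map_dma_buf, i.e. nouveau_gem_map_dma_buf() */
        sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg))
                dma_buf_detach(buf, attach);

        return sg;
}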
165 nouveau_bo_ref(NULL, pnvbo);
169 nvbo->gem->driver_private = nvbo;
174 struct drm_gem_object *obj, int flags)
176 struct nouveau_bo *nvbo = nouveau_gem_object(obj);
184 return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
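The dma_buf_export() call at line 184 is reached when userspace asks for a dma-buf file descriptor backing a GEM handle. A minimal userspace sketch of that trigger, assuming libdrm and an already-open nouveau DRM fd (drm_fd and gem_handle are placeholder names):

/* Hypothetical userspace helper (libdrm): turn a GEM handle into a dma-buf fd.
 * On nouveau this request ends up in nouveau_gem_prime_export() above. */
#include <stdint.h>
#include <xf86drm.h>

int export_gem_as_dmabuf(int drm_fd, uint32_t gem_handle)
{
        int prime_fd = -1;

        if (drmPrimeHandleToFD(drm_fd, gem_handle, DRM_CLOEXEC, &prime_fd))
                return -1;      /* PRIME handle-to-fd ioctl failed */

        return prime_fd;        /* fd can be passed to another process or device */
}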
188 struct dma_buf *dma_buf)
195 if (dma_buf->ops == &nouveau_dmabuf_ops) {
196 nvbo = dma_buf->priv;
198 if (nvbo->gem->dev == dev) {
199 drm_gem_object_reference(nvbo->gem);
207 return ERR_PTR(PTR_ERR(attach));
215 ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
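Both paths above are only reachable if the driver advertises PRIME support and wires these functions into its drm_driver. The sketch below shows that hook-up in the style nouveau uses (abridged and not verbatim: unrelated fields are omitted and the struct name is invented; the two drm_gem_prime_* helpers are the generic DRM PRIME ioctl handlers):

/* Abridged sketch of the drm_driver wiring that routes the PRIME ioctls to
 * the export/import functions in this file. */
#include <drm/drmP.h>

static struct drm_driver nouveau_prime_driver_sketch = {
        .driver_features    = DRIVER_GEM | DRIVER_PRIME,
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,  /* export ioctl */
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,  /* import ioctl */
        .gem_prime_export   = nouveau_gem_prime_export,
        .gem_prime_import   = nouveau_gem_prime_import,
};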