#include <drm/exynos_drm.h>
sg_set_page(sgl, pages[i], page_size, 0);
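/*
 * Context sketch (a reconstruction, not verbatim source): in
 * exynos_pages_to_sg() this call sits inside a for_each_sg() walk over
 * the freshly allocated table, binding one backing page to each
 * scatterlist entry:
 *
 *	for_each_sg(sgt->sgl, sgl, nr_pages, i)
 *		sg_set_page(sgl, pages[i], page_size, 0);
 */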
DRM_DEBUG_PRIME("%s\n", __FILE__);
DRM_ERROR("pages is null.\n");
DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
			npages, buf->size, buf->page_size);
static void exynos_dmabuf_release(struct dma_buf *dmabuf)
DRM_DEBUG_PRIME("%s\n", __FILE__);
if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
	exynos_gem_obj->base.export_dma_buf = NULL;
	drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
}
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
					unsigned long page_num)
static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
					unsigned long page_num, void *addr)
static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
					unsigned long page_num)
static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
					unsigned long page_num, void *addr)
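/*
 * In this version of the driver the kmap/kunmap callbacks above are
 * empty TODO stubs; CPU access through the dma-buf kmap interface is
 * not implemented.
 */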
static struct dma_buf_ops exynos_dmabuf_ops = {
	.map_dma_buf	= exynos_gem_map_dma_buf,
	.unmap_dma_buf	= exynos_gem_unmap_dma_buf,
	.kmap		= exynos_gem_dmabuf_kmap,
	.kmap_atomic	= exynos_gem_dmabuf_kmap_atomic,
	.kunmap		= exynos_gem_dmabuf_kunmap,
	.kunmap_atomic	= exynos_gem_dmabuf_kunmap_atomic,
	.mmap		= exynos_gem_dmabuf_mmap,
	.release	= exynos_dmabuf_release,
};
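/*
 * The import path below keys off this table: a dma-buf whose ops
 * pointer equals &exynos_dmabuf_ops was exported by this driver.
 */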
struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
					struct drm_gem_object *obj, int flags)
return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
			exynos_gem_obj->base.size, 0600);
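/*
 * Note that the caller-supplied flags are not forwarded; the dma-buf
 * file mode is hard-coded to 0600 here.
 */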
DRM_DEBUG_PRIME("%s\n", __FILE__);
if (dma_buf->ops == &exynos_dmabuf_ops) {
	struct drm_gem_object *obj;
	exynos_gem_obj = dma_buf->priv;
	obj = &exynos_gem_obj->base;
	if (obj->dev == drm_dev) {
		drm_gem_object_reference(obj);
		return obj;
	}
if (IS_ERR_OR_NULL(sgt)) {
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
ret = -ENOMEM;
goto err_unmap_attach;
if (!buffer->pages) {
	DRM_ERROR("failed to allocate pages.\n");
	ret = -ENOMEM;
	goto err_free_buffer;
}
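/*
 * The error labels unwind in reverse order of setup: err_free_buffer
 * releases the kzalloc'd wrapper, err_unmap_attach undoes the earlier
 * dma_buf_map_attachment() call.
 */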
if (!exynos_gem_obj) {
if (sgt->nents == 1) {
while (i < sgt->nents) {
	buffer->pages[i] = sg_page(sgl);
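/*
 * Sketch of the remainder of the walk, assuming the usual shape:
 * accumulate the mapped length and advance to the next entry.
 *
 *	buffer->size += sg_dma_len(sgl);
 *	sgl = sg_next(sgl);
 *	i++;
 * }
 */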
DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
			buffer->size);
return &exynos_gem_obj->base;