		bool dirty, bool accessed);
#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
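/* note: we use upper 8 bits of flags for driver-internal flags: */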
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */
	struct drm_gem_object base;
static int get_pages(struct drm_gem_object *obj, struct page ***pages);
static uint64_t mmap_offset(struct drm_gem_object *obj);
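/* Userspace mmap's of 2d tiled buffers are handled through a small pool of
 * pre-reserved tiler blocks (the "usergart"), faulting the buffer through a
 * slot at a time; entries are evicted round-robin when a new fault needs one.
 */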
#define NUM_USERGART_ENTRIES 2
	struct drm_gem_object *obj;	/* the current pinned obj */
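/* Evict a buffer from the usergart: shoot down any CPU mappings of the
 * slot it occupies, so the next access refaults through a fresh slot.
 */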
static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct usergart_entry *entry)
	if (obj->dev->dev_mapping) {
		int n = usergart[fmt].height;
		loff_t off = mmap_offset(obj) +
				(entry->obj_pgoff << PAGE_SHIFT);
		for (i = n; i > 0; i--) {
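/* Evict a buffer from all usergart entries that currently hold it: */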
static void evict(struct drm_gem_object *obj)
			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
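/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp == NULL), or from shmem (swappable, page-backed) memory.
 */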
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	return is_shmem(obj) &&
		((to_omap_bo(obj)->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}
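/** ensure backing pages are allocated */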
static int omap_gem_attach_pages(struct drm_gem_object *obj)
		dev_err(obj->dev->dev,
				"could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
		for (i = 0; i < npages; i++) {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
	omap_obj->addrs = addrs;
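/** release backing pages */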
static void omap_gem_detach_pages(struct drm_gem_object *obj)
	for (i = 0; i < npages; i++) {
static uint64_t mmap_offset(struct drm_gem_object *obj)
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	if (!obj->map_list.map) {
			dev_err(dev->dev, "could not allocate mmap offset\n");
	offset = mmap_offset(obj);
	size_t size = obj->size;
		*w = omap_obj->width;
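/* Normal handling for the case of faulting in non-tiled buffers */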
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	if (omap_obj->pages) {
	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	const int n = usergart[fmt].height;
	const int n_shift = usergart[fmt].height_shift;
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	slots = omap_obj->width >> usergart[fmt].slot_shift;
	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
	entry = &usergart[fmt].entry[usergart[fmt].last];
		evict_entry(entry->obj, fmt, entry);
	base_pgoff = (base_pgoff >> n_shift) * slots;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
			sizeof(struct page *) * slots);
			sizeof(struct page *) * (n - slots));
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);
	for (i = n; i > 0; i--) {
		pfn += usergart[fmt].stride_pfn;
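/* Common pagefault entry point: pin the backing pages and install them in
 * the faulting vma, via the 2d (tiled) or 1d (linear) path as appropriate.
 */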
	ret = get_pages(obj, &pages);
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);
	if (!ret)
		return VM_FAULT_NOPAGE;
	return VM_FAULT_SIGBUS;
		DBG("mmap failed: %d", ret);
		vma->vm_file = get_file(obj->filp);
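/* The helpers below follow the usual lookup pattern: take a reference on
 * the object, operate on it, then drop the reference when done.
 */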
	struct drm_gem_object *obj;
	drm_gem_object_unreference_unlocked(obj);
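/* Set scrolling position ("roll") of the buffer: out-of-range values are
 * rejected, and if the object is currently pinned in TILER it is repinned
 * at the new roll immediately.
 */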
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
	if (omap_obj->block) {
		ret = get_pages(obj, &pages);
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		omap_obj->addrs[pgoff] = 0;
	if (is_cached_coherent(obj)) {
		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
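/* Pin the buffer and return its device-visible (physical) address: shmem
 * buffers are remapped through DMM/TILER when available, so even
 * non-contiguous memory can be presented as contiguous to the hardware.
 */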
	if (remap && is_shmem(obj) && priv->has_dmm) {
		ret = get_pages(obj, &pages);
			ret = PTR_ERR(block);
			dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
		ret = tiler_pin(omap_obj->block, pages, npages,
				omap_obj->roll, true);
		if (ret) {
			dev_err(obj->dev->dev, "could not pin: %d\n", ret);
		DBG("got paddr: %08x", omap_obj->paddr);
		*paddr = omap_obj->paddr;
		*paddr = omap_obj->paddr;
844 "could not unpin pages: %d\n", ret);
850 "could not release unmap: %d\n", ret);
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
	*pages = omap_obj->pages;
	if (!omap_obj->pages)
	*pages = omap_obj->pages;
		ret = get_pages(obj, pages);
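/* Get a kernel virtual address for the buffer, mapping the backing pages
 * on first use; the caller must hold struct_mutex.
 */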
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		int ret = get_pages(obj, &pages);
	return omap_obj->vaddr;
#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	if (obj->map_list.map)
		off = (uint64_t)obj->map_list.hash.key;
	seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
	if (omap_obj->block) {
				area->p0.x, area->p0.y,
				area->p1.x, area->p1.y);
		struct drm_gem_object *obj = &omap_obj->base;
		omap_gem_describe(obj, m);
	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)
static void sync_op_update(void)
		if (!is_waiting(waiter)) {
			SYNC("notify: %p", waiter);
static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
	spin_lock(&sync_lock);
	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_KERNEL);
		if (!omap_obj->sync) {
			omap_obj->sync->read_pending++;
			omap_obj->sync->write_pending++;
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
			omap_obj->sync->write_complete++;
	spin_unlock(&sync_lock);
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
	return sync_op(obj, op, true);
	return sync_op(obj, op, false);
static void sync_notify(void *arg)
	*waiter_task = NULL;
	if (omap_obj->sync) {
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;
		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
				SYNC("interrupted: %p", waiter);
	spin_unlock(&sync_lock);
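/* Call fxn(arg) when the buffer becomes idle for the given op: immediately
 * if it already is, otherwise asynchronously from the sync-update path.
 */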
		void (*fxn)(void *arg), void *arg)
	if (omap_obj->sync) {
		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			spin_unlock(&sync_lock);
		spin_unlock(&sync_lock);
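/* Special API to attach an externally allocated sync object to the buffer
 * (OMAP_BO_EXT_SYNC), replacing any driver-allocated one.
 */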
	spin_lock(&sync_lock);
		omap_obj->sync = syncobj;
		if (omap_obj->sync) {
		omap_obj->sync = syncobj;
	spin_unlock(&sync_lock);
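/* don't call directly.. called from GEM core when it is time to actually
 * free the object
 */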
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	if (obj->map_list.map) {
		if (omap_obj->pages) {
			omap_gem_detach_pages(obj);
		if (!is_shmem(obj)) {
		} else if (omap_obj->vaddr) {
	struct drm_gem_object *obj;
	drm_gem_object_unreference_unlocked(obj);
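/* GEM buffer object constructor */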
	struct drm_gem_object *obj = NULL;
			dev_err(dev->dev, "Tiled buffers require DMM\n");
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
		dev_err(dev->dev, "could not allocate GEM object\n");
	obj = &omap_obj->base;
		if (omap_obj->vaddr) {
	if (flags & OMAP_BO_TILED) {
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
	usergart = kzalloc(3 * sizeof(*usergart), GFP_KERNEL);
		dev_warn(dev->dev, "could not allocate usergart\n");
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
			DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
					usergart[i].entry[j].paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);