29 #include <drm/i915_drm.h>
34 #include <linux/slab.h>
36 #include <linux/pci.h>
43 bool map_and_fenceable,
48 struct drm_file *file);
78 dev_priv->mm.object_count++;
79 dev_priv->mm.object_memory += size;
85 dev_priv->mm.object_count--;
86 dev_priv->mm.object_memory -= size;
107 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
109 } else if (ret < 0) {
121 spin_unlock_irqrestore(&x->wait.lock, flags);
130 ret = i915_gem_wait_for_error(dev);
150 struct drm_file *file)
154 if (drm_core_check_feature(dev, DRIVER_MODESET))
175 struct drm_file *file)
196 i915_gem_create(struct drm_file *file,
217 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
223 drm_gem_object_unreference(&obj->base);
224 trace_i915_gem_object_create(obj);
238 return i915_gem_create(file, dev,
254 struct drm_file *file)
258 return i915_gem_create(file, dev,
271 __copy_to_user_swizzled(char __user *cpu_vaddr,
272 const char *gpu_vaddr, int gpu_offset,
278 int cacheline_end = ALIGN(gpu_offset + 1, 64);
279 int this_length = min(cacheline_end - gpu_offset, length);
280 int swizzled_gpu_offset = gpu_offset ^ 64;
283 gpu_vaddr + swizzled_gpu_offset,
288 cpu_offset += this_length;
289 gpu_offset += this_length;
290 length -= this_length;
297 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
298 const char __user *cpu_vaddr,
301 int ret, cpu_offset = 0;
304 int cacheline_end = ALIGN(gpu_offset + 1, 64);
305 int this_length = min(cacheline_end - gpu_offset, length);
306 int swizzled_gpu_offset = gpu_offset ^ 64;
309 cpu_vaddr + cpu_offset,
314 cpu_offset += this_length;
315 gpu_offset += this_length;
316 length -= this_length;
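The two swizzled copy helpers above (lines 271-316) walk the GPU buffer one 64-byte chunk at a time, never crossing a cacheline boundary, and read or write at the offset XOR 64 to undo bit-17 swizzling, which swaps adjacent cachelines within each 128-byte block. A minimal userspace sketch of the same address arithmetic, assuming plain memcpy-backed buffers instead of the kernel's __copy_to/from_user helpers:

#include <stddef.h>
#include <string.h>

#define CACHELINE 64

/* First 64-byte boundary strictly after off; matches ALIGN(off + 1, 64). */
static size_t cacheline_end(size_t off)
{
	return (off + CACHELINE) & ~(size_t)(CACHELINE - 1);
}

/* Copy len bytes out of a bit-17 swizzled buffer: each chunk stays within one
 * cacheline, so the source offset can simply be XORed with 64 to pick the
 * swapped neighbour. */
static void copy_from_swizzled(char *dst, const char *swizzled_src,
			       size_t src_off, size_t len)
{
	size_t dst_off = 0;

	while (len > 0) {
		size_t chunk = cacheline_end(src_off) - src_off;

		if (chunk > len)
			chunk = len;
		memcpy(dst + dst_off, swizzled_src + (src_off ^ 64), chunk);
		dst_off += chunk;
		src_off += chunk;
		len -= chunk;
	}
}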
328 bool page_do_bit17_swizzling, bool needs_clflush)
333 if (unlikely(page_do_bit17_swizzling))
341 vaddr + shmem_page_offset,
349 shmem_clflush_swizzled_range(char *addr, unsigned long length,
353 unsigned long start = (unsigned long) addr;
354 unsigned long end = (unsigned long) addr + length;
373 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
374 char __user *user_data,
375 bool page_do_bit17_swizzling, bool needs_clflush)
382 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
384 page_do_bit17_swizzling);
386 if (page_do_bit17_swizzling)
387 ret = __copy_to_user_swizzled(user_data,
388 vaddr, shmem_page_offset,
392 vaddr + shmem_page_offset,
396 return ret ? -EFAULT : 0;
403 struct drm_file *file)
409 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
410 int hit_slowpath = 0;
412 int needs_clflush = 0;
419 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
439 i915_gem_object_pin_pages(obj);
458 page_length = remain;
459 if ((shmem_page_offset + page_length) > PAGE_SIZE)
460 page_length = PAGE_SIZE - shmem_page_offset;
463 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
466 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
467 user_data, page_do_bit17_swizzling,
476 ret = fault_in_multipages_writeable(user_data, remain);
485 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
486 user_data, page_do_bit17_swizzling,
503 i915_gem_object_unpin_pages(obj);
508 i915_gem_object_truncate(obj);
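The pread loop above (lines 458-460) never lets a single copy cross a page boundary: each iteration clamps page_length to whatever remains of the current page, tries the fast atomic-map copy, and only drops to the slow path on failure. A sketch of that chunking, assuming 4 KiB pages and a plain memcpy standing in for the per-page fast/slow copy:

#include <stddef.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096ul	/* assumption: 4 KiB pages */

static void copy_by_pages(char *dst, const char *src,
			  size_t offset, size_t remain)
{
	while (remain > 0) {
		size_t page_offset = offset & (SKETCH_PAGE_SIZE - 1);
		size_t page_length = remain;

		/* Clamp the chunk at the end of the current page. */
		if (page_offset + page_length > SKETCH_PAGE_SIZE)
			page_length = SKETCH_PAGE_SIZE - page_offset;

		/* The driver tries shmem_pread_fast() here and falls back to
		 * shmem_pread_slow() if the user page is not resident. */
		memcpy(dst, src + offset, page_length);

		dst += page_length;
		offset += page_length;
		remain -= page_length;
	}
}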
521 struct drm_file *file)
555 if (!obj->base.filp) {
560 trace_i915_gem_object_pread(obj, args->offset, args->size);
562 ret = i915_gem_shmem_pread(dev, obj, args, file);
565 drm_gem_object_unreference(&obj->base);
576 fast_user_write(struct io_mapping *mapping,
578 char __user *user_data,
583 unsigned long unwritten;
585 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
587 vaddr = (void __force*)vaddr_atomic + page_offset;
588 unwritten = __copy_from_user_inatomic_nocache(vaddr,
590 io_mapping_unmap_atomic(vaddr_atomic);
599 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
602 struct drm_file *file)
636 page_length = remain;
644 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
645 page_offset, user_data, page_length)) {
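i915_gem_gtt_pwrite_fast() (lines 599-645) writes through the GTT aperture one page at a time: the linear offset is split into a page-aligned base, which is what io_mapping_map_atomic_wc() maps, and a sub-page offset at which the user data is copied. A small sketch of that split, with made-up numbers purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12	/* assumption: 4 KiB pages */
#define SKETCH_PAGE_MASK  (~((UINT64_C(1) << SKETCH_PAGE_SHIFT) - 1))

int main(void)
{
	uint64_t offset = 0x123456;	/* hypothetical GTT offset */
	uint64_t page_base = offset & SKETCH_PAGE_MASK;		/* 0x123000: mapped */
	uint64_t page_offset = offset & ~SKETCH_PAGE_MASK;	/* 0x456: written at */

	printf("page_base=0x%llx page_offset=0x%llx\n",
	       (unsigned long long)page_base,
	       (unsigned long long)page_offset);
	return 0;
}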
666 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
667 char __user *user_data,
668 bool page_do_bit17_swizzling,
669 bool needs_clflush_before,
670 bool needs_clflush_after)
675 if (unlikely(page_do_bit17_swizzling))
679 if (needs_clflush_before)
682 ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
685 if (needs_clflush_after)
696 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
697 char __user *user_data,
698 bool page_do_bit17_swizzling,
699 bool needs_clflush_before,
700 bool needs_clflush_after)
706 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
707 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
709 page_do_bit17_swizzling);
710 if (page_do_bit17_swizzling)
711 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
718 if (needs_clflush_after)
719 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
721 page_do_bit17_swizzling);
731 struct drm_file *file)
737 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
738 int hit_slowpath = 0;
739 int needs_clflush_after = 0;
740 int needs_clflush_before = 0;
747 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
755 needs_clflush_after = 1;
766 needs_clflush_before = 1;
772 i915_gem_object_pin_pages(obj);
779 int partial_cacheline_write;
794 page_length = remain;
795 if ((shmem_page_offset + page_length) > PAGE_SIZE)
796 page_length = PAGE_SIZE - shmem_page_offset;
801 partial_cacheline_write = needs_clflush_before &&
806 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
809 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
810 user_data, page_do_bit17_swizzling,
811 partial_cacheline_write,
812 needs_clflush_after);
818 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
819 user_data, page_do_bit17_swizzling,
820 partial_cacheline_write,
821 needs_clflush_after);
838 i915_gem_object_unpin_pages(obj);
843 i915_gem_object_truncate(obj);
852 if (needs_clflush_after)
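The partial_cacheline_write test above (line 801, together with the needs_clflush_before plumbing) flushes before writing only when the write does not start and end on cacheline boundaries, since a partially written line would otherwise pull stale data back into the cache. A sketch of that test, assuming 64-byte cachelines in place of boot_cpu_data.x86_clflush_size:

#include <stdbool.h>
#include <stddef.h>

#define SKETCH_CLFLUSH_SIZE 64	/* assumption: 64-byte cachelines */

/* True when a write of len bytes at offset leaves its first or last cacheline
 * only partially covered, i.e. either value has low bits set. */
static bool partial_cacheline_write(size_t offset, size_t len)
{
	return ((offset | len) & (SKETCH_CLFLUSH_SIZE - 1)) != 0;
}

/* e.g. partial_cacheline_write(64, 128) is false (whole lines),
 *      partial_cacheline_write(64, 100) is true (ragged tail). */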
865 struct drm_file *file)
904 if (!obj->base.filp) {
909 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
919 ret = i915_gem_phys_pwrite(dev, obj, args, file);
926 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
933 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
936 drm_gem_object_unreference(&obj->base);
948 bool recovery_complete;
953 recovery_complete = x->done > 0;
954 spin_unlock_irqrestore(&x->wait.lock, flags);
962 if (recovery_complete)
980 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
1000 bool interruptible, struct timespec *timeout)
1004 unsigned long timeout_jiffies;
1006 bool wait_forever = true;
1012 trace_i915_gem_request_wait_begin(ring, seqno);
1014 if (timeout != NULL) {
1015 wait_time = *timeout;
1016 wait_forever = false;
1028 (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1029 atomic_read(&dev_priv->mm.wedged))
1042 } while (end == 0 && wait_forever);
1047 trace_i915_gem_request_wait_end(ring, seqno);
1051 struct timespec sleep_time = timespec_sub(now, before);
1052 *timeout = timespec_sub(*timeout, sleep_time);
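The EXIT_COND macro above (lines 1028-1029) relies on i915_seqno_passed() to compare 32-bit sequence numbers that eventually wrap. A sketch of the usual wrap-safe comparison, valid as long as the two values are within 2^31 of each other:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* Signed difference: seq1 is "at or after" seq2 when it is not more
	 * than half the sequence space behind it. */
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(100, 50));
	assert(!seqno_passed(50, 100));
	assert(seqno_passed(5, UINT32_MAX - 5));	/* across the wrap */
	return 0;
}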
1079 bool interruptible = dev_priv->mm.interruptible;
1082 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1089 ret = i915_gem_check_olr(ring, seqno);
1093 return __wait_seqno(ring, seqno, interruptible, NULL);
1143 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1144 BUG_ON(!dev_priv->mm.interruptible);
1154 ret = i915_gem_check_olr(ring, seqno);
1159 ret = __wait_seqno(ring, seqno, true, NULL);
1182 struct drm_file *file)
1194 if (read_domains & I915_GEM_GPU_DOMAINS)
1200 if (write_domain != 0 && read_domains != write_domain)
1217 ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1235 drm_gem_object_unreference(&obj->base);
1246 struct drm_file *file)
1264 i915_gem_object_flush_cpu_write_domain(obj);
1266 drm_gem_object_unreference(&obj->base);
1281 struct drm_file *file)
1284 struct drm_gem_object *obj;
1295 drm_gem_object_unreference_unlocked(obj);
1302 drm_gem_object_unreference_unlocked(obj);
1303 if (IS_ERR((void *)addr))
1335 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1338 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1345 trace_i915_gem_object_fault(obj, page_offset, true, write);
1354 ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
1370 if (i915_gem_object_is_inactive(obj))
1371 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1379 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1389 return VM_FAULT_SIGBUS;
1407 return VM_FAULT_NOPAGE;
1409 return VM_FAULT_OOM;
1411 return VM_FAULT_SIGBUS;
1413 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1414 return VM_FAULT_SIGBUS;
1438 if (obj->base.dev->dev_mapping)
1457 gtt_size = 1024*1024;
1459 gtt_size = 512*1024;
1461 while (gtt_size < size)
1475 i915_gem_get_gtt_alignment(struct drm_device *dev,
1491 return i915_gem_get_gtt_size(dev, size, tiling_mode);
1520 return i915_gem_get_gtt_size(dev, size, tiling_mode);
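i915_gem_get_gtt_size() (lines 1457-1461) keeps the pre-965 requirement of power-of-two fence regions: start from the hardware minimum (1 MiB on gen3, 512 KiB on gen2) and double until the object fits. A sketch of that rounding:

#include <stdint.h>

/* Smallest power-of-two multiple of min_size that covers obj_size, mirroring
 * the doubling loop in i915_gem_get_gtt_size(). */
static uint64_t fence_region_size(uint64_t obj_size, uint64_t min_size)
{
	uint64_t gtt_size = min_size;

	while (gtt_size < obj_size)
		gtt_size <<= 1;
	return gtt_size;
}

/* e.g. fence_region_size(1300 * 1024, 1024 * 1024) == 2 MiB */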
1528 if (obj->base.map_list.map)
1547 i915_gem_shrink_all(dev_priv);
1553 if (!obj->base.map_list.map)
1579 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1585 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1590 ret = i915_gem_object_create_mmap_offset(obj);
1597 drm_gem_object_unreference(&obj->base);
1620 struct drm_file *file)
1633 i915_gem_object_free_mmap_offset(obj);
1643 inode = obj->base.filp->f_path.dentry->d_inode;
1674 if (i915_gem_object_needs_bit17_swizzle(obj))
1681 struct page *page = sg_page(sg);
1714 if (i915_gem_object_is_purgeable(obj))
1715 i915_gem_object_truncate(obj);
1727 &dev_priv->mm.unbound_list,
1729 if (i915_gem_object_is_purgeable(obj) &&
1730 i915_gem_object_put_pages(obj) == 0) {
1732 if (count >= target)
1738 &dev_priv->mm.inactive_list,
1740 if (i915_gem_object_is_purgeable(obj) &&
1742 i915_gem_object_put_pages(obj) == 0) {
1744 if (count >= target)
1760 i915_gem_object_put_pages(obj);
1785 page_count = obj->base.size / PAGE_SIZE;
1797 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
1798 gfp = mapping_gfp_mask(mapping);
1804 i915_gem_purge(dev_priv, page_count);
1815 i915_gem_shrink_all(dev_priv);
1829 if (i915_gem_object_needs_bit17_swizzle(obj))
1859 BUG_ON(obj->pages_pin_count);
1882 drm_gem_object_reference(&obj->base);
1887 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1901 &dev_priv->mm.fence_list);
1918 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1925 obj->base.write_domain = 0;
1931 drm_gem_object_unreference(&obj->base);
1960 struct drm_file *file,
1965 u32 request_ring_position;
1982 if (request == NULL)
1992 request_ring_position = intel_ring_get_tail(ring);
2000 trace_i915_gem_request_add(ring, seqno);
2004 request->tail = request_ring_position;
2013 spin_lock(&file_priv->mm.lock);
2016 &file_priv->mm.request_list);
2017 spin_unlock(&file_priv->mm.lock);
2022 if (!dev_priv->mm.suspended) {
2023 if (i915_enable_hangcheck) {
2030 &dev_priv->mm.retire_work, HZ);
2048 spin_lock(&file_priv->mm.lock);
2053 spin_unlock(&file_priv->mm.lock);
2067 i915_gem_request_remove_from_client(request);
2078 i915_gem_object_move_to_inactive(obj);
2082 static void i915_gem_reset_fences(struct drm_device *dev)
2090 i915_gem_write_fence(dev, i, NULL);
2093 i915_gem_object_fence_lost(reg->obj);
2100 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
2111 i915_gem_reset_ring_lists(dev_priv, ring);
2117 &dev_priv->mm.inactive_list,
2124 i915_gem_reset_fences(dev);
2154 if (!i915_seqno_passed(seqno, request->seqno))
2157 trace_i915_gem_request_retire(ring, request->seqno);
2166 i915_gem_request_remove_from_client(request);
2183 i915_gem_object_move_to_inactive(obj);
2217 dev = dev_priv->dev;
2238 if (!dev_priv->mm.suspended && !idle)
2301 timeout = &timeout_stack;
2315 ret = i915_gem_object_flush_active(obj);
2335 drm_gem_object_unreference(&obj->base);
2338 ret = __wait_seqno(ring, seqno, true, timeout);
2340 WARN_ON(!timespec_valid(timeout));
2346 drm_gem_object_unreference(&obj->base);
2371 if (from == NULL || to == from)
2375 return i915_gem_object_wait_rendering(obj, false);
2377 idx = intel_ring_sync_index(from, to);
2383 ret = i915_gem_check_olr(obj->ring, seqno);
2387 ret = to->sync_to(to, from, seqno);
2396 u32 old_write_domain, old_read_domains;
2407 old_read_domains = obj->base.read_domains;
2408 old_write_domain = obj->base.write_domain;
2413 trace_i915_gem_object_change_domain(obj,
2443 i915_gem_object_finish_gtt(obj);
2450 trace_i915_gem_object_unbind(obj);
2461 list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
2492 ret = i915_ring_idle(ring);
2500 static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
2525 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2548 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2560 (size & -size) != size ||
2562 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2571 pitch_val = obj->stride / tile_width;
2572 pitch_val = ffs(pitch_val) - 1;
2592 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2603 (size & -size) != size ||
2605 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2608 pitch_val = obj->stride / 128;
2609 pitch_val = ffs(pitch_val) - 1;
2624 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2629 case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
2631 case 4: i965_write_fence_reg(dev, reg, obj); break;
2632 case 3: i915_write_fence_reg(dev, reg, obj); break;
2633 case 2: i830_write_fence_reg(dev, reg, obj); break;
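The pre-965 fence setup above stores the tiled pitch as a log2 field: the byte stride is divided by the tile width (128 or 512 bytes depending on generation and tiling mode) and the result, which must be a power of two, is encoded as ffs(pitch) - 1. A sketch with an assumed 512-byte tile width:

#include <stdint.h>
#include <strings.h>	/* ffs() */

static uint32_t encode_fence_pitch(uint32_t stride_bytes, uint32_t tile_width)
{
	uint32_t pitch_val = stride_bytes / tile_width;

	/* pitch_val must be a power of two for this log2 encoding to hold. */
	return (uint32_t)(ffs((int)pitch_val) - 1);
}

/* e.g. encode_fence_pitch(2048, 512) == 2, since 2048 / 512 == 4 == 1 << 2 */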
2649 int reg = fence_number(dev_priv, fence);
2651 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2656 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2691 ret = i915_gem_object_flush_fence(obj);
2698 i915_gem_object_update_fence(obj,
2701 i915_gem_object_fence_lost(obj);
2715 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2765 ret = i915_gem_object_flush_fence(obj);
2775 &dev_priv->mm.fence_list);
2778 } else if (enable) {
2779 reg = i915_find_fence_reg(dev);
2786 ret = i915_gem_object_flush_fence(old);
2790 i915_gem_object_fence_lost(old);
2795 i915_gem_object_update_fence(obj, reg, enable);
2801 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2814 if (gtt_space == NULL)
2831 static void i915_gem_verify_gtt(struct drm_device *dev)
2840 printk(KERN_ERR "object found on GTT list with no space reserved\n");
2846 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
2855 if (!i915_gem_valid_gtt_space(dev,
2858 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
2883 u32 size, fence_size, fence_alignment, unfenced_alignment;
2884 bool mappable, fenceable;
2888 DRM_ERROR("Attempting to bind a purgeable object\n");
2892 fence_size = i915_gem_get_gtt_size(dev,
2895 fence_alignment = i915_gem_get_gtt_alignment(dev,
2898 unfenced_alignment =
2904 alignment = map_and_fenceable ? fence_alignment :
2906 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
2907 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2911 size = map_and_fenceable ? fence_size : obj->base.size;
2916 if (obj->base.size >
2917 (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2918 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2927 if (map_and_fenceable)
2929 drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
2931 0, dev_priv->mm.gtt_mappable_end,
2934 free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
2938 if (free_space != NULL) {
2939 if (map_and_fenceable)
2943 0, dev_priv->mm.gtt_mappable_end,
2961 if (WARN_ON(!i915_gem_valid_gtt_space(dev,
2977 if (!dev_priv->mm.aliasing_ppgtt)
2980 list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
2987 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2994 trace_i915_gem_object_bind(obj, map_and_fenceable);
2995 i915_gem_verify_gtt(dev);
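After binding, i915_gem_object_bind_to_gtt() records whether the new placement could also back a fence (an exactly fence-sized node starting on the fence alignment) and whether it lies inside the CPU-mappable part of the aperture. A condensed sketch of those two checks, with all values passed in explicitly:

#include <stdbool.h>
#include <stdint.h>

static bool is_fenceable(uint64_t node_start, uint64_t node_size,
			 uint64_t fence_size, uint64_t fence_alignment)
{
	return node_size == fence_size &&
	       (node_start & (fence_alignment - 1)) == 0;
}

static bool is_mappable(uint64_t node_start, uint64_t obj_size,
			uint64_t mappable_end)
{
	return node_start + obj_size <= mappable_end;
}

/* obj->map_and_fenceable then corresponds to is_mappable(...) && is_fenceable(...). */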
3020 trace_i915_gem_object_clflush(obj);
3044 old_write_domain = obj->base.write_domain;
3045 obj->base.write_domain = 0;
3047 trace_i915_gem_object_change_domain(obj,
3048 obj->base.read_domains,
3063 old_write_domain = obj->base.write_domain;
3064 obj->base.write_domain = 0;
3066 trace_i915_gem_object_change_domain(obj,
3067 obj->base.read_domains,
3081 uint32_t old_write_domain, old_read_domains;
3091 ret = i915_gem_object_wait_rendering(obj, !write);
3095 i915_gem_object_flush_cpu_write_domain(obj);
3097 old_write_domain = obj->base.write_domain;
3098 old_read_domains = obj->base.read_domains;
3111 trace_i915_gem_object_change_domain(obj,
3116 if (i915_gem_object_is_inactive(obj))
3117 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3133 DRM_DEBUG("can not change the cache level of pinned objects\n");
3137 if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
3148 i915_gem_object_finish_gtt(obj);
3170 u32 old_read_domains, old_write_domain;
3181 old_read_domains = obj->base.read_domains;
3182 old_write_domain = obj->base.write_domain;
3187 trace_i915_gem_object_change_domain(obj,
3193 i915_gem_verify_gtt(dev);
3198 struct drm_file *file)
3216 drm_gem_object_unreference(&obj->base);
3223 struct drm_file *file)
3253 drm_gem_object_unreference(&obj->base);
3269 u32 old_read_domains, old_write_domain;
3272 if (pipelined != obj->ring) {
3299 i915_gem_object_flush_cpu_write_domain(obj);
3301 old_write_domain = obj->base.write_domain;
3302 old_read_domains = obj->base.read_domains;
3307 obj->base.write_domain = 0;
3310 trace_i915_gem_object_change_domain(obj,
3325 ret = i915_gem_object_wait_rendering(obj, false);
3343 uint32_t old_write_domain, old_read_domains;
3349 ret = i915_gem_object_wait_rendering(obj, !write);
3353 i915_gem_object_flush_gtt_write_domain(obj);
3355 old_write_domain = obj->base.write_domain;
3356 old_read_domains = obj->base.read_domains;
3378 trace_i915_gem_object_change_domain(obj,
3396 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3409 spin_lock(&file_priv->mm.lock);
3414 ring = request->ring;
3415 seqno = request->seqno;
3417 spin_unlock(&file_priv->mm.lock);
3422 ret = __wait_seqno(ring, seqno, true, NULL);
3432 bool map_and_fenceable,
3441 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3444 "bo is already pinned with incorrect alignment:"
3445 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3446 " obj->map_and_fenceable=%d\n",
3457 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3485 struct drm_file *file)
3502 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3508 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3525 i915_gem_object_flush_cpu_write_domain(obj);
3528 drm_gem_object_unreference(&obj->base);
3536 struct drm_file *file)
3553 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3565 drm_gem_object_unreference(&obj->base);
3573 struct drm_file *file)
3594 ret = i915_gem_object_flush_active(obj);
3599 args->busy |= intel_ring_flag(obj->ring) << 16;
3602 drm_gem_object_unreference(&obj->base);
3610 struct drm_file *file_priv)
3612 return i915_gem_ring_throttle(dev, file_priv);
3617 struct drm_file *file_priv)
3623 switch (args->madv) {
3650 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
3651 i915_gem_object_truncate(obj);
3656 drm_gem_object_unreference(&obj->base);
3665 INIT_LIST_HEAD(&obj->mm_list);
3677 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
3681 .get_pages = i915_gem_object_get_pages_gtt,
3682 .put_pages = i915_gem_object_put_pages_gtt,
3708 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3709 mapping_set_gfp_mask(mapping, mask);
3749 trace_i915_gem_object_destroy(obj);
3756 bool was_interruptible;
3758 was_interruptible = dev_priv->mm.interruptible;
3759 dev_priv->mm.interruptible = false;
3763 dev_priv->mm.interruptible = was_interruptible;
3767 i915_gem_object_put_pages(obj);
3768 i915_gem_object_free_mmap_offset(obj);
3772 if (obj->base.import_attach)
3776 i915_gem_info_remove_obj(dev_priv, obj->base.size);
3790 if (dev_priv->mm.suspended) {
3803 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3806 i915_gem_reset_fences(dev);
3812 dev_priv->mm.suspended = 1;
3835 if (!dev_priv->mm.l3_remap_info)
3844 if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
3845 DRM_DEBUG("0x%x was already programmed to %x\n",
3847 if (remap && !dev_priv->mm.l3_remap_info[i/4])
3848 DRM_DEBUG_DRIVER("Clearing remapped register\n");
3889 if (!dev_priv->mm.aliasing_ppgtt)
3897 if (dev_priv->mm.gtt->needs_dmar)
3905 writel(pd_entry, pd_addr + i);
3948 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
3949 DRM_INFO("BLT not supported on this pre-production hardware;"
3950 " graphics performance will be degraded.\n");
3980 goto cleanup_render_ring;
3983 if (intel_enable_blt(dev)) {
3986 goto cleanup_bsd_ring;
4002 cleanup_render_ring:
4010 if (i915_enable_ppgtt >= 0)
4011 return i915_enable_ppgtt;
4013 #ifdef CONFIG_INTEL_IOMMU
4025 unsigned long gtt_size, mappable_size;
4028 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
4029 mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
4067 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4068 dev_priv->dri1.allow_batchbuffer = 1;
4085 struct drm_file *file_priv)
4090 if (drm_core_check_feature(dev, DRIVER_MODESET))
4094 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4099 dev_priv->mm.suspended = 0;
4107 BUG_ON(!list_empty(&dev_priv->mm.active_list));
4112 goto cleanup_ringbuffer;
4119 dev_priv->mm.suspended = 1;
4127 struct drm_file *file_priv)
4129 if (drm_core_check_feature(dev, DRIVER_MODESET))
4141 if (drm_core_check_feature(dev, DRIVER_MODESET))
4146 DRM_ERROR("failed to idle hardware: %d\n", ret);
4162 INIT_LIST_HEAD(&dev_priv->mm.active_list);
4163 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4164 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4165 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4166 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4168 init_ring_lists(&dev_priv->ring[i]);
4170 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4172 i915_gem_retire_work_handler);
4184 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4193 i915_gem_reset_fences(dev);
4198 dev_priv->mm.interruptible = true;
4200 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
4209 static int i915_gem_init_phys_object(struct drm_device *dev,
4210 int id, int size, int align)
4216 if (dev_priv->mm.phys_objs[id - 1] || !size)
4234 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4242 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4247 if (!dev_priv->mm.phys_objs[id - 1])
4250 phys_obj = dev_priv->mm.phys_objs[id - 1];
4260 dev_priv->mm.phys_objs[id - 1] = NULL;
4268 i915_gem_free_phys_object(dev, i);
4274 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4281 vaddr = obj->phys_obj->handle->vaddr;
4284 for (i = 0; i < page_count; i++) {
4285 struct page *page = shmem_read_mapping_page(mapping, i);
4286 if (!IS_ERR(page)) {
4310 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4326 if (!dev_priv->mm.phys_objs[id - 1]) {
4327 ret = i915_gem_init_phys_object(dev, id,
4328 obj->base.size, align);
4330 DRM_ERROR("failed to init phys object %d size: %zu\n",
4331 id, obj->base.size);
4337 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4342 for (i = 0; i < page_count; i++) {
4346 page = shmem_read_mapping_page(mapping, i);
4348 return PTR_ERR(page);
4366 struct drm_file *file_priv)
4371 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4372 unsigned long unwritten;
4397 spin_lock(&file_priv->mm.lock);
4398 while (!list_empty(&file_priv->mm.request_list)) {
4407 spin_unlock(&file_priv->mm.lock);
4416 mm.inactive_shrinker);
4426 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
4428 i915_gem_shrink_all(dev_priv);
4437 cnt += obj->base.size >> PAGE_SHIFT;
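The shrinker count at line 4437 reports reclaimable memory in pages by summing size >> PAGE_SHIFT over the unpinned objects it could drop. A sketch with a hypothetical object record standing in for drm_i915_gem_object:

#include <stddef.h>

#define SKETCH_PAGE_SHIFT 12	/* assumption: 4 KiB pages */

struct sketch_obj {
	size_t size;		/* bytes, page aligned */
	int pin_count;		/* non-zero: not reclaimable */
};

static unsigned long count_reclaimable_pages(const struct sketch_obj *objs,
					     size_t n)
{
	unsigned long cnt = 0;

	for (size_t i = 0; i < n; i++)
		if (objs[i].pin_count == 0)
			cnt += objs[i].size >> SKETCH_PAGE_SHIFT;
	return cnt;
}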